From 02f76901caaf21980a4b83f3e0623682f9d51780 Mon Sep 17 00:00:00 2001
From: Robert Muir
Date: Tue, 13 Jul 2010 14:21:46 +0000
Subject: [PATCH] LUCENE-2532: randomize IndexWriter settings in search tests

git-svn-id: https://svn.apache.org/repos/asf/lucene/dev/trunk@963720 13f79535-47bb-0310-9956-ffa450edef68
---
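[Reviewer note, not part of the patch: every converted test follows the same
shape. A minimal sketch of that shape is below, assuming the trunk test APIs
visible in the hunks (LuceneTestCase.newRandom(), TEST_VERSION_CURRENT,
MockAnalyzer, RandomIndexWriter); the class name, field name and field text
are placeholders, not code from this commit.

import org.apache.lucene.analysis.MockAnalyzer;
import org.apache.lucene.document.Document;
import org.apache.lucene.document.Field;
import org.apache.lucene.index.IndexReader;
import org.apache.lucene.index.IndexWriterConfig;
import org.apache.lucene.index.RandomIndexWriter;
import org.apache.lucene.search.IndexSearcher;
import org.apache.lucene.store.RAMDirectory;
import org.apache.lucene.util.LuceneTestCase;

public class ExampleRandomizedTest extends LuceneTestCase {
  private RAMDirectory directory;
  private IndexReader reader;
  private IndexSearcher searcher;

  @Override
  protected void setUp() throws Exception {
    super.setUp();
    directory = new RAMDirectory();
    // seed the writer from the test's Random so IndexWriter settings vary per run
    RandomIndexWriter writer = new RandomIndexWriter(newRandom(), directory,
        new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer()));
    Document doc = new Document();
    doc.add(new Field("field", "some text", Field.Store.NO, Field.Index.ANALYZED));
    writer.addDocument(doc);
    // take the reader from the writer *before* closing it, instead of
    // calling IndexReader.open(directory, true) after writer.close()
    reader = writer.getReader();
    writer.close();
    searcher = new IndexSearcher(reader);
  }

  @Override
  protected void tearDown() throws Exception {
    // close searcher, reader and directory explicitly; the patch adds
    // tearDown methods like this to tests that were missing them
    searcher.close();
    reader.close();
    directory.close();
    super.tearDown();
  }
}
]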
.../lucene/search/BooleanFilterTest.java | 18 +-
.../lucene/search/ChainedFilterTest.java | 33 +-
.../lucene/search/DuplicateFilterTest.java | 12 +-
.../lucene/search/FuzzyLikeThisQueryTest.java | 21 +-
.../apache/lucene/search/TermsFilterTest.java | 12 +-
.../lucene/search/regex/TestRegexQuery.java | 30 +-
.../search/similar/TestMoreLikeThis.java | 177 ++--
.../pulsing/PulsingPostingsWriterImpl.java | 2 +-
.../lucene/document/TestBinaryDocument.java | 21 +-
.../apache/lucene/document/TestDocument.java | 304 +++----
.../lucene/index/RandomIndexWriter.java | 129 +++
.../lucene/search/BaseTestRangeFilter.java | 219 ++---
.../lucene/search/TestAutomatonQuery.java | 23 +-
.../search/TestAutomatonQueryUnicode.java | 21 +-
.../apache/lucene/search/TestBoolean2.java | 16 +-
.../search/TestBooleanMinShouldMatch.java | 35 +-
.../apache/lucene/search/TestBooleanOr.java | 26 +-
.../lucene/search/TestBooleanPrefixQuery.java | 40 +-
.../lucene/search/TestBooleanQuery.java | 5 +-
.../lucene/search/TestBooleanScorer.java | 50 +-
.../lucene/search/TestCustomSearcherSort.java | 377 ++++----
.../apache/lucene/search/TestDateFilter.java | 261 +++---
.../apache/lucene/search/TestDateSort.java | 20 +-
.../search/TestDisjunctionMaxQuery.java | 834 +++++++++---------
.../apache/lucene/search/TestDocBoost.java | 14 +-
.../apache/lucene/search/TestDocIdSet.java | 10 +-
.../lucene/search/TestExplanations.java | 18 +-
.../apache/lucene/search/TestFieldCache.java | 4 +-
.../search/TestFieldCacheRangeFilter.java | 25 +-
.../search/TestFieldCacheTermsFilter.java | 7 +-
.../lucene/search/TestFilteredQuery.java | 11 +-
.../apache/lucene/search/TestFuzzyQuery.java | 52 +-
.../apache/lucene/search/TestFuzzyQuery2.java | 26 +-
.../lucene/search/TestMultiPhraseQuery.java | 387 ++++----
.../search/TestMultiTermConstantScore.java | 61 +-
.../TestMultiValuedNumericRangeQuery.java | 13 +-
.../org/apache/lucene/search/TestNot.java | 16 +-
.../search/TestNumericRangeQuery32.java | 19 +-
.../search/TestNumericRangeQuery64.java | 17 +-
.../lucene/search/TestPhrasePrefixQuery.java | 128 +--
.../apache/lucene/search/TestPhraseQuery.java | 55 +-
.../lucene/search/TestPositionIncrement.java | 17 +-
.../lucene/search/TestPrefixFilter.java | 15 +-
.../search/TestPrefixInBooleanQuery.java | 30 +-
.../apache/lucene/search/TestPrefixQuery.java | 14 +-
.../lucene/search/TestQueryWrapperFilter.java | 12 +-
.../apache/lucene/search/TestRegexpQuery.java | 19 +-
.../lucene/search/TestRegexpRandom.java | 20 +-
.../lucene/search/TestRegexpRandom2.java | 23 +-
.../apache/lucene/search/TestSimilarity.java | 14 +-
.../lucene/search/TestSloppyPhraseQuery.java | 22 +-
.../org/apache/lucene/search/TestSort.java | 12 +-
.../lucene/search/TestSpanQueryFilter.java | 9 +-
.../lucene/search/TestTermRangeFilter.java | 809 +++++++++--------
.../apache/lucene/search/TestTermScorer.java | 283 +++---
.../apache/lucene/search/TestTermVectors.java | 361 ++++----
.../search/TestTimeLimitingCollector.java | 17 +-
.../lucene/search/TestTopDocsCollector.java | 9 +-
.../search/TestTopScoreDocCollector.java | 18 +-
.../apache/lucene/search/TestWildcard.java | 17 +-
.../lucene/search/TestWildcardRandom.java | 22 +-
.../search/payloads/TestPayloadNearQuery.java | 25 +-
.../search/payloads/TestPayloadTermQuery.java | 22 +-
.../lucene/search/spans/TestBasics.java | 28 +-
.../spans/TestFieldMaskingSpanQuery.java | 16 +-
.../search/spans/TestNearSpansOrdered.java | 16 +-
.../apache/lucene/search/spans/TestSpans.java | 22 +-
.../search/spans/TestSpansAdvanced.java | 271 +++---
.../search/spans/TestSpansAdvanced2.java | 171 ++--
.../apache/lucene/util/LuceneTestCaseJ4.java | 30 +
70 files changed, 3260 insertions(+), 2633 deletions(-)
create mode 100644 lucene/src/test/org/apache/lucene/index/RandomIndexWriter.java
diff --git a/lucene/contrib/queries/src/test/org/apache/lucene/search/BooleanFilterTest.java b/lucene/contrib/queries/src/test/org/apache/lucene/search/BooleanFilterTest.java
index 2e768c1c87a..5386e51bee9 100644
--- a/lucene/contrib/queries/src/test/org/apache/lucene/search/BooleanFilterTest.java
+++ b/lucene/contrib/queries/src/test/org/apache/lucene/search/BooleanFilterTest.java
@@ -24,8 +24,8 @@ import org.apache.lucene.analysis.MockTokenizer;
import org.apache.lucene.document.Document;
import org.apache.lucene.document.Field;
import org.apache.lucene.index.IndexReader;
-import org.apache.lucene.index.IndexWriter;
import org.apache.lucene.index.IndexWriterConfig;
+import org.apache.lucene.index.RandomIndexWriter;
import org.apache.lucene.index.Term;
import org.apache.lucene.store.RAMDirectory;
import org.apache.lucene.util.LuceneTestCase;
@@ -38,7 +38,7 @@ public class BooleanFilterTest extends LuceneTestCase {
protected void setUp() throws Exception {
super.setUp();
directory = new RAMDirectory();
- IndexWriter writer = new IndexWriter(directory, new IndexWriterConfig(
+ RandomIndexWriter writer = new RandomIndexWriter(newRandom(), directory, new IndexWriterConfig(
TEST_VERSION_CURRENT, new MockAnalyzer(MockTokenizer.WHITESPACE, false)));
//Add series of docs with filterable fields : acces rights, prices, dates and "in-stock" flags
@@ -47,12 +47,18 @@ public class BooleanFilterTest extends LuceneTestCase {
addDoc(writer, "guest", "020", "20050101","Y");
addDoc(writer, "admin", "020", "20050101","Maybe");
addDoc(writer, "admin guest", "030", "20050101","N");
-
- writer.close();
- reader=IndexReader.open(directory, true);
+ reader = writer.getReader();
+ writer.close();
}
- private void addDoc(IndexWriter writer, String accessRights, String price, String date, String inStock) throws IOException
+ @Override
+ protected void tearDown() throws Exception {
+ reader.close();
+ directory.close();
+ super.tearDown();
+ }
+
+ private void addDoc(RandomIndexWriter writer, String accessRights, String price, String date, String inStock) throws IOException
{
Document doc=new Document();
doc.add(new Field("accessRights",accessRights,Field.Store.YES,Field.Index.ANALYZED));
diff --git a/lucene/contrib/queries/src/test/org/apache/lucene/search/ChainedFilterTest.java b/lucene/contrib/queries/src/test/org/apache/lucene/search/ChainedFilterTest.java
index 7839c7a854e..ee930b29902 100644
--- a/lucene/contrib/queries/src/test/org/apache/lucene/search/ChainedFilterTest.java
+++ b/lucene/contrib/queries/src/test/org/apache/lucene/search/ChainedFilterTest.java
@@ -19,12 +19,14 @@ package org.apache.lucene.search;
import java.util.Calendar;
import java.util.GregorianCalendar;
+import java.util.Random;
import org.apache.lucene.analysis.MockAnalyzer;
import org.apache.lucene.document.Document;
import org.apache.lucene.document.Field;
-import org.apache.lucene.index.IndexWriter;
+import org.apache.lucene.index.IndexReader;
import org.apache.lucene.index.IndexWriterConfig;
+import org.apache.lucene.index.RandomIndexWriter;
import org.apache.lucene.index.Term;
import org.apache.lucene.search.BooleanClause;
import org.apache.lucene.search.BooleanQuery;
@@ -47,18 +49,22 @@ public class ChainedFilterTest extends LuceneTestCase {
private RAMDirectory directory;
private IndexSearcher searcher;
+ private IndexReader reader;
private Query query;
// private DateFilter dateFilter; DateFilter was deprecated and removed
private TermRangeFilter dateFilter;
private QueryWrapperFilter bobFilter;
private QueryWrapperFilter sueFilter;
+ private Random random;
+
@Override
protected void setUp() throws Exception {
super.setUp();
+ random = newRandom();
directory = new RAMDirectory();
- IndexWriter writer = new IndexWriter(directory, new IndexWriterConfig(
- TEST_VERSION_CURRENT, new MockAnalyzer()));
+ RandomIndexWriter writer = new RandomIndexWriter(random, directory,
+ new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer()));
Calendar cal = new GregorianCalendar();
cal.clear();
@@ -73,10 +79,10 @@ public class ChainedFilterTest extends LuceneTestCase {
cal.add(Calendar.DATE, 1);
}
-
+ reader = writer.getReader();
writer.close();
- searcher = new IndexSearcher(directory, true);
+ searcher = new IndexSearcher(reader);
// query for everything to make life easier
BooleanQuery bq = new BooleanQuery();
@@ -96,6 +102,14 @@ public class ChainedFilterTest extends LuceneTestCase {
new TermQuery(new Term("owner", "sue")));
}
+ @Override
+ public void tearDown() throws Exception {
+ searcher.close();
+ reader.close();
+ directory.close();
+ super.tearDown();
+ }
+
private ChainedFilter getChainedFilter(Filter[] chain, int[] logic) {
if (logic == null) {
return new ChainedFilter(chain);
@@ -186,10 +200,12 @@ public class ChainedFilterTest extends LuceneTestCase {
public void testWithCachingFilter() throws Exception {
Directory dir = new RAMDirectory();
- IndexWriter writer = new IndexWriter(dir, new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer()));
+ RandomIndexWriter writer = new RandomIndexWriter(random, dir,
+ new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer()));
+ IndexReader reader = writer.getReader();
writer.close();
- Searcher searcher = new IndexSearcher(dir, true);
+ Searcher searcher = new IndexSearcher(reader);
Query query = new TermQuery(new Term("none", "none"));
@@ -206,6 +222,9 @@ public class ChainedFilterTest extends LuceneTestCase {
// throws java.lang.ClassCastException: org.apache.lucene.util.OpenBitSet cannot be cast to java.util.BitSet
searcher.search(new MatchAllDocsQuery(), cf, 1);
+ searcher.close();
+ reader.close();
+ dir.close();
}
}
diff --git a/lucene/contrib/queries/src/test/org/apache/lucene/search/DuplicateFilterTest.java b/lucene/contrib/queries/src/test/org/apache/lucene/search/DuplicateFilterTest.java
index 7cfe74b02af..ee15b14f303 100644
--- a/lucene/contrib/queries/src/test/org/apache/lucene/search/DuplicateFilterTest.java
+++ b/lucene/contrib/queries/src/test/org/apache/lucene/search/DuplicateFilterTest.java
@@ -26,6 +26,7 @@ import org.apache.lucene.document.Field;
import org.apache.lucene.index.IndexReader;
import org.apache.lucene.index.IndexWriter;
import org.apache.lucene.index.IndexWriterConfig;
+import org.apache.lucene.index.RandomIndexWriter;
import org.apache.lucene.index.Term;
import org.apache.lucene.index.DocsEnum;
import org.apache.lucene.index.MultiFields;
@@ -44,8 +45,8 @@ public class DuplicateFilterTest extends LuceneTestCase {
protected void setUp() throws Exception {
super.setUp();
directory = new RAMDirectory();
- IndexWriter writer = new IndexWriter(directory, new IndexWriterConfig(
- TEST_VERSION_CURRENT, new MockAnalyzer()));
+ RandomIndexWriter writer = new RandomIndexWriter(newRandom(), directory,
+ new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer()));
//Add series of docs with filterable fields : url, text and dates flags
addDoc(writer, "http://lucene.apache.org", "lucene 1.4.3 available", "20040101");
@@ -56,9 +57,8 @@ public class DuplicateFilterTest extends LuceneTestCase {
addDoc(writer, "http://www.bar.com", "Dog uses Lucene", "20050101");
addDoc(writer, "http://lucene.apache.org", "Lucene 2.0 out", "20050101");
addDoc(writer, "http://lucene.apache.org", "Oops. Lucene 2.1 out", "20050102");
-
- writer.close();
- reader=IndexReader.open(directory, true);
+ reader = writer.getReader();
+ writer.close();
searcher =new IndexSearcher(reader);
}
@@ -71,7 +71,7 @@ public class DuplicateFilterTest extends LuceneTestCase {
super.tearDown();
}
- private void addDoc(IndexWriter writer, String url, String text, String date) throws IOException
+ private void addDoc(RandomIndexWriter writer, String url, String text, String date) throws IOException
{
Document doc=new Document();
doc.add(new Field(KEY_FIELD,url,Field.Store.YES,Field.Index.NOT_ANALYZED));
diff --git a/lucene/contrib/queries/src/test/org/apache/lucene/search/FuzzyLikeThisQueryTest.java b/lucene/contrib/queries/src/test/org/apache/lucene/search/FuzzyLikeThisQueryTest.java
index 82c24c6d06c..fd2881a75ea 100644
--- a/lucene/contrib/queries/src/test/org/apache/lucene/search/FuzzyLikeThisQueryTest.java
+++ b/lucene/contrib/queries/src/test/org/apache/lucene/search/FuzzyLikeThisQueryTest.java
@@ -24,8 +24,9 @@ import org.apache.lucene.analysis.Analyzer;
import org.apache.lucene.analysis.MockAnalyzer;
import org.apache.lucene.document.Document;
import org.apache.lucene.document.Field;
-import org.apache.lucene.index.IndexWriter;
+import org.apache.lucene.index.IndexReader;
import org.apache.lucene.index.IndexWriterConfig;
+import org.apache.lucene.index.RandomIndexWriter;
import org.apache.lucene.index.Term;
import org.apache.lucene.store.RAMDirectory;
import org.apache.lucene.util.LuceneTestCase;
@@ -33,13 +34,15 @@ import org.apache.lucene.util.LuceneTestCase;
public class FuzzyLikeThisQueryTest extends LuceneTestCase {
private RAMDirectory directory;
private IndexSearcher searcher;
+ private IndexReader reader;
private Analyzer analyzer=new MockAnalyzer();
@Override
protected void setUp() throws Exception {
super.setUp();
directory = new RAMDirectory();
- IndexWriter writer = new IndexWriter(directory, new IndexWriterConfig(TEST_VERSION_CURRENT, analyzer));
+ RandomIndexWriter writer = new RandomIndexWriter(newRandom(), directory,
+ new IndexWriterConfig(TEST_VERSION_CURRENT, analyzer));
//Add series of docs with misspelt names
addDoc(writer, "jonathon smythe","1");
@@ -48,12 +51,20 @@ public class FuzzyLikeThisQueryTest extends LuceneTestCase {
addDoc(writer, "johnny smith","4" );
addDoc(writer, "jonny smith","5" );
addDoc(writer, "johnathon smythe","6");
-
+ reader = writer.getReader();
writer.close();
- searcher=new IndexSearcher(directory, true);
+ searcher=new IndexSearcher(reader);
}
- private void addDoc(IndexWriter writer, String name, String id) throws IOException
+ @Override
+ protected void tearDown() throws Exception {
+ searcher.close();
+ reader.close();
+ directory.close();
+ super.tearDown();
+ }
+
+ private void addDoc(RandomIndexWriter writer, String name, String id) throws IOException
{
Document doc=new Document();
doc.add(new Field("name",name,Field.Store.YES,Field.Index.ANALYZED));
diff --git a/lucene/contrib/queries/src/test/org/apache/lucene/search/TermsFilterTest.java b/lucene/contrib/queries/src/test/org/apache/lucene/search/TermsFilterTest.java
index b4ca9665d7e..9050d78db07 100644
--- a/lucene/contrib/queries/src/test/org/apache/lucene/search/TermsFilterTest.java
+++ b/lucene/contrib/queries/src/test/org/apache/lucene/search/TermsFilterTest.java
@@ -23,8 +23,8 @@ import org.apache.lucene.analysis.MockAnalyzer;
import org.apache.lucene.document.Document;
import org.apache.lucene.document.Field;
import org.apache.lucene.index.IndexReader;
-import org.apache.lucene.index.IndexWriter;
import org.apache.lucene.index.IndexWriterConfig;
+import org.apache.lucene.index.RandomIndexWriter;
import org.apache.lucene.index.Term;
import org.apache.lucene.store.RAMDirectory;
import org.apache.lucene.util.LuceneTestCase;
@@ -53,16 +53,16 @@ public class TermsFilterTest extends LuceneTestCase {
public void testMissingTerms() throws Exception {
String fieldName="field1";
RAMDirectory rd=new RAMDirectory();
- IndexWriter w = new IndexWriter(rd, new IndexWriterConfig(
- TEST_VERSION_CURRENT, new MockAnalyzer()));
+ RandomIndexWriter w = new RandomIndexWriter(newRandom(), rd,
+ new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer()));
for (int i = 0; i < 100; i++) {
Document doc=new Document();
int term=i*10; //terms are units of 10;
doc.add(new Field(fieldName,""+term,Field.Store.YES,Field.Index.NOT_ANALYZED));
w.addDocument(doc);
}
+ IndexReader reader = w.getReader();
w.close();
- IndexReader reader = IndexReader.open(rd, true);
TermsFilter tf=new TermsFilter();
tf.addTerm(new Term(fieldName,"19"));
@@ -80,6 +80,8 @@ public class TermsFilterTest extends LuceneTestCase {
tf.addTerm(new Term(fieldName,"00"));
bits = (OpenBitSet)tf.getDocIdSet(reader);
assertEquals("Must match 2", 2, bits.cardinality());
-
+
+ reader.close();
+ rd.close();
}
}
diff --git a/lucene/contrib/queries/src/test/org/apache/lucene/search/regex/TestRegexQuery.java b/lucene/contrib/queries/src/test/org/apache/lucene/search/regex/TestRegexQuery.java
index 36a4473b894..b4c6aad6ba8 100644
--- a/lucene/contrib/queries/src/test/org/apache/lucene/search/regex/TestRegexQuery.java
+++ b/lucene/contrib/queries/src/test/org/apache/lucene/search/regex/TestRegexQuery.java
@@ -17,9 +17,11 @@ package org.apache.lucene.search.regex;
* limitations under the License.
*/
+import org.apache.lucene.store.Directory;
import org.apache.lucene.store.RAMDirectory;
-import org.apache.lucene.index.IndexWriter;
+import org.apache.lucene.index.IndexReader;
import org.apache.lucene.index.IndexWriterConfig;
+import org.apache.lucene.index.RandomIndexWriter;
import org.apache.lucene.index.Term;
import org.apache.lucene.analysis.MockAnalyzer;
import org.apache.lucene.document.Document;
@@ -33,30 +35,30 @@ import org.apache.lucene.util.LuceneTestCase;
public class TestRegexQuery extends LuceneTestCase {
private IndexSearcher searcher;
+ private IndexReader reader;
+ private Directory directory;
private final String FN = "field";
@Override
protected void setUp() throws Exception {
super.setUp();
- RAMDirectory directory = new RAMDirectory();
- try {
- IndexWriter writer = new IndexWriter(directory, new IndexWriterConfig(
- TEST_VERSION_CURRENT, new MockAnalyzer()));
- Document doc = new Document();
- doc.add(new Field(FN, "the quick brown fox jumps over the lazy dog", Field.Store.NO, Field.Index.ANALYZED));
- writer.addDocument(doc);
- writer.optimize();
- writer.close();
- searcher = new IndexSearcher(directory, true);
- } catch (Exception e) {
- fail(e.toString());
- }
+ directory = new RAMDirectory();
+ RandomIndexWriter writer = new RandomIndexWriter(newRandom(), directory,
+ new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer()));
+ Document doc = new Document();
+ doc.add(new Field(FN, "the quick brown fox jumps over the lazy dog", Field.Store.NO, Field.Index.ANALYZED));
+ writer.addDocument(doc);
+ reader = writer.getReader();
+ writer.close();
+ searcher = new IndexSearcher(reader);
}
@Override
protected void tearDown() throws Exception {
searcher.close();
+ reader.close();
+ directory.close();
super.tearDown();
}
diff --git a/lucene/contrib/queries/src/test/org/apache/lucene/search/similar/TestMoreLikeThis.java b/lucene/contrib/queries/src/test/org/apache/lucene/search/similar/TestMoreLikeThis.java
index 25e6c32f570..92f5d24e1ec 100644
--- a/lucene/contrib/queries/src/test/org/apache/lucene/search/similar/TestMoreLikeThis.java
+++ b/lucene/contrib/queries/src/test/org/apache/lucene/search/similar/TestMoreLikeThis.java
@@ -28,8 +28,8 @@ import org.apache.lucene.analysis.MockTokenizer;
import org.apache.lucene.document.Document;
import org.apache.lucene.document.Field;
import org.apache.lucene.index.IndexReader;
-import org.apache.lucene.index.IndexWriter;
import org.apache.lucene.index.IndexWriterConfig;
+import org.apache.lucene.index.RandomIndexWriter;
import org.apache.lucene.search.BooleanClause;
import org.apache.lucene.search.BooleanQuery;
import org.apache.lucene.search.IndexSearcher;
@@ -38,95 +38,94 @@ import org.apache.lucene.store.RAMDirectory;
import org.apache.lucene.util.LuceneTestCase;
public class TestMoreLikeThis extends LuceneTestCase {
- private RAMDirectory directory;
- private IndexReader reader;
- private IndexSearcher searcher;
-
- @Override
- protected void setUp() throws Exception {
- super.setUp();
- directory = new RAMDirectory();
- IndexWriter writer = new IndexWriter(directory, new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer()));
-
- // Add series of docs with specific information for MoreLikeThis
- addDoc(writer, "lucene");
- addDoc(writer, "lucene release");
-
- writer.close();
- reader = IndexReader.open(directory, true);
- searcher = new IndexSearcher(reader);
+ private RAMDirectory directory;
+ private IndexReader reader;
+ private IndexSearcher searcher;
+
+ @Override
+ protected void setUp() throws Exception {
+ super.setUp();
+ directory = new RAMDirectory();
+ RandomIndexWriter writer = new RandomIndexWriter(newRandom(), directory,
+ new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer()));
+
+ // Add series of docs with specific information for MoreLikeThis
+ addDoc(writer, "lucene");
+ addDoc(writer, "lucene release");
+ reader = writer.getReader();
+ writer.close();
+ searcher = new IndexSearcher(reader);
+ }
+
+ @Override
+ protected void tearDown() throws Exception {
+ reader.close();
+ searcher.close();
+ directory.close();
+ super.tearDown();
+ }
+
+ private void addDoc(RandomIndexWriter writer, String text) throws IOException {
+ Document doc = new Document();
+ doc.add(new Field("text", text, Field.Store.YES, Field.Index.ANALYZED));
+ writer.addDocument(doc);
+ }
+
+ public void testBoostFactor() throws Throwable {
+ Map originalValues = getOriginalValues();
+
+ MoreLikeThis mlt = new MoreLikeThis(reader);
+ mlt.setAnalyzer(new MockAnalyzer(MockTokenizer.WHITESPACE, false));
+ mlt.setMinDocFreq(1);
+ mlt.setMinTermFreq(1);
+ mlt.setMinWordLen(1);
+ mlt.setFieldNames(new String[] {"text"});
+ mlt.setBoost(true);
+
+ // this mean that every term boost factor will be multiplied by this
+ // number
+ float boostFactor = 5;
+ mlt.setBoostFactor(boostFactor);
+
+ BooleanQuery query = (BooleanQuery) mlt.like(new StringReader(
+ "lucene release"));
+ List clauses = query.clauses();
+
+ assertEquals("Expected " + originalValues.size() + " clauses.",
+ originalValues.size(), clauses.size());
+
+ for (int i = 0; i < clauses.size(); i++) {
+ BooleanClause clause = clauses.get(i);
+ TermQuery tq = (TermQuery) clause.getQuery();
+ Float termBoost = originalValues.get(tq.getTerm().text());
+ assertNotNull("Expected term " + tq.getTerm().text(), termBoost);
+
+ float totalBoost = termBoost.floatValue() * boostFactor;
+ assertEquals("Expected boost of " + totalBoost + " for term '"
+ + tq.getTerm().text() + "' got " + tq.getBoost(), totalBoost, tq
+ .getBoost(), 0.0001);
}
-
- @Override
- protected void tearDown() throws Exception {
- reader.close();
- searcher.close();
- directory.close();
- super.tearDown();
- }
-
- private void addDoc(IndexWriter writer, String text) throws IOException {
- Document doc = new Document();
- doc.add(new Field("text", text, Field.Store.YES, Field.Index.ANALYZED));
- writer.addDocument(doc);
- }
-
- public void testBoostFactor() throws Throwable {
- Map originalValues = getOriginalValues();
-
- MoreLikeThis mlt = new MoreLikeThis(
- reader);
- mlt.setAnalyzer(new MockAnalyzer(MockTokenizer.WHITESPACE, false));
- mlt.setMinDocFreq(1);
- mlt.setMinTermFreq(1);
- mlt.setMinWordLen(1);
- mlt.setFieldNames(new String[] { "text" });
- mlt.setBoost(true);
-
- // this mean that every term boost factor will be multiplied by this
- // number
- float boostFactor = 5;
- mlt.setBoostFactor(boostFactor);
-
- BooleanQuery query = (BooleanQuery) mlt.like(new StringReader(
- "lucene release"));
- List clauses = query.clauses();
-
- assertEquals("Expected " + originalValues.size() + " clauses.",
- originalValues.size(), clauses.size());
-
- for (int i = 0; i < clauses.size(); i++) {
- BooleanClause clause = clauses.get(i);
- TermQuery tq = (TermQuery) clause.getQuery();
- Float termBoost = originalValues.get(tq.getTerm().text());
- assertNotNull("Expected term " + tq.getTerm().text(), termBoost);
-
- float totalBoost = termBoost.floatValue() * boostFactor;
- assertEquals("Expected boost of " + totalBoost + " for term '"
- + tq.getTerm().text() + "' got " + tq.getBoost(),
- totalBoost, tq.getBoost(), 0.0001);
- }
- }
-
- private Map getOriginalValues() throws IOException {
- Map originalValues = new HashMap();
- MoreLikeThis mlt = new MoreLikeThis(reader);
- mlt.setAnalyzer(new MockAnalyzer(MockTokenizer.WHITESPACE, false));
- mlt.setMinDocFreq(1);
- mlt.setMinTermFreq(1);
- mlt.setMinWordLen(1);
- mlt.setFieldNames(new String[] { "text" });
- mlt.setBoost(true);
- BooleanQuery query = (BooleanQuery) mlt.like(new StringReader(
- "lucene release"));
- List clauses = query.clauses();
-
- for (int i = 0; i < clauses.size(); i++) {
- BooleanClause clause = clauses.get(i);
- TermQuery tq = (TermQuery) clause.getQuery();
- originalValues.put(tq.getTerm().text(), Float.valueOf(tq.getBoost()));
- }
- return originalValues;
+ }
+
+ private Map getOriginalValues() throws IOException {
+ Map originalValues = new HashMap();
+ MoreLikeThis mlt = new MoreLikeThis(reader);
+ mlt.setAnalyzer(new MockAnalyzer(MockTokenizer.WHITESPACE, false));
+ mlt.setMinDocFreq(1);
+ mlt.setMinTermFreq(1);
+ mlt.setMinWordLen(1);
+ mlt.setFieldNames(new String[] {"text"});
+ mlt.setBoost(true);
+ BooleanQuery query = (BooleanQuery) mlt.like(new StringReader(
+ "lucene release"));
+ List clauses = query.clauses();
+
+ for (int i = 0; i < clauses.size(); i++) {
+ BooleanClause clause = clauses.get(i);
+ TermQuery tq = (TermQuery) clause.getQuery();
+ originalValues.put(tq.getTerm().text(), Float.valueOf(tq.getBoost()));
}
+ return originalValues;
+ }
}
diff --git a/lucene/src/java/org/apache/lucene/index/codecs/pulsing/PulsingPostingsWriterImpl.java b/lucene/src/java/org/apache/lucene/index/codecs/pulsing/PulsingPostingsWriterImpl.java
index 0cd0840b520..b9740f18ea5 100644
--- a/lucene/src/java/org/apache/lucene/index/codecs/pulsing/PulsingPostingsWriterImpl.java
+++ b/lucene/src/java/org/apache/lucene/index/codecs/pulsing/PulsingPostingsWriterImpl.java
@@ -229,7 +229,7 @@ public final class PulsingPostingsWriterImpl extends StandardPostingsWriter {
@Override
public void finishDoc() {
- assert currentDoc.numPositions == currentDoc.termDocFreq;
+ assert omitTF || currentDoc.numPositions == currentDoc.termDocFreq;
}
boolean pendingIsIndexTerm;
diff --git a/lucene/src/test/org/apache/lucene/document/TestBinaryDocument.java b/lucene/src/test/org/apache/lucene/document/TestBinaryDocument.java
index 232714f5b34..da31b7bd503 100644
--- a/lucene/src/test/org/apache/lucene/document/TestBinaryDocument.java
+++ b/lucene/src/test/org/apache/lucene/document/TestBinaryDocument.java
@@ -4,8 +4,8 @@ import org.apache.lucene.util.LuceneTestCase;
import org.apache.lucene.analysis.MockAnalyzer;
import org.apache.lucene.index.IndexReader;
-import org.apache.lucene.index.IndexWriter;
import org.apache.lucene.index.IndexWriterConfig;
+import org.apache.lucene.index.RandomIndexWriter;
import org.apache.lucene.store.MockRAMDirectory;
/**
@@ -58,13 +58,12 @@ public class TestBinaryDocument extends LuceneTestCase {
/** add the doc to a ram index */
MockRAMDirectory dir = new MockRAMDirectory();
- IndexWriter writer = new IndexWriter(dir, new IndexWriterConfig(
- TEST_VERSION_CURRENT, new MockAnalyzer()));
+ RandomIndexWriter writer = new RandomIndexWriter(newRandom(), dir,
+ new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer()));
writer.addDocument(doc);
- writer.close();
/** open a reader and fetch the document */
- IndexReader reader = IndexReader.open(dir, false);
+ IndexReader reader = writer.getReader();
Document docFromReader = reader.document(0);
assertTrue(docFromReader != null);
@@ -76,6 +75,10 @@ public class TestBinaryDocument extends LuceneTestCase {
String stringFldStoredTest = docFromReader.get("stringStored");
assertTrue(stringFldStoredTest.equals(binaryValStored));
+ writer.close();
+ reader.close();
+
+ reader = IndexReader.open(dir, false);
/** delete the document from index */
reader.deleteDocument(0);
assertEquals(0, reader.numDocs());
@@ -95,13 +98,12 @@ public class TestBinaryDocument extends LuceneTestCase {
/** add the doc to a ram index */
MockRAMDirectory dir = new MockRAMDirectory();
- IndexWriter writer = new IndexWriter(dir, new IndexWriterConfig(
- TEST_VERSION_CURRENT, new MockAnalyzer()));
+ RandomIndexWriter writer = new RandomIndexWriter(newRandom(), dir,
+ new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer()));
writer.addDocument(doc);
- writer.close();
/** open a reader and fetch the document */
- IndexReader reader = IndexReader.open(dir, false);
+ IndexReader reader = writer.getReader();
Document docFromReader = reader.document(0);
assertTrue(docFromReader != null);
@@ -110,6 +112,7 @@ public class TestBinaryDocument extends LuceneTestCase {
assertTrue(binaryFldCompressedTest.equals(binaryValCompressed));
assertTrue(CompressionTools.decompressString(docFromReader.getBinaryValue("stringCompressed")).equals(binaryValCompressed));
+ writer.close();
reader.close();
dir.close();
}
diff --git a/lucene/src/test/org/apache/lucene/document/TestDocument.java b/lucene/src/test/org/apache/lucene/document/TestDocument.java
index 567dcac4824..5751fb3bd95 100644
--- a/lucene/src/test/org/apache/lucene/document/TestDocument.java
+++ b/lucene/src/test/org/apache/lucene/document/TestDocument.java
@@ -1,8 +1,9 @@
package org.apache.lucene.document;
import org.apache.lucene.analysis.MockAnalyzer;
-import org.apache.lucene.index.IndexWriter;
+import org.apache.lucene.index.IndexReader;
import org.apache.lucene.index.IndexWriterConfig;
+import org.apache.lucene.index.RandomIndexWriter;
import org.apache.lucene.index.Term;
import org.apache.lucene.search.IndexSearcher;
import org.apache.lucene.search.Query;
@@ -32,17 +33,15 @@ import org.apache.lucene.util.LuceneTestCase;
/**
* Tests {@link Document} class.
*/
-public class TestDocument extends LuceneTestCase
-{
-
+public class TestDocument extends LuceneTestCase {
+
String binaryVal = "this text will be stored as a byte array in the index";
String binaryVal2 = "this text will be also stored as a byte array in the index";
- public void testBinaryField()
- throws Exception
- {
+ public void testBinaryField() throws Exception {
Document doc = new Document();
- Fieldable stringFld = new Field("string", binaryVal, Field.Store.YES, Field.Index.NO);
+ Fieldable stringFld = new Field("string", binaryVal, Field.Store.YES,
+ Field.Index.NO);
Fieldable binaryFld = new Field("binary", binaryVal.getBytes());
Fieldable binaryFld2 = new Field("binary", binaryVal2.getBytes());
@@ -67,7 +66,7 @@ public class TestDocument extends LuceneTestCase
assertEquals(3, doc.fields.size());
byte[][] binaryTests = doc.getBinaryValues("binary");
-
+
assertEquals(2, binaryTests.length);
binaryTest = new String(binaryTests[0]);
@@ -88,17 +87,17 @@ public class TestDocument extends LuceneTestCase
/**
* Tests {@link Document#removeField(String)} method for a brand new Document
* that has not been indexed yet.
- *
+ *
* @throws Exception on error
*/
- public void testRemoveForNewDocument() throws Exception
- {
+ public void testRemoveForNewDocument() throws Exception {
Document doc = makeDocumentWithFields();
assertEquals(8, doc.fields.size());
doc.removeFields("keyword");
assertEquals(6, doc.fields.size());
- doc.removeFields("doesnotexists"); // removing non-existing fields is siltenlty ignored
- doc.removeFields("keyword"); // removing a field more than once
+ doc.removeFields("doesnotexists"); // removing non-existing fields is
+ // siltenlty ignored
+ doc.removeFields("keyword"); // removing a field more than once
assertEquals(6, doc.fields.size());
doc.removeField("text");
assertEquals(5, doc.fields.size());
@@ -106,164 +105,171 @@ public class TestDocument extends LuceneTestCase
assertEquals(4, doc.fields.size());
doc.removeField("text");
assertEquals(4, doc.fields.size());
- doc.removeField("doesnotexists"); // removing non-existing fields is siltenlty ignored
+ doc.removeField("doesnotexists"); // removing non-existing fields is
+ // siltenlty ignored
assertEquals(4, doc.fields.size());
doc.removeFields("unindexed");
assertEquals(2, doc.fields.size());
doc.removeFields("unstored");
assertEquals(0, doc.fields.size());
- doc.removeFields("doesnotexists"); // removing non-existing fields is siltenlty ignored
+ doc.removeFields("doesnotexists"); // removing non-existing fields is
+ // siltenlty ignored
assertEquals(0, doc.fields.size());
}
-
- public void testConstructorExceptions()
- {
- new Field("name", "value", Field.Store.YES, Field.Index.NO); // okay
- new Field("name", "value", Field.Store.NO, Field.Index.NOT_ANALYZED); // okay
+
+ public void testConstructorExceptions() {
+ new Field("name", "value", Field.Store.YES, Field.Index.NO); // okay
+ new Field("name", "value", Field.Store.NO, Field.Index.NOT_ANALYZED); // okay
try {
new Field("name", "value", Field.Store.NO, Field.Index.NO);
fail();
- } catch(IllegalArgumentException e) {
+ } catch (IllegalArgumentException e) {
// expected exception
}
- new Field("name", "value", Field.Store.YES, Field.Index.NO, Field.TermVector.NO); // okay
+ new Field("name", "value", Field.Store.YES, Field.Index.NO,
+ Field.TermVector.NO); // okay
try {
- new Field("name", "value", Field.Store.YES, Field.Index.NO, Field.TermVector.YES);
+ new Field("name", "value", Field.Store.YES, Field.Index.NO,
+ Field.TermVector.YES);
fail();
- } catch(IllegalArgumentException e) {
+ } catch (IllegalArgumentException e) {
// expected exception
}
}
- /**
- * Tests {@link Document#getValues(String)} method for a brand new Document
- * that has not been indexed yet.
- *
- * @throws Exception on error
- */
- public void testGetValuesForNewDocument() throws Exception
- {
- doAssert(makeDocumentWithFields(), false);
+ /**
+ * Tests {@link Document#getValues(String)} method for a brand new Document
+ * that has not been indexed yet.
+ *
+ * @throws Exception on error
+ */
+ public void testGetValuesForNewDocument() throws Exception {
+ doAssert(makeDocumentWithFields(), false);
+ }
+
+ /**
+ * Tests {@link Document#getValues(String)} method for a Document retrieved
+ * from an index.
+ *
+ * @throws Exception on error
+ */
+ public void testGetValuesForIndexedDocument() throws Exception {
+ RAMDirectory dir = new RAMDirectory();
+ RandomIndexWriter writer = new RandomIndexWriter(newRandom(), dir,
+ new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer()));
+ writer.addDocument(makeDocumentWithFields());
+ IndexReader reader = writer.getReader();
+
+ Searcher searcher = new IndexSearcher(reader);
+
+ // search for something that does exists
+ Query query = new TermQuery(new Term("keyword", "test1"));
+
+ // ensure that queries return expected results without DateFilter first
+ ScoreDoc[] hits = searcher.search(query, null, 1000).scoreDocs;
+ assertEquals(1, hits.length);
+
+ doAssert(searcher.doc(hits[0].doc), true);
+ writer.close();
+ searcher.close();
+ reader.close();
+ dir.close();
+ }
+
+ private Document makeDocumentWithFields() {
+ Document doc = new Document();
+ doc.add(new Field("keyword", "test1", Field.Store.YES,
+ Field.Index.NOT_ANALYZED));
+ doc.add(new Field("keyword", "test2", Field.Store.YES,
+ Field.Index.NOT_ANALYZED));
+ doc.add(new Field("text", "test1", Field.Store.YES, Field.Index.ANALYZED));
+ doc.add(new Field("text", "test2", Field.Store.YES, Field.Index.ANALYZED));
+ doc.add(new Field("unindexed", "test1", Field.Store.YES, Field.Index.NO));
+ doc.add(new Field("unindexed", "test2", Field.Store.YES, Field.Index.NO));
+ doc
+ .add(new Field("unstored", "test1", Field.Store.NO,
+ Field.Index.ANALYZED));
+ doc
+ .add(new Field("unstored", "test2", Field.Store.NO,
+ Field.Index.ANALYZED));
+ return doc;
+ }
+
+ private void doAssert(Document doc, boolean fromIndex) {
+ String[] keywordFieldValues = doc.getValues("keyword");
+ String[] textFieldValues = doc.getValues("text");
+ String[] unindexedFieldValues = doc.getValues("unindexed");
+ String[] unstoredFieldValues = doc.getValues("unstored");
+
+ assertTrue(keywordFieldValues.length == 2);
+ assertTrue(textFieldValues.length == 2);
+ assertTrue(unindexedFieldValues.length == 2);
+ // this test cannot work for documents retrieved from the index
+ // since unstored fields will obviously not be returned
+ if (!fromIndex) {
+ assertTrue(unstoredFieldValues.length == 2);
}
-
- /**
- * Tests {@link Document#getValues(String)} method for a Document retrieved from
- * an index.
- *
- * @throws Exception on error
- */
- public void testGetValuesForIndexedDocument() throws Exception {
- RAMDirectory dir = new RAMDirectory();
- IndexWriter writer = new IndexWriter(dir, new IndexWriterConfig(
- TEST_VERSION_CURRENT, new MockAnalyzer()));
- writer.addDocument(makeDocumentWithFields());
- writer.close();
-
- Searcher searcher = new IndexSearcher(dir, true);
-
- // search for something that does exists
- Query query = new TermQuery(new Term("keyword", "test1"));
-
- // ensure that queries return expected results without DateFilter first
- ScoreDoc[] hits = searcher.search(query, null, 1000).scoreDocs;
- assertEquals(1, hits.length);
-
- doAssert(searcher.doc(hits[0].doc), true);
- searcher.close();
+
+ assertTrue(keywordFieldValues[0].equals("test1"));
+ assertTrue(keywordFieldValues[1].equals("test2"));
+ assertTrue(textFieldValues[0].equals("test1"));
+ assertTrue(textFieldValues[1].equals("test2"));
+ assertTrue(unindexedFieldValues[0].equals("test1"));
+ assertTrue(unindexedFieldValues[1].equals("test2"));
+ // this test cannot work for documents retrieved from the index
+ // since unstored fields will obviously not be returned
+ if (!fromIndex) {
+ assertTrue(unstoredFieldValues[0].equals("test1"));
+ assertTrue(unstoredFieldValues[1].equals("test2"));
}
-
- private Document makeDocumentWithFields()
- {
- Document doc = new Document();
- doc.add(new Field( "keyword", "test1", Field.Store.YES, Field.Index.NOT_ANALYZED));
- doc.add(new Field( "keyword", "test2", Field.Store.YES, Field.Index.NOT_ANALYZED));
- doc.add(new Field( "text", "test1", Field.Store.YES, Field.Index.ANALYZED));
- doc.add(new Field( "text", "test2", Field.Store.YES, Field.Index.ANALYZED));
- doc.add(new Field("unindexed", "test1", Field.Store.YES, Field.Index.NO));
- doc.add(new Field("unindexed", "test2", Field.Store.YES, Field.Index.NO));
- doc.add(new Field( "unstored", "test1", Field.Store.NO, Field.Index.ANALYZED));
- doc.add(new Field( "unstored", "test2", Field.Store.NO, Field.Index.ANALYZED));
- return doc;
+ }
+
+ public void testFieldSetValue() throws Exception {
+
+ Field field = new Field("id", "id1", Field.Store.YES,
+ Field.Index.NOT_ANALYZED);
+ Document doc = new Document();
+ doc.add(field);
+ doc.add(new Field("keyword", "test", Field.Store.YES,
+ Field.Index.NOT_ANALYZED));
+
+ RAMDirectory dir = new RAMDirectory();
+ RandomIndexWriter writer = new RandomIndexWriter(newRandom(), dir,
+ new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer()));
+ writer.addDocument(doc);
+ field.setValue("id2");
+ writer.addDocument(doc);
+ field.setValue("id3");
+ writer.addDocument(doc);
+
+ IndexReader reader = writer.getReader();
+ Searcher searcher = new IndexSearcher(reader);
+
+ Query query = new TermQuery(new Term("keyword", "test"));
+
+ // ensure that queries return expected results without DateFilter first
+ ScoreDoc[] hits = searcher.search(query, null, 1000).scoreDocs;
+ assertEquals(3, hits.length);
+ int result = 0;
+ for (int i = 0; i < 3; i++) {
+ Document doc2 = searcher.doc(hits[i].doc);
+ Field f = doc2.getField("id");
+ if (f.stringValue().equals("id1")) result |= 1;
+ else if (f.stringValue().equals("id2")) result |= 2;
+ else if (f.stringValue().equals("id3")) result |= 4;
+ else fail("unexpected id field");
}
-
- private void doAssert(Document doc, boolean fromIndex)
- {
- String[] keywordFieldValues = doc.getValues("keyword");
- String[] textFieldValues = doc.getValues("text");
- String[] unindexedFieldValues = doc.getValues("unindexed");
- String[] unstoredFieldValues = doc.getValues("unstored");
-
- assertTrue(keywordFieldValues.length == 2);
- assertTrue(textFieldValues.length == 2);
- assertTrue(unindexedFieldValues.length == 2);
- // this test cannot work for documents retrieved from the index
- // since unstored fields will obviously not be returned
- if (! fromIndex)
- {
- assertTrue(unstoredFieldValues.length == 2);
- }
-
- assertTrue(keywordFieldValues[0].equals("test1"));
- assertTrue(keywordFieldValues[1].equals("test2"));
- assertTrue(textFieldValues[0].equals("test1"));
- assertTrue(textFieldValues[1].equals("test2"));
- assertTrue(unindexedFieldValues[0].equals("test1"));
- assertTrue(unindexedFieldValues[1].equals("test2"));
- // this test cannot work for documents retrieved from the index
- // since unstored fields will obviously not be returned
- if (! fromIndex)
- {
- assertTrue(unstoredFieldValues[0].equals("test1"));
- assertTrue(unstoredFieldValues[1].equals("test2"));
- }
- }
-
- public void testFieldSetValue() throws Exception {
-
- Field field = new Field("id", "id1", Field.Store.YES, Field.Index.NOT_ANALYZED);
- Document doc = new Document();
- doc.add(field);
- doc.add(new Field("keyword", "test", Field.Store.YES, Field.Index.NOT_ANALYZED));
-
- RAMDirectory dir = new RAMDirectory();
- IndexWriter writer = new IndexWriter(dir, new IndexWriterConfig(
- TEST_VERSION_CURRENT, new MockAnalyzer()));
- writer.addDocument(doc);
- field.setValue("id2");
- writer.addDocument(doc);
- field.setValue("id3");
- writer.addDocument(doc);
- writer.close();
-
- Searcher searcher = new IndexSearcher(dir, true);
-
- Query query = new TermQuery(new Term("keyword", "test"));
-
- // ensure that queries return expected results without DateFilter first
- ScoreDoc[] hits = searcher.search(query, null, 1000).scoreDocs;
- assertEquals(3, hits.length);
- int result = 0;
- for(int i=0;i<3;i++) {
- Document doc2 = searcher.doc(hits[i].doc);
- Field f = doc2.getField("id");
- if (f.stringValue().equals("id1"))
- result |= 1;
- else if (f.stringValue().equals("id2"))
- result |= 2;
- else if (f.stringValue().equals("id3"))
- result |= 4;
- else
- fail("unexpected id field");
- }
- searcher.close();
- dir.close();
- assertEquals("did not see all IDs", 7, result);
- }
-
+ writer.close();
+ searcher.close();
+ reader.close();
+ dir.close();
+ assertEquals("did not see all IDs", 7, result);
+ }
+
public void testFieldSetValueChangeBinary() {
Field field1 = new Field("field1", new byte[0]);
- Field field2 = new Field("field2", "",
- Field.Store.YES, Field.Index.ANALYZED);
+ Field field2 = new Field("field2", "", Field.Store.YES,
+ Field.Index.ANALYZED);
try {
field1.setValue("abc");
fail("did not hit expected exception");
diff --git a/lucene/src/test/org/apache/lucene/index/RandomIndexWriter.java b/lucene/src/test/org/apache/lucene/index/RandomIndexWriter.java
new file mode 100644
index 00000000000..d58e1fcf858
--- /dev/null
+++ b/lucene/src/test/org/apache/lucene/index/RandomIndexWriter.java
@@ -0,0 +1,129 @@
+package org.apache.lucene.index;
+
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import java.util.Random;
+import java.io.Closeable;
+import java.io.IOException;
+
+import org.apache.lucene.util._TestUtil;
+import org.apache.lucene.store.Directory;
+import org.apache.lucene.document.Document;
+import org.apache.lucene.index.codecs.Codec;
+import org.apache.lucene.index.codecs.CodecProvider;
+import org.apache.lucene.index.codecs.intblock.IntBlockCodec;
+import org.apache.lucene.index.codecs.preflex.PreFlexCodec;
+import org.apache.lucene.index.codecs.pulsing.PulsingCodec;
+import org.apache.lucene.index.codecs.sep.SepCodec;
+import org.apache.lucene.index.codecs.standard.StandardCodec;
+
+/** Silly class that randomizes the indexing experience. EG
+ * it may swap in a different merge policy/scheduler; may
+ * commit periodically; may or may not optimize in the end,
+ * may flush by doc count instead of RAM, etc.
+ */
+
+public class RandomIndexWriter implements Closeable {
+
+ public IndexWriter w;
+ private final Random r;
+ int docCount;
+ int flushAt;
+
+ public RandomIndexWriter(Random r, Directory dir, IndexWriterConfig c) throws IOException {
+ this.r = r;
+ if (r.nextBoolean()) {
+ c.setMergePolicy(new LogDocMergePolicy());
+ }
+ if (r.nextBoolean()) {
+ c.setMergeScheduler(new SerialMergeScheduler());
+ }
+ if (r.nextBoolean()) {
+ c.setMaxBufferedDocs(_TestUtil.nextInt(r, 2, 1000));
+ }
+ if (r.nextBoolean()) {
+ c.setTermIndexInterval(_TestUtil.nextInt(r, 1, 1000));
+ }
+
+ if (c.getMergePolicy() instanceof LogMergePolicy) {
+ LogMergePolicy logmp = (LogMergePolicy) c.getMergePolicy();
+ logmp.setUseCompoundDocStore(r.nextBoolean());
+ logmp.setUseCompoundFile(r.nextBoolean());
+ logmp.setCalibrateSizeByDeletes(r.nextBoolean());
+ }
+
+ c.setReaderPooling(r.nextBoolean());
+ c.setCodecProvider(new RandomCodecProvider(r));
+ w = new IndexWriter(dir, c);
+ flushAt = _TestUtil.nextInt(r, 10, 1000);
+ }
+
+ public void addDocument(Document doc) throws IOException {
+ w.addDocument(doc);
+ if (docCount++ == flushAt) {
+ w.commit();
+ flushAt += _TestUtil.nextInt(r, 10, 1000);
+ }
+ }
+
+ public void addIndexes(Directory... dirs) throws CorruptIndexException, IOException {
+ w.addIndexes(dirs);
+ }
+
+ public void deleteDocuments(Term term) throws CorruptIndexException, IOException {
+ w.deleteDocuments(term);
+ }
+
+ public int maxDoc() {
+ return w.maxDoc();
+ }
+
+ public IndexReader getReader() throws IOException {
+ if (r.nextBoolean()) {
+ return w.getReader();
+ } else {
+ w.commit();
+ return IndexReader.open(w.getDirectory(), new KeepOnlyLastCommitDeletionPolicy(), r.nextBoolean(), _TestUtil.nextInt(r, 1, 10));
+ }
+ }
+
+ public void close() throws IOException {
+ if (r.nextInt(4) == 2) {
+ w.optimize();
+ }
+ w.close();
+ }
+
+ class RandomCodecProvider extends CodecProvider {
+ final String codec;
+
+ RandomCodecProvider(Random random) {
+ register(new StandardCodec());
+ register(new IntBlockCodec());
+ register(new PreFlexCodec());
+ register(new PulsingCodec());
+ register(new SepCodec());
+ codec = CodecProvider.CORE_CODECS[random.nextInt(CodecProvider.CORE_CODECS.length)];
+ }
+
+ @Override
+ public Codec getWriter(SegmentWriteState state) {
+ return lookup(codec);
+ }
+ }
+}
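[Reviewer note, not part of the patch: the caller-side contract the converted
tests rely on, sketched under the same assumptions as the note above; dir and
doc are placeholders for any test Directory and Document.

RandomIndexWriter w = new RandomIndexWriter(newRandom(), dir,
    new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer()));
w.addDocument(doc);              // may transparently commit() every 10-1000 docs
IndexReader r = w.getReader();   // either an NRT reader or a reader over a fresh commit
w.close();                       // one run in four optimizes first; r stays usable
// ... assertions against r; segment layout and codec differ from run to run ...
r.close();
dir.close();

Because getReader() may reopen the directory with a random termInfosIndexDivisor
and close() may optimize, the tests converted here take the reader before
closing the writer and avoid assumptions about segment structure.]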
diff --git a/lucene/src/test/org/apache/lucene/search/BaseTestRangeFilter.java b/lucene/src/test/org/apache/lucene/search/BaseTestRangeFilter.java
index 7f503f90ce6..7f5289c9b56 100644
--- a/lucene/src/test/org/apache/lucene/search/BaseTestRangeFilter.java
+++ b/lucene/src/test/org/apache/lucene/search/BaseTestRangeFilter.java
@@ -17,131 +17,134 @@ package org.apache.lucene.search;
* limitations under the License.
*/
+import java.io.IOException;
import java.util.Random;
import org.apache.lucene.util.LuceneTestCase;
import org.apache.lucene.analysis.MockAnalyzer;
import org.apache.lucene.document.Document;
import org.apache.lucene.document.Field;
+import org.apache.lucene.index.IndexReader;
import org.apache.lucene.index.IndexWriter;
import org.apache.lucene.index.IndexWriterConfig;
+import org.apache.lucene.index.RandomIndexWriter;
import org.apache.lucene.index.IndexWriterConfig.OpenMode;
import org.apache.lucene.store.RAMDirectory;
public class BaseTestRangeFilter extends LuceneTestCase {
-
- public static final boolean F = false;
- public static final boolean T = true;
+
+ public static final boolean F = false;
+ public static final boolean T = true;
+
+ protected Random rand;
+
+ /**
+ * Collation interacts badly with hyphens -- collation produces different
+ * ordering than Unicode code-point ordering -- so two indexes are created:
+ * one which can't have negative random integers, for testing collated ranges,
+ * and the other which can have negative random integers, for all other tests.
+ */
+ class TestIndex {
+ int maxR;
+ int minR;
+ boolean allowNegativeRandomInts;
+ RAMDirectory index = new RAMDirectory();
- protected Random rand;
-
- /**
- * Collation interacts badly with hyphens -- collation produces different
- * ordering than Unicode code-point ordering -- so two indexes are created:
- * one which can't have negative random integers, for testing collated
- * ranges, and the other which can have negative random integers, for all
- * other tests.
- */
- class TestIndex {
- int maxR;
- int minR;
- boolean allowNegativeRandomInts;
- RAMDirectory index = new RAMDirectory();
-
- TestIndex(int minR, int maxR, boolean allowNegativeRandomInts) {
- this.minR = minR;
- this.maxR = maxR;
- this.allowNegativeRandomInts = allowNegativeRandomInts;
- }
+ TestIndex(int minR, int maxR, boolean allowNegativeRandomInts) {
+ this.minR = minR;
+ this.maxR = maxR;
+ this.allowNegativeRandomInts = allowNegativeRandomInts;
}
- TestIndex signedIndex = new TestIndex(Integer.MAX_VALUE, Integer.MIN_VALUE, true);
- TestIndex unsignedIndex = new TestIndex(Integer.MAX_VALUE, 0, false);
+ }
+
+ IndexReader signedIndexReader;
+ IndexReader unsignedIndexReader;
+
+ TestIndex signedIndexDir = new TestIndex(Integer.MAX_VALUE, Integer.MIN_VALUE, true);
+ TestIndex unsignedIndexDir = new TestIndex(Integer.MAX_VALUE, 0, false);
+
+ int minId = 0;
+ int maxId = 10000;
+
+ static final int intLength = Integer.toString(Integer.MAX_VALUE).length();
+
+ /**
+ * a simple padding function that should work with any int
+ */
+ public static String pad(int n) {
+ StringBuilder b = new StringBuilder(40);
+ String p = "0";
+ if (n < 0) {
+ p = "-";
+ n = Integer.MAX_VALUE + n + 1;
+ }
+ b.append(p);
+ String s = Integer.toString(n);
+ for (int i = s.length(); i <= intLength; i++) {
+ b.append("0");
+ }
+ b.append(s);
- int minId = 0;
- int maxId = 10000;
-
- static final int intLength = Integer.toString(Integer.MAX_VALUE).length();
+ return b.toString();
+ }
+
+ protected void setUp() throws Exception {
+ super.setUp();
+ rand = newRandom();
+ signedIndexReader = build(rand, signedIndexDir);
+ unsignedIndexReader = build(rand, unsignedIndexDir);
+ }
+
+ protected void tearDown() throws Exception {
+ signedIndexReader.close();
+ unsignedIndexReader.close();
+ super.tearDown();
+ }
+
+ private IndexReader build(Random random, TestIndex index) throws IOException {
+ /* build an index */
+ RandomIndexWriter writer = new RandomIndexWriter(random, index.index,
+ new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer())
+ .setOpenMode(OpenMode.CREATE));
- /**
- * a simple padding function that should work with any int
- */
- public static String pad(int n) {
- StringBuilder b = new StringBuilder(40);
- String p = "0";
- if (n < 0) {
- p = "-";
- n = Integer.MAX_VALUE + n + 1;
- }
- b.append(p);
- String s = Integer.toString(n);
- for (int i = s.length(); i <= intLength; i++) {
- b.append("0");
- }
- b.append(s);
-
- return b.toString();
- }
-
- public BaseTestRangeFilter(String name) {
- super(name);
- rand = newRandom();
- build(signedIndex);
- build(unsignedIndex);
- }
- public BaseTestRangeFilter() {
- rand = newRandom();
- build(signedIndex);
- build(unsignedIndex);
+ for (int d = minId; d <= maxId; d++) {
+ Document doc = new Document();
+ doc.add(new Field("id", pad(d), Field.Store.YES,
+ Field.Index.NOT_ANALYZED));
+ int r = index.allowNegativeRandomInts ? rand.nextInt() : rand
+ .nextInt(Integer.MAX_VALUE);
+ if (index.maxR < r) {
+ index.maxR = r;
+ }
+ if (r < index.minR) {
+ index.minR = r;
+ }
+ doc.add(new Field("rand", pad(r), Field.Store.YES,
+ Field.Index.NOT_ANALYZED));
+ doc.add(new Field("body", "body", Field.Store.YES,
+ Field.Index.NOT_ANALYZED));
+ writer.addDocument(doc);
}
- private void build(TestIndex index) {
- try {
-
- /* build an index */
- IndexWriter writer = new IndexWriter(index.index, new IndexWriterConfig(
- TEST_VERSION_CURRENT, new MockAnalyzer())
- .setOpenMode(OpenMode.CREATE));
-
- for (int d = minId; d <= maxId; d++) {
- Document doc = new Document();
- doc.add(new Field("id",pad(d), Field.Store.YES, Field.Index.NOT_ANALYZED));
- int r= index.allowNegativeRandomInts
- ? rand.nextInt() : rand.nextInt(Integer.MAX_VALUE);
- if (index.maxR < r) {
- index.maxR = r;
- }
- if (r < index.minR) {
- index.minR = r;
- }
- doc.add(new Field("rand",pad(r), Field.Store.YES, Field.Index.NOT_ANALYZED));
- doc.add(new Field("body","body", Field.Store.YES, Field.Index.NOT_ANALYZED));
- writer.addDocument(doc);
- }
-
- writer.optimize();
- writer.close();
-
- } catch (Exception e) {
- throw new RuntimeException("can't build index", e);
- }
-
+ IndexReader ir = writer.getReader();
+ writer.close();
+ return ir;
+ }
+
+ public void testPad() {
+
+ int[] tests = new int[] {-9999999, -99560, -100, -3, -1, 0, 3, 9, 10, 1000,
+ 999999999};
+ for (int i = 0; i < tests.length - 1; i++) {
+ int a = tests[i];
+ int b = tests[i + 1];
+ String aa = pad(a);
+ String bb = pad(b);
+ String label = a + ":" + aa + " vs " + b + ":" + bb;
+ assertEquals("length of " + label, aa.length(), bb.length());
+ assertTrue("compare less than " + label, aa.compareTo(bb) < 0);
}
-
- public void testPad() {
-
- int[] tests = new int[] {
- -9999999, -99560, -100, -3, -1, 0, 3, 9, 10, 1000, 999999999
- };
- for (int i = 0; i < tests.length - 1; i++) {
- int a = tests[i];
- int b = tests[i+1];
- String aa = pad(a);
- String bb = pad(b);
- String label = a + ":" + aa + " vs " + b + ":" + bb;
- assertEquals("length of " + label, aa.length(), bb.length());
- assertTrue("compare less than " + label, aa.compareTo(bb) < 0);
- }
-
- }
-
+
+ }
+
}
diff --git a/lucene/src/test/org/apache/lucene/search/TestAutomatonQuery.java b/lucene/src/test/org/apache/lucene/search/TestAutomatonQuery.java
index e77cc755e6d..7f82af1f394 100644
--- a/lucene/src/test/org/apache/lucene/search/TestAutomatonQuery.java
+++ b/lucene/src/test/org/apache/lucene/search/TestAutomatonQuery.java
@@ -18,13 +18,17 @@ package org.apache.lucene.search;
*/
import java.io.IOException;
+import java.util.Random;
import org.apache.lucene.analysis.MockAnalyzer;
import org.apache.lucene.document.Document;
import org.apache.lucene.document.Field;
-import org.apache.lucene.index.IndexWriter;
+import org.apache.lucene.index.IndexReader;
+import org.apache.lucene.index.IndexWriterConfig;
+import org.apache.lucene.index.RandomIndexWriter;
import org.apache.lucene.index.Term;
import org.apache.lucene.index.TermsEnum;
+import org.apache.lucene.store.Directory;
import org.apache.lucene.store.RAMDirectory;
import org.apache.lucene.util.LuceneTestCase;
import org.apache.lucene.util.automaton.Automaton;
@@ -32,15 +36,18 @@ import org.apache.lucene.util.automaton.BasicAutomata;
import org.apache.lucene.util.automaton.BasicOperations;
public class TestAutomatonQuery extends LuceneTestCase {
+ private Directory directory;
+ private IndexReader reader;
private IndexSearcher searcher;
-
+
private final String FN = "field";
public void setUp() throws Exception {
super.setUp();
- RAMDirectory directory = new RAMDirectory();
- IndexWriter writer = new IndexWriter(directory, new MockAnalyzer(), true,
- IndexWriter.MaxFieldLength.LIMITED);
+ Random random = newRandom();
+ directory = new RAMDirectory();
+ RandomIndexWriter writer = new RandomIndexWriter(random, directory,
+ new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer()));
Document doc = new Document();
Field titleField = new Field("title", "some title", Field.Store.NO,
Field.Index.ANALYZED);
@@ -57,13 +64,15 @@ public class TestAutomatonQuery extends LuceneTestCase {
field.setValue("doc three has some different stuff"
+ " with numbers 1234 5678.9 and letter b");
writer.addDocument(doc);
- writer.optimize();
+ reader = writer.getReader();
+ searcher = new IndexSearcher(reader);
writer.close();
- searcher = new IndexSearcher(directory, true);
}
public void tearDown() throws Exception {
searcher.close();
+ reader.close();
+ directory.close();
super.tearDown();
}
diff --git a/lucene/src/test/org/apache/lucene/search/TestAutomatonQueryUnicode.java b/lucene/src/test/org/apache/lucene/search/TestAutomatonQueryUnicode.java
index 16c6b384d7a..a5e2c434ec2 100644
--- a/lucene/src/test/org/apache/lucene/search/TestAutomatonQueryUnicode.java
+++ b/lucene/src/test/org/apache/lucene/search/TestAutomatonQueryUnicode.java
@@ -18,12 +18,16 @@ package org.apache.lucene.search;
*/
import java.io.IOException;
+import java.util.Random;
import org.apache.lucene.analysis.MockAnalyzer;
import org.apache.lucene.document.Document;
import org.apache.lucene.document.Field;
-import org.apache.lucene.index.IndexWriter;
+import org.apache.lucene.index.IndexReader;
+import org.apache.lucene.index.IndexWriterConfig;
+import org.apache.lucene.index.RandomIndexWriter;
import org.apache.lucene.index.Term;
+import org.apache.lucene.store.Directory;
import org.apache.lucene.store.RAMDirectory;
import org.apache.lucene.util.LuceneTestCase;
import org.apache.lucene.util.automaton.Automaton;
@@ -35,15 +39,18 @@ import org.apache.lucene.util.automaton.RegExp;
* and the differences between UTF-8/UTF-32 and UTF-16 binary sort order.
*/
public class TestAutomatonQueryUnicode extends LuceneTestCase {
+ private IndexReader reader;
private IndexSearcher searcher;
+ private Directory directory;
private final String FN = "field";
public void setUp() throws Exception {
super.setUp();
- RAMDirectory directory = new RAMDirectory();
- IndexWriter writer = new IndexWriter(directory, new MockAnalyzer(), true,
- IndexWriter.MaxFieldLength.LIMITED);
+ Random random = newRandom();
+ directory = new RAMDirectory();
+ RandomIndexWriter writer = new RandomIndexWriter(random, directory,
+ new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer()));
Document doc = new Document();
Field titleField = new Field("title", "some title", Field.Store.NO,
Field.Index.ANALYZED);
@@ -79,13 +86,15 @@ public class TestAutomatonQueryUnicode extends LuceneTestCase {
writer.addDocument(doc);
field.setValue("\uFFFD\uFFFD");
writer.addDocument(doc);
- writer.optimize();
+ reader = writer.getReader();
+ searcher = new IndexSearcher(reader);
writer.close();
- searcher = new IndexSearcher(directory, true);
}
public void tearDown() throws Exception {
searcher.close();
+ reader.close();
+ directory.close();
super.tearDown();
}
diff --git a/lucene/src/test/org/apache/lucene/search/TestBoolean2.java b/lucene/src/test/org/apache/lucene/search/TestBoolean2.java
index cbcad2b8d33..705a5cab741 100644
--- a/lucene/src/test/org/apache/lucene/search/TestBoolean2.java
+++ b/lucene/src/test/org/apache/lucene/search/TestBoolean2.java
@@ -23,8 +23,8 @@ import java.util.Random;
import org.apache.lucene.analysis.MockAnalyzer;
import org.apache.lucene.document.Document;
import org.apache.lucene.document.Field;
-import org.apache.lucene.index.IndexWriter;
import org.apache.lucene.index.IndexWriterConfig;
+import org.apache.lucene.index.RandomIndexWriter;
import org.apache.lucene.index.Term;
import org.apache.lucene.index.IndexReader;
import org.apache.lucene.queryParser.ParseException;
@@ -42,6 +42,7 @@ public class TestBoolean2 extends LuceneTestCase {
private IndexSearcher searcher;
private IndexSearcher bigSearcher;
private IndexReader reader;
+ private Random rnd;
private static int NUM_EXTRA_DOCS = 6000;
public static final String field = "field";
@@ -51,8 +52,9 @@ public class TestBoolean2 extends LuceneTestCase {
@Override
protected void setUp() throws Exception {
super.setUp();
+ rnd = newRandom();
RAMDirectory directory = new RAMDirectory();
- IndexWriter writer= new IndexWriter(directory, new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer()));
+ RandomIndexWriter writer= new RandomIndexWriter(rnd, directory, new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer()));
for (int i = 0; i < docFields.length; i++) {
Document doc = new Document();
doc.add(new Field(field, docFields[i], Field.Store.NO, Field.Index.ANALYZED));
@@ -69,14 +71,14 @@ public class TestBoolean2 extends LuceneTestCase {
int docCount = 0;
do {
final Directory copy = new RAMDirectory(dir2);
- IndexWriter w = new IndexWriter(dir2, new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer()));
+ RandomIndexWriter w = new RandomIndexWriter(rnd, dir2, new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer()));
w.addIndexes(new Directory[] {copy});
docCount = w.maxDoc();
w.close();
mulFactor *= 2;
} while(docCount < 3000);
- IndexWriter w = new IndexWriter(dir2, new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer()));
+ RandomIndexWriter w = new RandomIndexWriter(rnd, dir2, new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer()));
Document doc = new Document();
doc.add(new Field("field2", "xxx", Field.Store.NO, Field.Index.ANALYZED));
for(int i=0;i<NUM_EXTRA_DOCS/2;i++) {
- Map<Integer,Integer> resultMap = new TreeMap<Integer,Integer>();
- // store hits in TreeMap - TreeMap does not allow duplicates; existing entries are silently overwritten
- for(int hitid=0;hitid<hitsByRank.length; ++hitid) {
+ Map<Integer,Integer> resultMap = new TreeMap<Integer,Integer>();
+ // store hits in TreeMap - TreeMap does not allow duplicates; existing
+ // entries are silently overwritten
+ for (int hitid = 0; hitid < hitsByRank.length; ++hitid) {
+ resultMap.put(Integer.valueOf(hitsByRank[hitid].doc), // Key: Lucene
+ // Document ID
+ Integer.valueOf(hitid)); // Value: hits object index
+ }
+
+ // now make a query using the sort criteria
+ ScoreDoc[] resultSort = searcher.search(query, null, Integer.MAX_VALUE,
+ sort).scoreDocs;
checkHits(resultSort, "Sort by custom criteria: "); // check for duplicates
- // besides the sorting both sets of hits must be identical
- for(int hitid=0;hitid remove it from the Map.
- // At the end the Map must be empty!
- resultMap.remove(idHitDate);
- }
- if(resultMap.size()==0) {
- // log("All hits matched");
- } else {
- log("Couldn't match "+resultMap.size()+" hits.");
- }
- assertEquals(resultMap.size(), 0);
+ // besides the sorting both sets of hits must be identical
+ for (int hitid = 0; hitid < resultSort.length; ++hitid) {
+ Integer idHitDate = Integer.valueOf(resultSort[hitid].doc); // document ID
+ // from sorted
+ // search
+ if (!resultMap.containsKey(idHitDate)) {
+ log("ID " + idHitDate + " not found. Possibliy a duplicate.");
+ }
+ assertTrue(resultMap.containsKey(idHitDate)); // same ID must be in the
+ // Map from the rank-sorted
+ // search
+ // every hit must appear once in both result sets --> remove it from the
+ // Map.
+ // At the end the Map must be empty!
+ resultMap.remove(idHitDate);
+ }
+ if (resultMap.size() == 0) {
+ // log("All hits matched");
+ } else {
+ log("Couldn't match " + resultMap.size() + " hits.");
+ }
+ assertEquals(resultMap.size(), 0);
}
-
+
/**
* Check the hits for duplicates.
+ *
* @param hits
*/
- private void checkHits(ScoreDoc[] hits, String prefix) {
- if(hits!=null) {
- Map<Integer,Integer> idMap = new TreeMap<Integer,Integer>();
- for(int docnum=0;docnum<hits.length;++docnum) {
- * same as TestRankingSimilarity in TestRanking.zip from
- * http://issues.apache.org/jira/browse/LUCENE-323
- *
- */
- private static class TestSimilarity extends DefaultSimilarity {
-
- public TestSimilarity() {
- }
- @Override
- public float tf(float freq) {
- if (freq > 0.0f) return 1.0f;
- else return 0.0f;
- }
- @Override
- public float lengthNorm(String fieldName, int numTerms) {
- return 1.0f;
- }
- @Override
- public float idf(int docFreq, int numDocs) {
- return 1.0f;
- }
- }
-
- public Similarity sim = new TestSimilarity();
- public Directory index;
- public IndexReader r;
- public IndexSearcher s;
-
+public class TestDisjunctionMaxQuery extends LuceneTestCase {
+
+ /** threshold for comparing floats */
+ public static final float SCORE_COMP_THRESH = 0.0000f;
+
+ /**
+ * Similarity to eliminate tf, idf and lengthNorm effects to isolate test
+ * case.
+ *
+ *
+ * same as TestRankingSimilarity in TestRanking.zip from
+ * http://issues.apache.org/jira/browse/LUCENE-323
+ *
+ */
+ private static class TestSimilarity extends DefaultSimilarity {
+
+ public TestSimilarity() {}
+
@Override
- protected void setUp() throws Exception {
- super.setUp();
-
- index = new RAMDirectory();
- IndexWriter writer = new IndexWriter(index, new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer()).setSimilarity(sim));
-
- // hed is the most important field, dek is secondary
-
- // d1 is an "ok" match for: albino elephant
- {
- Document d1 = new Document();
- d1.add(new Field("id", "d1", Field.Store.YES, Field.Index.NOT_ANALYZED));//Field.Keyword("id", "d1"));
- d1.add(new Field("hed", "elephant", Field.Store.YES, Field.Index.ANALYZED));//Field.Text("hed", "elephant"));
- d1.add(new Field("dek", "elephant", Field.Store.YES, Field.Index.ANALYZED));//Field.Text("dek", "elephant"));
- writer.addDocument(d1);
- }
-
- // d2 is a "good" match for: albino elephant
- {
- Document d2 = new Document();
- d2.add(new Field("id", "d2", Field.Store.YES, Field.Index.NOT_ANALYZED));//Field.Keyword("id", "d2"));
- d2.add(new Field("hed", "elephant", Field.Store.YES, Field.Index.ANALYZED));//Field.Text("hed", "elephant"));
- d2.add(new Field("dek", "albino", Field.Store.YES, Field.Index.ANALYZED));//Field.Text("dek", "albino"));
- d2.add(new Field("dek", "elephant", Field.Store.YES, Field.Index.ANALYZED));//Field.Text("dek", "elephant"));
- writer.addDocument(d2);
- }
-
- // d3 is a "better" match for: albino elephant
- {
- Document d3 = new Document();
- d3.add(new Field("id", "d3", Field.Store.YES, Field.Index.NOT_ANALYZED));//Field.Keyword("id", "d3"));
- d3.add(new Field("hed", "albino", Field.Store.YES, Field.Index.ANALYZED));//Field.Text("hed", "albino"));
- d3.add(new Field("hed", "elephant", Field.Store.YES, Field.Index.ANALYZED));//Field.Text("hed", "elephant"));
- writer.addDocument(d3);
- }
-
- // d4 is the "best" match for: albino elephant
- {
- Document d4 = new Document();
- d4.add(new Field("id", "d4", Field.Store.YES, Field.Index.NOT_ANALYZED));//Field.Keyword("id", "d4"));
- d4.add(new Field("hed", "albino", Field.Store.YES, Field.Index.ANALYZED));//Field.Text("hed", "albino"));
- d4.add(new Field("hed", "elephant", Field.Store.YES, Field.Index.ANALYZED));//Field.Text("hed", "elephant"));
- d4.add(new Field("dek", "albino", Field.Store.YES, Field.Index.ANALYZED));//Field.Text("dek", "albino"));
- writer.addDocument(d4);
- }
-
- writer.close();
-
- r = IndexReader.open(index, true);
- s = new IndexSearcher(r);
- s.setSimilarity(sim);
+ public float tf(float freq) {
+ if (freq > 0.0f) return 1.0f;
+ else return 0.0f;
}
-
+
+ @Override
+ public float lengthNorm(String fieldName, int numTerms) {
+ return 1.0f;
+ }
+
+ @Override
+ public float idf(int docFreq, int numDocs) {
+ return 1.0f;
+ }
+ }
+
+ public Similarity sim = new TestSimilarity();
+ public Directory index;
+ public IndexReader r;
+ public IndexSearcher s;
+
+ @Override
+ protected void setUp() throws Exception {
+ super.setUp();
+
+ index = new RAMDirectory();
+ RandomIndexWriter writer = new RandomIndexWriter(newRandom(), index,
+ new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer())
+ .setSimilarity(sim));
+
+ // hed is the most important field, dek is secondary
+
+ // d1 is an "ok" match for: albino elephant
+ {
+ Document d1 = new Document();
+ d1.add(new Field("id", "d1", Field.Store.YES, Field.Index.NOT_ANALYZED));// Field.Keyword("id",
+ // "d1"));
+ d1
+ .add(new Field("hed", "elephant", Field.Store.YES,
+ Field.Index.ANALYZED));// Field.Text("hed", "elephant"));
+ d1
+ .add(new Field("dek", "elephant", Field.Store.YES,
+ Field.Index.ANALYZED));// Field.Text("dek", "elephant"));
+ writer.addDocument(d1);
+ }
+
+ // d2 is a "good" match for: albino elephant
+ {
+ Document d2 = new Document();
+ d2.add(new Field("id", "d2", Field.Store.YES, Field.Index.NOT_ANALYZED));// Field.Keyword("id",
+ // "d2"));
+ d2
+ .add(new Field("hed", "elephant", Field.Store.YES,
+ Field.Index.ANALYZED));// Field.Text("hed", "elephant"));
+ d2.add(new Field("dek", "albino", Field.Store.YES, Field.Index.ANALYZED));// Field.Text("dek",
+ // "albino"));
+ d2
+ .add(new Field("dek", "elephant", Field.Store.YES,
+ Field.Index.ANALYZED));// Field.Text("dek", "elephant"));
+ writer.addDocument(d2);
+ }
+
+ // d3 is a "better" match for: albino elephant
+ {
+ Document d3 = new Document();
+ d3.add(new Field("id", "d3", Field.Store.YES, Field.Index.NOT_ANALYZED));// Field.Keyword("id",
+ // "d3"));
+ d3.add(new Field("hed", "albino", Field.Store.YES, Field.Index.ANALYZED));// Field.Text("hed",
+ // "albino"));
+ d3
+ .add(new Field("hed", "elephant", Field.Store.YES,
+ Field.Index.ANALYZED));// Field.Text("hed", "elephant"));
+ writer.addDocument(d3);
+ }
+
+ // d4 is the "best" match for: albino elephant
+ {
+ Document d4 = new Document();
+ d4.add(new Field("id", "d4", Field.Store.YES, Field.Index.NOT_ANALYZED));// Field.Keyword("id",
+ // "d4"));
+ d4.add(new Field("hed", "albino", Field.Store.YES, Field.Index.ANALYZED));// Field.Text("hed",
+ // "albino"));
+ d4
+ .add(new Field("hed", "elephant", Field.Store.YES,
+ Field.Index.ANALYZED));// Field.Text("hed", "elephant"));
+ d4.add(new Field("dek", "albino", Field.Store.YES, Field.Index.ANALYZED));// Field.Text("dek",
+ // "albino"));
+ writer.addDocument(d4);
+ }
+
+ r = writer.getReader();
+ writer.close();
+ s = new IndexSearcher(r);
+ s.setSimilarity(sim);
+ }
+
+ @Override
+ protected void tearDown() throws Exception {
+ s.close();
+ r.close();
+ index.close();
+ super.tearDown();
+ }
+
public void testSkipToFirsttimeMiss() throws IOException {
final DisjunctionMaxQuery dq = new DisjunctionMaxQuery(0.0f);
- dq.add(tq("id","d1"));
- dq.add(tq("dek","DOES_NOT_EXIST"));
-
- QueryUtils.check(dq,s);
-
+ dq.add(tq("id", "d1"));
+ dq.add(tq("dek", "DOES_NOT_EXIST"));
+
+ QueryUtils.check(dq, s);
+
final Weight dw = dq.weight(s);
final Scorer ds = dw.scorer(r, true, false);
final boolean skipOk = ds.advance(3) != DocIdSetIterator.NO_MORE_DOCS;
if (skipOk) {
- fail("firsttime skipTo found a match? ... " + r.document(ds.docID()).get("id"));
+ fail("firsttime skipTo found a match? ... "
+ + r.document(ds.docID()).get("id"));
}
}
-
+
public void testSkipToFirsttimeHit() throws IOException {
final DisjunctionMaxQuery dq = new DisjunctionMaxQuery(0.0f);
- dq.add(tq("dek","albino"));
- dq.add(tq("dek","DOES_NOT_EXIST"));
-
- QueryUtils.check(dq,s);
-
+ dq.add(tq("dek", "albino"));
+ dq.add(tq("dek", "DOES_NOT_EXIST"));
+
+ QueryUtils.check(dq, s);
+
final Weight dw = dq.weight(s);
final Scorer ds = dw.scorer(r, true, false);
- assertTrue("firsttime skipTo found no match", ds.advance(3) != DocIdSetIterator.NO_MORE_DOCS);
+ assertTrue("firsttime skipTo found no match",
+ ds.advance(3) != DocIdSetIterator.NO_MORE_DOCS);
assertEquals("found wrong docid", "d4", r.document(ds.docID()).get("id"));
}
-
+
public void testSimpleEqualScores1() throws Exception {
-
+
DisjunctionMaxQuery q = new DisjunctionMaxQuery(0.0f);
- q.add(tq("hed","albino"));
- q.add(tq("hed","elephant"));
- QueryUtils.check(q,s);
-
+ q.add(tq("hed", "albino"));
+ q.add(tq("hed", "elephant"));
+ QueryUtils.check(q, s);
+
ScoreDoc[] h = s.search(q, null, 1000).scoreDocs;
-
+
try {
- assertEquals("all docs should match " + q.toString(),
- 4, h.length);
-
+ assertEquals("all docs should match " + q.toString(), 4, h.length);
+
float score = h[0].score;
for (int i = 1; i < h.length; i++) {
- assertEquals("score #" + i + " is not the same",
- score, h[i].score, SCORE_COMP_THRESH);
+ assertEquals("score #" + i + " is not the same", score, h[i].score,
+ SCORE_COMP_THRESH);
}
} catch (Error e) {
- printHits("testSimpleEqualScores1",h,s);
+ printHits("testSimpleEqualScores1", h, s);
throw e;
}
-
+
}
-
- public void testSimpleEqualScores2() throws Exception {
-
- DisjunctionMaxQuery q = new DisjunctionMaxQuery(0.0f);
- q.add(tq("dek","albino"));
- q.add(tq("dek","elephant"));
- QueryUtils.check(q,s);
-
-
- ScoreDoc[] h = s.search(q, null, 1000).scoreDocs;
-
- try {
- assertEquals("3 docs should match " + q.toString(),
- 3, h.length);
- float score = h[0].score;
- for (int i = 1; i < h.length; i++) {
- assertEquals("score #" + i + " is not the same",
- score, h[i].score, SCORE_COMP_THRESH);
- }
- } catch (Error e) {
- printHits("testSimpleEqualScores2",h, s);
- throw e;
- }
-
+
+ public void testSimpleEqualScores2() throws Exception {
+
+ DisjunctionMaxQuery q = new DisjunctionMaxQuery(0.0f);
+ q.add(tq("dek", "albino"));
+ q.add(tq("dek", "elephant"));
+ QueryUtils.check(q, s);
+
+ ScoreDoc[] h = s.search(q, null, 1000).scoreDocs;
+
+ try {
+ assertEquals("3 docs should match " + q.toString(), 3, h.length);
+ float score = h[0].score;
+ for (int i = 1; i < h.length; i++) {
+ assertEquals("score #" + i + " is not the same", score, h[i].score,
+ SCORE_COMP_THRESH);
+ }
+ } catch (Error e) {
+ printHits("testSimpleEqualScores2", h, s);
+ throw e;
}
-
- public void testSimpleEqualScores3() throws Exception {
-
- DisjunctionMaxQuery q = new DisjunctionMaxQuery(0.0f);
- q.add(tq("hed","albino"));
- q.add(tq("hed","elephant"));
- q.add(tq("dek","albino"));
- q.add(tq("dek","elephant"));
- QueryUtils.check(q,s);
-
-
- ScoreDoc[] h = s.search(q, null, 1000).scoreDocs;
-
- try {
- assertEquals("all docs should match " + q.toString(),
- 4, h.length);
- float score = h[0].score;
- for (int i = 1; i < h.length; i++) {
- assertEquals("score #" + i + " is not the same",
- score, h[i].score, SCORE_COMP_THRESH);
- }
- } catch (Error e) {
- printHits("testSimpleEqualScores3",h, s);
- throw e;
- }
-
+
+ }
+
+ public void testSimpleEqualScores3() throws Exception {
+
+ DisjunctionMaxQuery q = new DisjunctionMaxQuery(0.0f);
+ q.add(tq("hed", "albino"));
+ q.add(tq("hed", "elephant"));
+ q.add(tq("dek", "albino"));
+ q.add(tq("dek", "elephant"));
+ QueryUtils.check(q, s);
+
+ ScoreDoc[] h = s.search(q, null, 1000).scoreDocs;
+
+ try {
+ assertEquals("all docs should match " + q.toString(), 4, h.length);
+ float score = h[0].score;
+ for (int i = 1; i < h.length; i++) {
+ assertEquals("score #" + i + " is not the same", score, h[i].score,
+ SCORE_COMP_THRESH);
+ }
+ } catch (Error e) {
+ printHits("testSimpleEqualScores3", h, s);
+ throw e;
}
-
- public void testSimpleTiebreaker() throws Exception {
-
- DisjunctionMaxQuery q = new DisjunctionMaxQuery(0.01f);
- q.add(tq("dek","albino"));
- q.add(tq("dek","elephant"));
- QueryUtils.check(q,s);
-
-
- ScoreDoc[] h = s.search(q, null, 1000).scoreDocs;
-
- try {
- assertEquals("3 docs should match " + q.toString(),
- 3, h.length);
- assertEquals("wrong first", "d2", s.doc(h[0].doc).get("id"));
- float score0 = h[0].score;
- float score1 = h[1].score;
- float score2 = h[2].score;
- assertTrue("d2 does not have better score then others: " +
- score0 + " >? " + score1,
- score0 > score1);
- assertEquals("d4 and d1 don't have equal scores",
- score1, score2, SCORE_COMP_THRESH);
- } catch (Error e) {
- printHits("testSimpleTiebreaker",h, s);
- throw e;
- }
+
+ }
+
+ public void testSimpleTiebreaker() throws Exception {
+
+ DisjunctionMaxQuery q = new DisjunctionMaxQuery(0.01f);
+ q.add(tq("dek", "albino"));
+ q.add(tq("dek", "elephant"));
+ QueryUtils.check(q, s);
+
+ ScoreDoc[] h = s.search(q, null, 1000).scoreDocs;
+
+ try {
+ assertEquals("3 docs should match " + q.toString(), 3, h.length);
+ assertEquals("wrong first", "d2", s.doc(h[0].doc).get("id"));
+ float score0 = h[0].score;
+ float score1 = h[1].score;
+ float score2 = h[2].score;
+ assertTrue("d2 does not have better score then others: " + score0
+ + " >? " + score1, score0 > score1);
+ assertEquals("d4 and d1 don't have equal scores", score1, score2,
+ SCORE_COMP_THRESH);
+ } catch (Error e) {
+ printHits("testSimpleTiebreaker", h, s);
+ throw e;
}
-
- public void testBooleanRequiredEqualScores() throws Exception {
-
- BooleanQuery q = new BooleanQuery();
- {
- DisjunctionMaxQuery q1 = new DisjunctionMaxQuery(0.0f);
- q1.add(tq("hed","albino"));
- q1.add(tq("dek","albino"));
- q.add(q1,BooleanClause.Occur.MUST);//true,false);
- QueryUtils.check(q1,s);
-
- }
- {
- DisjunctionMaxQuery q2 = new DisjunctionMaxQuery(0.0f);
- q2.add(tq("hed","elephant"));
- q2.add(tq("dek","elephant"));
- q.add(q2, BooleanClause.Occur.MUST);//true,false);
- QueryUtils.check(q2,s);
- }
-
- QueryUtils.check(q,s);
-
- ScoreDoc[] h = s.search(q, null, 1000).scoreDocs;
-
- try {
- assertEquals("3 docs should match " + q.toString(),
- 3, h.length);
- float score = h[0].score;
- for (int i = 1; i < h.length; i++) {
- assertEquals("score #" + i + " is not the same",
- score, h[i].score, SCORE_COMP_THRESH);
- }
- } catch (Error e) {
- printHits("testBooleanRequiredEqualScores1",h, s);
- throw e;
- }
+ }
+
+ public void testBooleanRequiredEqualScores() throws Exception {
+
+ BooleanQuery q = new BooleanQuery();
+ {
+ DisjunctionMaxQuery q1 = new DisjunctionMaxQuery(0.0f);
+ q1.add(tq("hed", "albino"));
+ q1.add(tq("dek", "albino"));
+ q.add(q1, BooleanClause.Occur.MUST);// true,false);
+ QueryUtils.check(q1, s);
+
}
-
-
- public void testBooleanOptionalNoTiebreaker() throws Exception {
-
- BooleanQuery q = new BooleanQuery();
- {
- DisjunctionMaxQuery q1 = new DisjunctionMaxQuery(0.0f);
- q1.add(tq("hed","albino"));
- q1.add(tq("dek","albino"));
- q.add(q1, BooleanClause.Occur.SHOULD);//false,false);
- }
- {
- DisjunctionMaxQuery q2 = new DisjunctionMaxQuery(0.0f);
- q2.add(tq("hed","elephant"));
- q2.add(tq("dek","elephant"));
- q.add(q2, BooleanClause.Occur.SHOULD);//false,false);
- }
- QueryUtils.check(q,s);
-
-
- ScoreDoc[] h = s.search(q, null, 1000).scoreDocs;
-
- try {
- assertEquals("4 docs should match " + q.toString(),
- 4, h.length);
- float score = h[0].score;
- for (int i = 1; i < h.length-1; i++) { /* note: -1 */
- assertEquals("score #" + i + " is not the same",
- score, h[i].score, SCORE_COMP_THRESH);
- }
- assertEquals("wrong last", "d1", s.doc(h[h.length-1].doc).get("id"));
- float score1 = h[h.length-1].score;
- assertTrue("d1 does not have worse score then others: " +
- score + " >? " + score1,
- score > score1);
- } catch (Error e) {
- printHits("testBooleanOptionalNoTiebreaker",h, s);
- throw e;
- }
+ {
+ DisjunctionMaxQuery q2 = new DisjunctionMaxQuery(0.0f);
+ q2.add(tq("hed", "elephant"));
+ q2.add(tq("dek", "elephant"));
+ q.add(q2, BooleanClause.Occur.MUST);// true,false);
+ QueryUtils.check(q2, s);
}
-
-
- public void testBooleanOptionalWithTiebreaker() throws Exception {
-
- BooleanQuery q = new BooleanQuery();
- {
- DisjunctionMaxQuery q1 = new DisjunctionMaxQuery(0.01f);
- q1.add(tq("hed","albino"));
- q1.add(tq("dek","albino"));
- q.add(q1, BooleanClause.Occur.SHOULD);//false,false);
- }
- {
- DisjunctionMaxQuery q2 = new DisjunctionMaxQuery(0.01f);
- q2.add(tq("hed","elephant"));
- q2.add(tq("dek","elephant"));
- q.add(q2, BooleanClause.Occur.SHOULD);//false,false);
- }
- QueryUtils.check(q,s);
-
-
- ScoreDoc[] h = s.search(q, null, 1000).scoreDocs;
-
- try {
-
- assertEquals("4 docs should match " + q.toString(),
- 4, h.length);
-
- float score0 = h[0].score;
- float score1 = h[1].score;
- float score2 = h[2].score;
- float score3 = h[3].score;
-
- String doc0 = s.doc(h[0].doc).get("id");
- String doc1 = s.doc(h[1].doc).get("id");
- String doc2 = s.doc(h[2].doc).get("id");
- String doc3 = s.doc(h[3].doc).get("id");
-
- assertTrue("doc0 should be d2 or d4: " + doc0,
- doc0.equals("d2") || doc0.equals("d4"));
- assertTrue("doc1 should be d2 or d4: " + doc0,
- doc1.equals("d2") || doc1.equals("d4"));
- assertEquals("score0 and score1 should match",
- score0, score1, SCORE_COMP_THRESH);
- assertEquals("wrong third", "d3", doc2);
- assertTrue("d3 does not have worse score then d2 and d4: " +
- score1 + " >? " + score2,
- score1 > score2);
-
- assertEquals("wrong fourth", "d1", doc3);
- assertTrue("d1 does not have worse score then d3: " +
- score2 + " >? " + score3,
- score2 > score3);
-
- } catch (Error e) {
- printHits("testBooleanOptionalWithTiebreaker",h, s);
- throw e;
- }
-
+
+ QueryUtils.check(q, s);
+
+ ScoreDoc[] h = s.search(q, null, 1000).scoreDocs;
+
+ try {
+ assertEquals("3 docs should match " + q.toString(), 3, h.length);
+ float score = h[0].score;
+ for (int i = 1; i < h.length; i++) {
+ assertEquals("score #" + i + " is not the same", score, h[i].score,
+ SCORE_COMP_THRESH);
+ }
+ } catch (Error e) {
+ printHits("testBooleanRequiredEqualScores1", h, s);
+ throw e;
}
-
-
- public void testBooleanOptionalWithTiebreakerAndBoost() throws Exception {
-
- BooleanQuery q = new BooleanQuery();
- {
- DisjunctionMaxQuery q1 = new DisjunctionMaxQuery(0.01f);
- q1.add(tq("hed","albino", 1.5f));
- q1.add(tq("dek","albino"));
- q.add(q1, BooleanClause.Occur.SHOULD);//false,false);
- }
- {
- DisjunctionMaxQuery q2 = new DisjunctionMaxQuery(0.01f);
- q2.add(tq("hed","elephant", 1.5f));
- q2.add(tq("dek","elephant"));
- q.add(q2, BooleanClause.Occur.SHOULD);//false,false);
- }
- QueryUtils.check(q,s);
-
-
- ScoreDoc[] h = s.search(q, null, 1000).scoreDocs;
-
- try {
-
- assertEquals("4 docs should match " + q.toString(),
- 4, h.length);
-
- float score0 = h[0].score;
- float score1 = h[1].score;
- float score2 = h[2].score;
- float score3 = h[3].score;
-
- String doc0 = s.doc(h[0].doc).get("id");
- String doc1 = s.doc(h[1].doc).get("id");
- String doc2 = s.doc(h[2].doc).get("id");
- String doc3 = s.doc(h[3].doc).get("id");
-
- assertEquals("doc0 should be d4: ", "d4", doc0);
- assertEquals("doc1 should be d3: ", "d3", doc1);
- assertEquals("doc2 should be d2: ", "d2", doc2);
- assertEquals("doc3 should be d1: ", "d1", doc3);
-
- assertTrue("d4 does not have a better score then d3: " +
- score0 + " >? " + score1,
- score0 > score1);
- assertTrue("d3 does not have a better score then d2: " +
- score1 + " >? " + score2,
- score1 > score2);
- assertTrue("d3 does not have a better score then d1: " +
- score2 + " >? " + score3,
- score2 > score3);
-
- } catch (Error e) {
- printHits("testBooleanOptionalWithTiebreakerAndBoost",h, s);
- throw e;
- }
+ }
+
+ public void testBooleanOptionalNoTiebreaker() throws Exception {
+
+ BooleanQuery q = new BooleanQuery();
+ {
+ DisjunctionMaxQuery q1 = new DisjunctionMaxQuery(0.0f);
+ q1.add(tq("hed", "albino"));
+ q1.add(tq("dek", "albino"));
+ q.add(q1, BooleanClause.Occur.SHOULD);// false,false);
}
-
-
-
-
-
-
-
- /** macro */
- protected Query tq(String f, String t) {
- return new TermQuery(new Term(f, t));
+ {
+ DisjunctionMaxQuery q2 = new DisjunctionMaxQuery(0.0f);
+ q2.add(tq("hed", "elephant"));
+ q2.add(tq("dek", "elephant"));
+ q.add(q2, BooleanClause.Occur.SHOULD);// false,false);
}
- /** macro */
- protected Query tq(String f, String t, float b) {
- Query q = tq(f,t);
- q.setBoost(b);
- return q;
+ QueryUtils.check(q, s);
+
+ ScoreDoc[] h = s.search(q, null, 1000).scoreDocs;
+
+ try {
+ assertEquals("4 docs should match " + q.toString(), 4, h.length);
+ float score = h[0].score;
+ for (int i = 1; i < h.length - 1; i++) { /* note: -1 */
+ assertEquals("score #" + i + " is not the same", score, h[i].score,
+ SCORE_COMP_THRESH);
+ }
+ assertEquals("wrong last", "d1", s.doc(h[h.length - 1].doc).get("id"));
+ float score1 = h[h.length - 1].score;
+ assertTrue("d1 does not have worse score then others: " + score + " >? "
+ + score1, score > score1);
+ } catch (Error e) {
+ printHits("testBooleanOptionalNoTiebreaker", h, s);
+ throw e;
}
-
-
- protected void printHits(String test, ScoreDoc[] h, Searcher searcher) throws Exception {
-
- System.err.println("------- " + test + " -------");
-
- DecimalFormat f = new DecimalFormat("0.000000000");
-
- for (int i = 0; i < h.length; i++) {
- Document d = searcher.doc(h[i].doc);
- float score = h[i].score;
- System.err.println("#" + i + ": " + f.format(score) + " - " +
- d.get("id"));
- }
+ }
+
+ public void testBooleanOptionalWithTiebreaker() throws Exception {
+
+ BooleanQuery q = new BooleanQuery();
+ {
+ DisjunctionMaxQuery q1 = new DisjunctionMaxQuery(0.01f);
+ q1.add(tq("hed", "albino"));
+ q1.add(tq("dek", "albino"));
+ q.add(q1, BooleanClause.Occur.SHOULD);// false,false);
}
-
+ {
+ DisjunctionMaxQuery q2 = new DisjunctionMaxQuery(0.01f);
+ q2.add(tq("hed", "elephant"));
+ q2.add(tq("dek", "elephant"));
+ q.add(q2, BooleanClause.Occur.SHOULD);// false,false);
+ }
+ QueryUtils.check(q, s);
+
+ ScoreDoc[] h = s.search(q, null, 1000).scoreDocs;
+
+ try {
+
+ assertEquals("4 docs should match " + q.toString(), 4, h.length);
+
+ float score0 = h[0].score;
+ float score1 = h[1].score;
+ float score2 = h[2].score;
+ float score3 = h[3].score;
+
+ String doc0 = s.doc(h[0].doc).get("id");
+ String doc1 = s.doc(h[1].doc).get("id");
+ String doc2 = s.doc(h[2].doc).get("id");
+ String doc3 = s.doc(h[3].doc).get("id");
+
+ assertTrue("doc0 should be d2 or d4: " + doc0, doc0.equals("d2")
+ || doc0.equals("d4"));
+ assertTrue("doc1 should be d2 or d4: " + doc0, doc1.equals("d2")
+ || doc1.equals("d4"));
+ assertEquals("score0 and score1 should match", score0, score1,
+ SCORE_COMP_THRESH);
+ assertEquals("wrong third", "d3", doc2);
+ assertTrue("d3 does not have worse score then d2 and d4: " + score1
+ + " >? " + score2, score1 > score2);
+
+ assertEquals("wrong fourth", "d1", doc3);
+ assertTrue("d1 does not have worse score then d3: " + score2 + " >? "
+ + score3, score2 > score3);
+
+ } catch (Error e) {
+ printHits("testBooleanOptionalWithTiebreaker", h, s);
+ throw e;
+ }
+
+ }
+
+ public void testBooleanOptionalWithTiebreakerAndBoost() throws Exception {
+
+ BooleanQuery q = new BooleanQuery();
+ {
+ DisjunctionMaxQuery q1 = new DisjunctionMaxQuery(0.01f);
+ q1.add(tq("hed", "albino", 1.5f));
+ q1.add(tq("dek", "albino"));
+ q.add(q1, BooleanClause.Occur.SHOULD);// false,false);
+ }
+ {
+ DisjunctionMaxQuery q2 = new DisjunctionMaxQuery(0.01f);
+ q2.add(tq("hed", "elephant", 1.5f));
+ q2.add(tq("dek", "elephant"));
+ q.add(q2, BooleanClause.Occur.SHOULD);// false,false);
+ }
+ QueryUtils.check(q, s);
+
+ ScoreDoc[] h = s.search(q, null, 1000).scoreDocs;
+
+ try {
+
+ assertEquals("4 docs should match " + q.toString(), 4, h.length);
+
+ float score0 = h[0].score;
+ float score1 = h[1].score;
+ float score2 = h[2].score;
+ float score3 = h[3].score;
+
+ String doc0 = s.doc(h[0].doc).get("id");
+ String doc1 = s.doc(h[1].doc).get("id");
+ String doc2 = s.doc(h[2].doc).get("id");
+ String doc3 = s.doc(h[3].doc).get("id");
+
+ assertEquals("doc0 should be d4: ", "d4", doc0);
+ assertEquals("doc1 should be d3: ", "d3", doc1);
+ assertEquals("doc2 should be d2: ", "d2", doc2);
+ assertEquals("doc3 should be d1: ", "d1", doc3);
+
+ assertTrue("d4 does not have a better score then d3: " + score0 + " >? "
+ + score1, score0 > score1);
+ assertTrue("d3 does not have a better score then d2: " + score1 + " >? "
+ + score2, score1 > score2);
+ assertTrue("d3 does not have a better score then d1: " + score2 + " >? "
+ + score3, score2 > score3);
+
+ } catch (Error e) {
+ printHits("testBooleanOptionalWithTiebreakerAndBoost", h, s);
+ throw e;
+ }
+ }
+
+ /** macro */
+ protected Query tq(String f, String t) {
+ return new TermQuery(new Term(f, t));
+ }
+
+ /** macro */
+ protected Query tq(String f, String t, float b) {
+ Query q = tq(f, t);
+ q.setBoost(b);
+ return q;
+ }
+
+ protected void printHits(String test, ScoreDoc[] h, Searcher searcher)
+ throws Exception {
+
+ System.err.println("------- " + test + " -------");
+
+ DecimalFormat f = new DecimalFormat("0.000000000");
+
+ for (int i = 0; i < h.length; i++) {
+ Document d = searcher.doc(h[i].doc);
+ float score = h[i].score;
+ System.err
+ .println("#" + i + ": " + f.format(score) + " - " + d.get("id"));
+ }
+ }
}
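For reference, DisjunctionMaxQuery scores each document by taking the maximum of its clauses' scores and adding the tie-breaker multiplier times the scores of the other matching clauses, which is why the tie-breaker tests above expect d2 (matching both terms in "dek") to come out ahead. A small sketch built from the same queries the tests use; the class and method names are illustrative only:

    import org.apache.lucene.index.Term;
    import org.apache.lucene.search.DisjunctionMaxQuery;
    import org.apache.lucene.search.IndexSearcher;
    import org.apache.lucene.search.ScoreDoc;
    import org.apache.lucene.search.TermQuery;

    class DisjunctionMaxSketch {
      // Top hits for two "dek" terms; 0.01f is the tie-breaker multiplier, so a
      // document matching both terms scores max(clauses) + 0.01f * (other clause).
      static ScoreDoc[] albinoElephant(IndexSearcher s) throws Exception {
        DisjunctionMaxQuery q = new DisjunctionMaxQuery(0.01f);
        q.add(new TermQuery(new Term("dek", "albino")));
        q.add(new TermQuery(new Term("dek", "elephant")));
        return s.search(q, null, 1000).scoreDocs;
      }
    }
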
diff --git a/lucene/src/test/org/apache/lucene/search/TestDocBoost.java b/lucene/src/test/org/apache/lucene/search/TestDocBoost.java
index 180bdf893ff..e3c6449238b 100644
--- a/lucene/src/test/org/apache/lucene/search/TestDocBoost.java
+++ b/lucene/src/test/org/apache/lucene/search/TestDocBoost.java
@@ -23,8 +23,8 @@ import org.apache.lucene.util.LuceneTestCase;
import org.apache.lucene.analysis.MockAnalyzer;
import org.apache.lucene.document.*;
import org.apache.lucene.index.IndexReader;
-import org.apache.lucene.index.IndexWriter;
import org.apache.lucene.index.IndexWriterConfig;
+import org.apache.lucene.index.RandomIndexWriter;
import org.apache.lucene.index.Term;
import org.apache.lucene.store.RAMDirectory;
@@ -40,8 +40,8 @@ public class TestDocBoost extends LuceneTestCase {
public void testDocBoost() throws Exception {
RAMDirectory store = new RAMDirectory();
- IndexWriter writer = new IndexWriter(store, new IndexWriterConfig(
- TEST_VERSION_CURRENT, new MockAnalyzer()));
+ RandomIndexWriter writer = new RandomIndexWriter(newRandom(), store,
+ new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer()));
Fieldable f1 = new Field("field", "word", Field.Store.YES, Field.Index.ANALYZED);
Fieldable f2 = new Field("field", "word", Field.Store.YES, Field.Index.ANALYZED);
@@ -63,12 +63,13 @@ public class TestDocBoost extends LuceneTestCase {
writer.addDocument(d2);
writer.addDocument(d3);
writer.addDocument(d4);
- writer.optimize();
+
+ IndexReader reader = writer.getReader();
writer.close();
final float[] scores = new float[4];
- new IndexSearcher(store, true).search
+ new IndexSearcher(reader).search
(new TermQuery(new Term("field", "word")),
new Collector() {
private int base = 0;
@@ -97,5 +98,8 @@ public class TestDocBoost extends LuceneTestCase {
assertTrue(scores[i] > lastScore);
lastScore = scores[i];
}
+
+ reader.close();
+ store.close();
}
}
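TestDocBoost asserts that the four scores come back strictly increasing, which only holds if the boosts applied at index time survive the switch to RandomIndexWriter. A minimal sketch of the kind of boosting the test relies on, assuming the setBoost(float) setters on Document and Field (the boost calls themselves sit outside the hunks shown):

    import org.apache.lucene.document.Document;
    import org.apache.lucene.document.Field;

    class DocBoostSketch {
      // Two documents with identical text; the boosted one should score higher
      // for a TermQuery on field:word, which is what the test asserts.
      static Document[] boostedPair() {
        Field f1 = new Field("field", "word", Field.Store.YES, Field.Index.ANALYZED);
        Field f2 = new Field("field", "word", Field.Store.YES, Field.Index.ANALYZED);
        f2.setBoost(2.0f);                 // field-level boost
        Document d1 = new Document();
        d1.add(f1);
        Document d2 = new Document();
        d2.setBoost(2.0f);                 // document-level boost folds into the field norm
        d2.add(f2);
        return new Document[] { d1, d2 };
      }
    }
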
diff --git a/lucene/src/test/org/apache/lucene/search/TestDocIdSet.java b/lucene/src/test/org/apache/lucene/search/TestDocIdSet.java
index 375eabbb5f8..8cd7e4008f8 100644
--- a/lucene/src/test/org/apache/lucene/search/TestDocIdSet.java
+++ b/lucene/src/test/org/apache/lucene/search/TestDocIdSet.java
@@ -30,8 +30,8 @@ import org.apache.lucene.document.Field;
import org.apache.lucene.document.Field.Index;
import org.apache.lucene.document.Field.Store;
import org.apache.lucene.index.IndexReader;
-import org.apache.lucene.index.IndexWriter;
import org.apache.lucene.index.IndexWriterConfig;
+import org.apache.lucene.index.RandomIndexWriter;
import org.apache.lucene.store.Directory;
import org.apache.lucene.store.RAMDirectory;
import org.apache.lucene.util.LuceneTestCase;
@@ -104,14 +104,16 @@ public class TestDocIdSet extends LuceneTestCase {
// Tests that if a Filter produces a null DocIdSet, which is given to
// IndexSearcher, everything works fine. This came up in LUCENE-1754.
Directory dir = new RAMDirectory();
- IndexWriter writer = new IndexWriter(dir, new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer()));
+ RandomIndexWriter writer = new RandomIndexWriter(newRandom(), dir,
+ new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer()));
Document doc = new Document();
doc.add(new Field("c", "val", Store.NO, Index.NOT_ANALYZED_NO_NORMS));
writer.addDocument(doc);
+ IndexReader reader = writer.getReader();
writer.close();
// First verify the document is searchable.
- IndexSearcher searcher = new IndexSearcher(dir, true);
+ IndexSearcher searcher = new IndexSearcher(reader);
Assert.assertEquals(1, searcher.search(new MatchAllDocsQuery(), 10).totalHits);
// Now search w/ a Filter which returns a null DocIdSet
@@ -124,6 +126,8 @@ public class TestDocIdSet extends LuceneTestCase {
Assert.assertEquals(0, searcher.search(new MatchAllDocsQuery(), f, 10).totalHits);
searcher.close();
+ reader.close();
+ dir.close();
}
}
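The test above guards the LUCENE-1754 case: a Filter that hands IndexSearcher a null DocIdSet must be treated as matching nothing rather than throwing. A sketch of such a filter, assuming the Filter.getDocIdSet(IndexReader) signature used elsewhere in this tree:

    import java.io.IOException;
    import org.apache.lucene.index.IndexReader;
    import org.apache.lucene.search.DocIdSet;
    import org.apache.lucene.search.Filter;

    class NullDocIdSetFilter extends Filter {
      // Returning null signals "no documents"; the searcher should yield zero hits.
      @Override
      public DocIdSet getDocIdSet(IndexReader reader) throws IOException {
        return null;
      }
    }
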
diff --git a/lucene/src/test/org/apache/lucene/search/TestExplanations.java b/lucene/src/test/org/apache/lucene/search/TestExplanations.java
index a9d52394f7a..ed2f65782a4 100644
--- a/lucene/src/test/org/apache/lucene/search/TestExplanations.java
+++ b/lucene/src/test/org/apache/lucene/search/TestExplanations.java
@@ -22,8 +22,9 @@ import org.apache.lucene.queryParser.ParseException;
import org.apache.lucene.analysis.MockAnalyzer;
import org.apache.lucene.document.Document;
import org.apache.lucene.document.Field;
-import org.apache.lucene.index.IndexWriter;
+import org.apache.lucene.index.IndexReader;
import org.apache.lucene.index.IndexWriterConfig;
+import org.apache.lucene.index.RandomIndexWriter;
import org.apache.lucene.index.Term;
import org.apache.lucene.search.spans.SpanFirstQuery;
import org.apache.lucene.search.spans.SpanNearQuery;
@@ -31,6 +32,7 @@ import org.apache.lucene.search.spans.SpanNotQuery;
import org.apache.lucene.search.spans.SpanOrQuery;
import org.apache.lucene.search.spans.SpanQuery;
import org.apache.lucene.search.spans.SpanTermQuery;
+import org.apache.lucene.store.Directory;
import org.apache.lucene.store.RAMDirectory;
import org.apache.lucene.util.LuceneTestCase;
@@ -48,7 +50,9 @@ import org.apache.lucene.util.LuceneTestCase;
*/
public class TestExplanations extends LuceneTestCase {
protected IndexSearcher searcher;
-
+ protected IndexReader reader;
+ protected Directory directory;
+
public static final String KEY = "KEY";
public static final String FIELD = "field";
public static final QueryParser qp =
@@ -57,22 +61,26 @@ public class TestExplanations extends LuceneTestCase {
@Override
protected void tearDown() throws Exception {
searcher.close();
+ reader.close();
+ directory.close();
super.tearDown();
}
@Override
protected void setUp() throws Exception {
super.setUp();
- RAMDirectory directory = new RAMDirectory();
- IndexWriter writer= new IndexWriter(directory, new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer()));
+ directory = new RAMDirectory();
+ RandomIndexWriter writer= new RandomIndexWriter(newRandom(), directory,
+ new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer()));
for (int i = 0; i < docFields.length; i++) {
Document doc = new Document();
doc.add(new Field(KEY, ""+i, Field.Store.NO, Field.Index.NOT_ANALYZED));
doc.add(new Field(FIELD, docFields[i], Field.Store.NO, Field.Index.ANALYZED));
writer.addDocument(doc);
}
+ reader = writer.getReader();
writer.close();
- searcher = new IndexSearcher(directory, true);
+ searcher = new IndexSearcher(reader);
}
protected String[] docFields = {
diff --git a/lucene/src/test/org/apache/lucene/search/TestFieldCache.java b/lucene/src/test/org/apache/lucene/search/TestFieldCache.java
index 138535cdfb4..348ddc16b19 100644
--- a/lucene/src/test/org/apache/lucene/search/TestFieldCache.java
+++ b/lucene/src/test/org/apache/lucene/search/TestFieldCache.java
@@ -22,6 +22,7 @@ import org.apache.lucene.document.Field;
import org.apache.lucene.index.IndexReader;
import org.apache.lucene.index.IndexWriter;
import org.apache.lucene.index.IndexWriterConfig;
+import org.apache.lucene.index.RandomIndexWriter;
import org.apache.lucene.index.TermsEnum;
import org.apache.lucene.store.RAMDirectory;
import org.apache.lucene.store.MockRAMDirectory;
@@ -49,7 +50,8 @@ public class TestFieldCache extends LuceneTestCase {
Random r = newRandom();
NUM_DOCS = 1000 * _TestUtil.getRandomMultiplier();
RAMDirectory directory = new RAMDirectory();
- IndexWriter writer= new IndexWriter(directory, new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer()).setMaxBufferedDocs(500));
+ RandomIndexWriter writer= new RandomIndexWriter(r, directory,
+ new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer()));
long theLong = Long.MAX_VALUE;
double theDouble = Double.MAX_VALUE;
byte theByte = Byte.MAX_VALUE;
diff --git a/lucene/src/test/org/apache/lucene/search/TestFieldCacheRangeFilter.java b/lucene/src/test/org/apache/lucene/search/TestFieldCacheRangeFilter.java
index 08b96e6e72a..0d89d4f41fb 100644
--- a/lucene/src/test/org/apache/lucene/search/TestFieldCacheRangeFilter.java
+++ b/lucene/src/test/org/apache/lucene/search/TestFieldCacheRangeFilter.java
@@ -40,16 +40,9 @@ import org.apache.lucene.store.RAMDirectory;
*/
public class TestFieldCacheRangeFilter extends BaseTestRangeFilter {
- public TestFieldCacheRangeFilter(String name) {
- super(name);
- }
- public TestFieldCacheRangeFilter() {
- super();
- }
-
public void testRangeFilterId() throws IOException {
- IndexReader reader = IndexReader.open(signedIndex.index, true);
+ IndexReader reader = signedIndexReader;
IndexSearcher search = new IndexSearcher(reader);
int medId = ((maxId - minId) / 2);
@@ -133,11 +126,11 @@ public class TestFieldCacheRangeFilter extends BaseTestRangeFilter {
public void testFieldCacheRangeFilterRand() throws IOException {
- IndexReader reader = IndexReader.open(signedIndex.index, true);
+ IndexReader reader = signedIndexReader;
IndexSearcher search = new IndexSearcher(reader);
- String minRP = pad(signedIndex.minR);
- String maxRP = pad(signedIndex.maxR);
+ String minRP = pad(signedIndexDir.minR);
+ String maxRP = pad(signedIndexDir.maxR);
int numDocs = reader.numDocs();
@@ -196,7 +189,7 @@ public class TestFieldCacheRangeFilter extends BaseTestRangeFilter {
public void testFieldCacheRangeFilterShorts() throws IOException {
- IndexReader reader = IndexReader.open(signedIndex.index, true);
+ IndexReader reader = signedIndexReader;
IndexSearcher search = new IndexSearcher(reader);
int numDocs = reader.numDocs();
@@ -285,7 +278,7 @@ public class TestFieldCacheRangeFilter extends BaseTestRangeFilter {
public void testFieldCacheRangeFilterInts() throws IOException {
- IndexReader reader = IndexReader.open(signedIndex.index, true);
+ IndexReader reader = signedIndexReader;
IndexSearcher search = new IndexSearcher(reader);
int numDocs = reader.numDocs();
@@ -375,7 +368,7 @@ public class TestFieldCacheRangeFilter extends BaseTestRangeFilter {
public void testFieldCacheRangeFilterLongs() throws IOException {
- IndexReader reader = IndexReader.open(signedIndex.index, true);
+ IndexReader reader = signedIndexReader;
IndexSearcher search = new IndexSearcher(reader);
int numDocs = reader.numDocs();
@@ -467,7 +460,7 @@ public class TestFieldCacheRangeFilter extends BaseTestRangeFilter {
public void testFieldCacheRangeFilterFloats() throws IOException {
- IndexReader reader = IndexReader.open(signedIndex.index, true);
+ IndexReader reader = signedIndexReader;
IndexSearcher search = new IndexSearcher(reader);
int numDocs = reader.numDocs();
@@ -495,7 +488,7 @@ public class TestFieldCacheRangeFilter extends BaseTestRangeFilter {
public void testFieldCacheRangeFilterDoubles() throws IOException {
- IndexReader reader = IndexReader.open(signedIndex.index, true);
+ IndexReader reader = signedIndexReader;
IndexSearcher search = new IndexSearcher(reader);
int numDocs = reader.numDocs();
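These hunks stop opening a fresh IndexReader per test and instead reuse the shared signedIndexReader built once in BaseTestRangeFilter. The filters under test stay the same; a small sketch of the FieldCache-backed range filtering they exercise, with the class and method names here being illustrative:

    import org.apache.lucene.search.FieldCacheRangeFilter;
    import org.apache.lucene.search.IndexSearcher;
    import org.apache.lucene.search.MatchAllDocsQuery;
    import org.apache.lucene.search.ScoreDoc;

    class FieldCacheRangeSketch {
      // Counts documents whose padded "id" term falls inside [minRP, maxRP].
      static int countInRange(IndexSearcher search, int numDocs,
          String minRP, String maxRP) throws Exception {
        ScoreDoc[] hits = search.search(new MatchAllDocsQuery(),
            FieldCacheRangeFilter.newStringRange("id", minRP, maxRP, true, true),
            numDocs).scoreDocs;
        return hits.length;
      }
    }
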
diff --git a/lucene/src/test/org/apache/lucene/search/TestFieldCacheTermsFilter.java b/lucene/src/test/org/apache/lucene/search/TestFieldCacheTermsFilter.java
index c591daf1e1d..2babd8cba5b 100644
--- a/lucene/src/test/org/apache/lucene/search/TestFieldCacheTermsFilter.java
+++ b/lucene/src/test/org/apache/lucene/search/TestFieldCacheTermsFilter.java
@@ -23,8 +23,8 @@ import org.apache.lucene.analysis.MockAnalyzer;
import org.apache.lucene.document.Document;
import org.apache.lucene.document.Field;
import org.apache.lucene.index.IndexReader;
-import org.apache.lucene.index.IndexWriter;
import org.apache.lucene.index.IndexWriterConfig;
+import org.apache.lucene.index.RandomIndexWriter;
import org.apache.lucene.store.MockRAMDirectory;
import java.util.ArrayList;
@@ -39,16 +39,17 @@ public class TestFieldCacheTermsFilter extends LuceneTestCase {
public void testMissingTerms() throws Exception {
String fieldName = "field1";
MockRAMDirectory rd = new MockRAMDirectory();
- IndexWriter w = new IndexWriter(rd, new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer()));
+ RandomIndexWriter w = new RandomIndexWriter(newRandom(), rd,
+ new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer()));
for (int i = 0; i < 100; i++) {
Document doc = new Document();
int term = i * 10; //terms are units of 10;
doc.add(new Field(fieldName, "" + term, Field.Store.YES, Field.Index.NOT_ANALYZED));
w.addDocument(doc);
}
+ IndexReader reader = w.getReader();
w.close();
- IndexReader reader = IndexReader.open(rd, true);
IndexSearcher searcher = new IndexSearcher(reader);
int numDocs = reader.numDocs();
ScoreDoc[] results;
diff --git a/lucene/src/test/org/apache/lucene/search/TestFilteredQuery.java b/lucene/src/test/org/apache/lucene/search/TestFilteredQuery.java
index c321af1be34..17d66fca409 100644
--- a/lucene/src/test/org/apache/lucene/search/TestFilteredQuery.java
+++ b/lucene/src/test/org/apache/lucene/search/TestFilteredQuery.java
@@ -20,9 +20,9 @@ package org.apache.lucene.search;
import org.apache.lucene.analysis.MockAnalyzer;
import org.apache.lucene.document.Document;
import org.apache.lucene.document.Field;
-import org.apache.lucene.index.IndexWriter;
import org.apache.lucene.index.IndexReader;
import org.apache.lucene.index.IndexWriterConfig;
+import org.apache.lucene.index.RandomIndexWriter;
import org.apache.lucene.index.Term;
import org.apache.lucene.search.BooleanClause.Occur;
import org.apache.lucene.store.RAMDirectory;
@@ -41,6 +41,7 @@ import java.util.BitSet;
public class TestFilteredQuery extends LuceneTestCase {
private IndexSearcher searcher;
+ private IndexReader reader;
private RAMDirectory directory;
private Query query;
private Filter filter;
@@ -49,7 +50,8 @@ public class TestFilteredQuery extends LuceneTestCase {
protected void setUp() throws Exception {
super.setUp();
directory = new RAMDirectory();
- IndexWriter writer = new IndexWriter (directory, new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer()));
+ RandomIndexWriter writer = new RandomIndexWriter (newRandom(), directory,
+ new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer()));
Document doc = new Document();
doc.add (new Field("field", "one two three four five", Field.Store.YES, Field.Index.ANALYZED));
@@ -71,10 +73,10 @@ public class TestFilteredQuery extends LuceneTestCase {
doc.add (new Field("sorter", "c", Field.Store.YES, Field.Index.ANALYZED));
writer.addDocument (doc);
- writer.optimize ();
+ reader = writer.getReader();
writer.close ();
- searcher = new IndexSearcher (directory, true);
+ searcher = new IndexSearcher (reader);
query = new TermQuery (new Term ("field", "three"));
filter = newStaticFilterB();
}
@@ -95,6 +97,7 @@ public class TestFilteredQuery extends LuceneTestCase {
@Override
protected void tearDown() throws Exception {
searcher.close();
+ reader.close();
directory.close();
super.tearDown();
}
diff --git a/lucene/src/test/org/apache/lucene/search/TestFuzzyQuery.java b/lucene/src/test/org/apache/lucene/search/TestFuzzyQuery.java
index 3f41bc8f1ff..2a05adf8400 100644
--- a/lucene/src/test/org/apache/lucene/search/TestFuzzyQuery.java
+++ b/lucene/src/test/org/apache/lucene/search/TestFuzzyQuery.java
@@ -25,8 +25,8 @@ import org.apache.lucene.analysis.MockAnalyzer;
import org.apache.lucene.document.Document;
import org.apache.lucene.document.Field;
import org.apache.lucene.index.IndexReader;
-import org.apache.lucene.index.IndexWriter;
import org.apache.lucene.index.IndexWriterConfig;
+import org.apache.lucene.index.RandomIndexWriter;
import org.apache.lucene.index.Term;
import org.apache.lucene.queryParser.QueryParser;
import org.apache.lucene.store.Directory;
@@ -42,7 +42,8 @@ public class TestFuzzyQuery extends LuceneTestCase {
public void testFuzziness() throws Exception {
RAMDirectory directory = new RAMDirectory();
- IndexWriter writer = new IndexWriter(directory, new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer()));
+ RandomIndexWriter writer = new RandomIndexWriter(newRandom(), directory,
+ new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer()));
addDoc("aaaaa", writer);
addDoc("aaaab", writer);
addDoc("aaabb", writer);
@@ -50,9 +51,10 @@ public class TestFuzzyQuery extends LuceneTestCase {
addDoc("abbbb", writer);
addDoc("bbbbb", writer);
addDoc("ddddd", writer);
- writer.optimize();
+
+ IndexReader reader = writer.getReader();
+ IndexSearcher searcher = new IndexSearcher(reader);
writer.close();
- IndexSearcher searcher = new IndexSearcher(directory, true);
FuzzyQuery query = new FuzzyQuery(new Term("field", "aaaaa"), FuzzyQuery.defaultMinSimilarity, 0);
ScoreDoc[] hits = searcher.search(query, null, 1000).scoreDocs;
@@ -188,17 +190,20 @@ public class TestFuzzyQuery extends LuceneTestCase {
assertEquals(0, hits.length);
searcher.close();
+ reader.close();
directory.close();
}
public void testFuzzinessLong() throws Exception {
RAMDirectory directory = new RAMDirectory();
- IndexWriter writer = new IndexWriter(directory, new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer()));
+ RandomIndexWriter writer = new RandomIndexWriter(newRandom(), directory,
+ new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer()));
addDoc("aaaaaaa", writer);
addDoc("segment", writer);
- writer.optimize();
+
+ IndexReader reader = writer.getReader();
+ IndexSearcher searcher = new IndexSearcher(reader);
writer.close();
- IndexSearcher searcher = new IndexSearcher(directory, true);
FuzzyQuery query;
// not similar enough:
@@ -276,17 +281,20 @@ public class TestFuzzyQuery extends LuceneTestCase {
}
searcher.close();
+ reader.close();
directory.close();
}
public void testTokenLengthOpt() throws IOException {
RAMDirectory directory = new RAMDirectory();
- IndexWriter writer = new IndexWriter(directory, new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer()));
+ RandomIndexWriter writer = new RandomIndexWriter(newRandom(), directory,
+ new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer()));
addDoc("12345678911", writer);
addDoc("segment", writer);
- writer.optimize();
+
+ IndexReader reader = writer.getReader();
+ IndexSearcher searcher = new IndexSearcher(reader);
writer.close();
- IndexSearcher searcher = new IndexSearcher(directory, true);
Query query;
// term not over 10 chars, so optimization shortcuts
@@ -308,20 +316,25 @@ public class TestFuzzyQuery extends LuceneTestCase {
query = new FuzzyQuery(new Term("field", "sdfsdfsdfsdf"), 0.9f);
hits = searcher.search(query, null, 1000).scoreDocs;
assertEquals(0, hits.length);
+
+ searcher.close();
+ reader.close();
+ directory.close();
}
/** Test the TopTermsBoostOnlyBooleanQueryRewrite rewrite method. */
public void testBoostOnlyRewrite() throws Exception {
RAMDirectory directory = new RAMDirectory();
- IndexWriter writer = new IndexWriter(directory, new IndexWriterConfig(
- TEST_VERSION_CURRENT, new MockAnalyzer()));
+ RandomIndexWriter writer = new RandomIndexWriter(newRandom(), directory,
+ new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer()));
addDoc("Lucene", writer);
addDoc("Lucene", writer);
addDoc("Lucenne", writer);
- writer.optimize();
+
+ IndexReader reader = writer.getReader();
+ IndexSearcher searcher = new IndexSearcher(reader);
writer.close();
- IndexSearcher searcher = new IndexSearcher(directory, true);
- IndexReader reader = searcher.getIndexReader();
+
FuzzyQuery query = new FuzzyQuery(new Term("field", "Lucene"));
query.setRewriteMethod(new MultiTermQuery.TopTermsBoostOnlyBooleanQueryRewrite());
ScoreDoc[] hits = searcher.search(query, null, 1000).scoreDocs;
@@ -332,6 +345,7 @@ public class TestFuzzyQuery extends LuceneTestCase {
assertEquals("Lucenne", reader.document(hits[2].doc).get("field"));
searcher.close();
reader.close();
+ directory.close();
}
public void testGiga() throws Exception {
@@ -339,8 +353,8 @@ public class TestFuzzyQuery extends LuceneTestCase {
MockAnalyzer analyzer = new MockAnalyzer();
Directory index = new MockRAMDirectory();
- IndexWriter w = new IndexWriter(index, new IndexWriterConfig(
- TEST_VERSION_CURRENT, analyzer));
+ RandomIndexWriter w = new RandomIndexWriter(newRandom(), index,
+ new IndexWriterConfig(TEST_VERSION_CURRENT, analyzer));
addDoc("Lucene in Action", w);
addDoc("Lucene for Dummies", w);
@@ -369,10 +383,12 @@ public class TestFuzzyQuery extends LuceneTestCase {
ScoreDoc[] hits = searcher.search(q, 10).scoreDocs;
assertEquals(1, hits.length);
assertEquals("Giga byte", searcher.doc(hits[0].doc).get("field"));
+ searcher.close();
r.close();
+ index.close();
}
- private void addDoc(String text, IndexWriter writer) throws IOException {
+ private void addDoc(String text, RandomIndexWriter writer) throws IOException {
Document doc = new Document();
doc.add(new Field("field", text, Field.Store.YES, Field.Index.ANALYZED));
writer.addDocument(doc);
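The FuzzyQuery tests keep their queries unchanged; only the index construction moves to RandomIndexWriter. For orientation, the shape of the queries being exercised, reusing the constructor seen in the hunks above (class name illustrative):

    import org.apache.lucene.index.Term;
    import org.apache.lucene.search.FuzzyQuery;
    import org.apache.lucene.search.IndexSearcher;
    import org.apache.lucene.search.ScoreDoc;

    class FuzzySketch {
      // Matches terms within the default minimum similarity of "aaaaa";
      // prefix length 0 means no leading characters must match exactly.
      static ScoreDoc[] fuzzyHits(IndexSearcher searcher) throws Exception {
        FuzzyQuery query = new FuzzyQuery(new Term("field", "aaaaa"),
            FuzzyQuery.defaultMinSimilarity, 0);
        return searcher.search(query, null, 1000).scoreDocs;
      }
    }
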
diff --git a/lucene/src/test/org/apache/lucene/search/TestFuzzyQuery2.java b/lucene/src/test/org/apache/lucene/search/TestFuzzyQuery2.java
index 4e4125a1e96..e24b713c624 100644
--- a/lucene/src/test/org/apache/lucene/search/TestFuzzyQuery2.java
+++ b/lucene/src/test/org/apache/lucene/search/TestFuzzyQuery2.java
@@ -20,12 +20,15 @@ package org.apache.lucene.search;
import java.io.BufferedReader;
import java.io.InputStream;
import java.io.InputStreamReader;
+import java.util.Random;
import org.apache.lucene.analysis.MockAnalyzer;
import org.apache.lucene.analysis.MockTokenizer;
import org.apache.lucene.document.Document;
import org.apache.lucene.document.Field;
-import org.apache.lucene.index.IndexWriter;
+import org.apache.lucene.index.IndexReader;
+import org.apache.lucene.index.IndexWriterConfig;
+import org.apache.lucene.index.RandomIndexWriter;
import org.apache.lucene.index.Term;
import org.apache.lucene.store.RAMDirectory;
import org.apache.lucene.util.LuceneTestCase;
@@ -55,6 +58,13 @@ import org.apache.lucene.util.LuceneTestCase;
public class TestFuzzyQuery2 extends LuceneTestCase {
/** epsilon for score comparisons */
static final float epsilon = 0.00001f;
+ private Random random;
+
+ @Override
+ public void setUp() throws Exception {
+ super.setUp();
+ random = newRandom();
+ }
public void testFromTestData() throws Exception {
// TODO: randomize!
@@ -78,8 +88,8 @@ public class TestFuzzyQuery2 extends LuceneTestCase {
int terms = (int) Math.pow(2, bits);
RAMDirectory dir = new RAMDirectory();
- IndexWriter writer = new IndexWriter(dir, new MockAnalyzer(MockTokenizer.KEYWORD, false),
- IndexWriter.MaxFieldLength.UNLIMITED);
+ RandomIndexWriter writer = new RandomIndexWriter(random, dir,
+ new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(MockTokenizer.KEYWORD, false)));
Document doc = new Document();
Field field = new Field("field", "", Field.Store.NO, Field.Index.ANALYZED);
@@ -88,12 +98,11 @@ public class TestFuzzyQuery2 extends LuceneTestCase {
for (int i = 0; i < terms; i++) {
field.setValue(mapInt(codePointTable, i));
writer.addDocument(doc);
- }
+ }
- writer.optimize();
- writer.close();
-
- IndexSearcher searcher = new IndexSearcher(dir);
+ IndexReader r = writer.getReader();
+ IndexSearcher searcher = new IndexSearcher(r);
+ writer.close();
String line;
while ((line = reader.readLine()) != null) {
String params[] = line.split(",");
@@ -113,6 +122,7 @@ public class TestFuzzyQuery2 extends LuceneTestCase {
}
}
searcher.close();
+ r.close();
dir.close();
}
diff --git a/lucene/src/test/org/apache/lucene/search/TestMultiPhraseQuery.java b/lucene/src/test/org/apache/lucene/search/TestMultiPhraseQuery.java
index c10d4fee86b..37d4e3a3969 100644
--- a/lucene/src/test/org/apache/lucene/search/TestMultiPhraseQuery.java
+++ b/lucene/src/test/org/apache/lucene/search/TestMultiPhraseQuery.java
@@ -18,10 +18,10 @@ package org.apache.lucene.search;
*/
import org.apache.lucene.index.IndexWriterConfig;
+import org.apache.lucene.index.RandomIndexWriter;
import org.apache.lucene.index.Term;
import org.apache.lucene.index.TermsEnum;
import org.apache.lucene.index.IndexReader;
-import org.apache.lucene.index.IndexWriter;
import org.apache.lucene.index.MultiFields;
import org.apache.lucene.util.BytesRef;
import org.apache.lucene.store.MockRAMDirectory;
@@ -36,236 +36,245 @@ import java.util.LinkedList;
/**
* This class tests the MultiPhraseQuery class.
- *
- *
+ *
+ *
*/
-public class TestMultiPhraseQuery extends LuceneTestCase
-{
- public TestMultiPhraseQuery(String name) {
- super(name);
- }
-
- public void testPhrasePrefix() throws IOException {
- MockRAMDirectory indexStore = new MockRAMDirectory();
- IndexWriter writer = new IndexWriter(indexStore, new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer()));
- add("blueberry pie", writer);
- add("blueberry strudel", writer);
- add("blueberry pizza", writer);
- add("blueberry chewing gum", writer);
- add("bluebird pizza", writer);
- add("bluebird foobar pizza", writer);
- add("piccadilly circus", writer);
- writer.optimize();
- writer.close();
-
- IndexSearcher searcher = new IndexSearcher(indexStore, true);
-
- // search for "blueberry pi*":
- MultiPhraseQuery query1 = new MultiPhraseQuery();
- // search for "strawberry pi*":
- MultiPhraseQuery query2 = new MultiPhraseQuery();
- query1.add(new Term("body", "blueberry"));
- query2.add(new Term("body", "strawberry"));
-
- LinkedList<Term> termsWithPrefix = new LinkedList<Term>();
- IndexReader ir = IndexReader.open(indexStore, true);
-
- // this TermEnum gives "piccadilly", "pie" and "pizza".
- String prefix = "pi";
- TermsEnum te = MultiFields.getFields(ir).terms("body").iterator();
- te.seek(new BytesRef(prefix));
- do {
- String s = te.term().utf8ToString();
- if (s.startsWith(prefix)) {
- termsWithPrefix.add(new Term("body", s));
- } else {
- break;
- }
- } while (te.next() != null);
-
- query1.add(termsWithPrefix.toArray(new Term[0]));
- assertEquals("body:\"blueberry (piccadilly pie pizza)\"", query1.toString());
- query2.add(termsWithPrefix.toArray(new Term[0]));
- assertEquals("body:\"strawberry (piccadilly pie pizza)\"", query2.toString());
-
- ScoreDoc[] result;
- result = searcher.search(query1, null, 1000).scoreDocs;
- assertEquals(2, result.length);
- result = searcher.search(query2, null, 1000).scoreDocs;
- assertEquals(0, result.length);
-
- // search for "blue* pizza":
- MultiPhraseQuery query3 = new MultiPhraseQuery();
- termsWithPrefix.clear();
- prefix = "blue";
- te.seek(new BytesRef(prefix));
-
- do {
- if (te.term().utf8ToString().startsWith(prefix))
- {
- termsWithPrefix.add(new Term("body", te.term().utf8ToString()));
- }
- } while (te.next() != null);
- ir.close();
- query3.add(termsWithPrefix.toArray(new Term[0]));
- query3.add(new Term("body", "pizza"));
-
- result = searcher.search(query3, null, 1000).scoreDocs;
- assertEquals(2, result.length); // blueberry pizza, bluebird pizza
- assertEquals("body:\"(blueberry bluebird) pizza\"", query3.toString());
-
- // test slop:
- query3.setSlop(1);
- result = searcher.search(query3, null, 1000).scoreDocs;
-
- // just make sure no exc:
- searcher.explain(query3, 0);
-
- assertEquals(3, result.length); // blueberry pizza, bluebird pizza, bluebird foobar pizza
-
- MultiPhraseQuery query4 = new MultiPhraseQuery();
- try {
- query4.add(new Term("field1", "foo"));
- query4.add(new Term("field2", "foobar"));
- fail();
- } catch(IllegalArgumentException e) {
- // okay, all terms must belong to the same field
- }
-
- searcher.close();
- indexStore.close();
-
- }
-
- private void add(String s, IndexWriter writer) throws IOException {
- Document doc = new Document();
- doc.add(new Field("body", s, Field.Store.YES, Field.Index.ANALYZED));
- writer.addDocument(doc);
- }
-
- public void testBooleanQueryContainingSingleTermPrefixQuery() throws IOException {
- // this tests against bug 33161 (now fixed)
- // In order to cause the bug, the outer query must have more than one term
- // and all terms required.
- // The contained PhraseMultiQuery must contain exactly one term array.
-
- MockRAMDirectory indexStore = new MockRAMDirectory();
- IndexWriter writer = new IndexWriter(indexStore, new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer()));
- add("blueberry pie", writer);
- add("blueberry chewing gum", writer);
- add("blue raspberry pie", writer);
- writer.optimize();
- writer.close();
-
- IndexSearcher searcher = new IndexSearcher(indexStore, true);
- // This query will be equivalent to +body:pie +body:"blue*"
- BooleanQuery q = new BooleanQuery();
- q.add(new TermQuery(new Term("body", "pie")), BooleanClause.Occur.MUST);
-
- MultiPhraseQuery trouble = new MultiPhraseQuery();
- trouble.add(new Term[] {
- new Term("body", "blueberry"),
- new Term("body", "blue")
- });
- q.add(trouble, BooleanClause.Occur.MUST);
-
- // exception will be thrown here without fix
- ScoreDoc[] hits = searcher.search(q, null, 1000).scoreDocs;
-
- assertEquals("Wrong number of hits", 2, hits.length);
-
- // just make sure no exc:
- searcher.explain(q, 0);
-
- searcher.close();
- indexStore.close();
+public class TestMultiPhraseQuery extends LuceneTestCase {
+ public TestMultiPhraseQuery(String name) {
+ super(name);
}
-
- public void testPhrasePrefixWithBooleanQuery() throws IOException {
+
+ public void testPhrasePrefix() throws IOException {
MockRAMDirectory indexStore = new MockRAMDirectory();
- IndexWriter writer = new IndexWriter(indexStore, new IndexWriterConfig(
- TEST_VERSION_CURRENT, new MockAnalyzer()));
- add("This is a test", "object", writer);
- add("a note", "note", writer);
- writer.close();
+ RandomIndexWriter writer = new RandomIndexWriter(newRandom(), indexStore,
+ new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer()));
+ add("blueberry pie", writer);
+ add("blueberry strudel", writer);
+ add("blueberry pizza", writer);
+ add("blueberry chewing gum", writer);
+ add("bluebird pizza", writer);
+ add("bluebird foobar pizza", writer);
+ add("piccadilly circus", writer);
- IndexSearcher searcher = new IndexSearcher(indexStore, true);
-
- // This query will be equivalent to +type:note +body:"a t*"
- BooleanQuery q = new BooleanQuery();
- q.add(new TermQuery(new Term("type", "note")), BooleanClause.Occur.MUST);
-
- MultiPhraseQuery trouble = new MultiPhraseQuery();
- trouble.add(new Term("body", "a"));
- trouble.add(new Term[] { new Term("body", "test"), new Term("body", "this") });
- q.add(trouble, BooleanClause.Occur.MUST);
-
- // exception will be thrown here without fix for #35626:
- ScoreDoc[] hits = searcher.search(q, null, 1000).scoreDocs;
- assertEquals("Wrong number of hits", 0, hits.length);
+ IndexReader reader = writer.getReader();
+ IndexSearcher searcher = new IndexSearcher(reader);
+
+ // search for "blueberry pi*":
+ MultiPhraseQuery query1 = new MultiPhraseQuery();
+ // search for "strawberry pi*":
+ MultiPhraseQuery query2 = new MultiPhraseQuery();
+ query1.add(new Term("body", "blueberry"));
+ query2.add(new Term("body", "strawberry"));
+
+ LinkedList termsWithPrefix = new LinkedList();
+
+ // this TermEnum gives "piccadilly", "pie" and "pizza".
+ String prefix = "pi";
+ TermsEnum te = MultiFields.getFields(reader).terms("body").iterator();
+ te.seek(new BytesRef(prefix));
+ do {
+ String s = te.term().utf8ToString();
+ if (s.startsWith(prefix)) {
+ termsWithPrefix.add(new Term("body", s));
+ } else {
+ break;
+ }
+ } while (te.next() != null);
+
+ query1.add(termsWithPrefix.toArray(new Term[0]));
+ assertEquals("body:\"blueberry (piccadilly pie pizza)\"", query1.toString());
+ query2.add(termsWithPrefix.toArray(new Term[0]));
+ assertEquals("body:\"strawberry (piccadilly pie pizza)\"", query2
+ .toString());
+
+ ScoreDoc[] result;
+ result = searcher.search(query1, null, 1000).scoreDocs;
+ assertEquals(2, result.length);
+ result = searcher.search(query2, null, 1000).scoreDocs;
+ assertEquals(0, result.length);
+
+ // search for "blue* pizza":
+ MultiPhraseQuery query3 = new MultiPhraseQuery();
+ termsWithPrefix.clear();
+ prefix = "blue";
+ te.seek(new BytesRef(prefix));
+
+ do {
+ if (te.term().utf8ToString().startsWith(prefix)) {
+ termsWithPrefix.add(new Term("body", te.term().utf8ToString()));
+ }
+ } while (te.next() != null);
+
+ query3.add(termsWithPrefix.toArray(new Term[0]));
+ query3.add(new Term("body", "pizza"));
+
+ result = searcher.search(query3, null, 1000).scoreDocs;
+ assertEquals(2, result.length); // blueberry pizza, bluebird pizza
+ assertEquals("body:\"(blueberry bluebird) pizza\"", query3.toString());
+
+ // test slop:
+ query3.setSlop(1);
+ result = searcher.search(query3, null, 1000).scoreDocs;
+
+ // just make sure no exc:
+ searcher.explain(query3, 0);
+
+ assertEquals(3, result.length); // blueberry pizza, bluebird pizza, bluebird
+ // foobar pizza
+
+ MultiPhraseQuery query4 = new MultiPhraseQuery();
+ try {
+ query4.add(new Term("field1", "foo"));
+ query4.add(new Term("field2", "foobar"));
+ fail();
+ } catch (IllegalArgumentException e) {
+ // okay, all terms must belong to the same field
+ }
+
+ writer.close();
searcher.close();
+ reader.close();
indexStore.close();
+
}
-
- public void testNoDocs() throws Exception {
+
+ private void add(String s, RandomIndexWriter writer) throws IOException {
+ Document doc = new Document();
+ doc.add(new Field("body", s, Field.Store.YES, Field.Index.ANALYZED));
+ writer.addDocument(doc);
+ }
+
+ public void testBooleanQueryContainingSingleTermPrefixQuery()
+ throws IOException {
+ // this tests against bug 33161 (now fixed)
+ // In order to cause the bug, the outer query must have more than one term
+ // and all terms required.
+ // The contained PhraseMultiQuery must contain exactly one term array.
+
MockRAMDirectory indexStore = new MockRAMDirectory();
- IndexWriter writer = new IndexWriter(indexStore, new MockAnalyzer(), true, IndexWriter.MaxFieldLength.LIMITED);
- add("a note", "note", writer);
- writer.close();
-
- IndexSearcher searcher = new IndexSearcher(indexStore, true);
-
- MultiPhraseQuery q = new MultiPhraseQuery();
- q.add(new Term("body", "a"));
- q.add(new Term[] { new Term("body", "nope"), new Term("body", "nope") });
- assertEquals("Wrong number of hits", 0, searcher.search(q, null, 1).totalHits);
-
+ RandomIndexWriter writer = new RandomIndexWriter(newRandom(), indexStore,
+ new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer()));
+ add("blueberry pie", writer);
+ add("blueberry chewing gum", writer);
+ add("blue raspberry pie", writer);
+
+ IndexReader reader = writer.getReader();
+ IndexSearcher searcher = new IndexSearcher(reader);
+ // This query will be equivalent to +body:pie +body:"blue*"
+ BooleanQuery q = new BooleanQuery();
+ q.add(new TermQuery(new Term("body", "pie")), BooleanClause.Occur.MUST);
+
+ MultiPhraseQuery trouble = new MultiPhraseQuery();
+ trouble.add(new Term[] {new Term("body", "blueberry"),
+ new Term("body", "blue")});
+ q.add(trouble, BooleanClause.Occur.MUST);
+
+ // exception will be thrown here without fix
+ ScoreDoc[] hits = searcher.search(q, null, 1000).scoreDocs;
+
+ assertEquals("Wrong number of hits", 2, hits.length);
+
// just make sure no exc:
searcher.explain(q, 0);
-
+
+ writer.close();
searcher.close();
+ reader.close();
indexStore.close();
}
- public void testHashCodeAndEquals(){
+ public void testPhrasePrefixWithBooleanQuery() throws IOException {
+ MockRAMDirectory indexStore = new MockRAMDirectory();
+ RandomIndexWriter writer = new RandomIndexWriter(newRandom(), indexStore,
+ new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer()));
+ add("This is a test", "object", writer);
+ add("a note", "note", writer);
+
+ IndexReader reader = writer.getReader();
+ IndexSearcher searcher = new IndexSearcher(reader);
+
+ // This query will be equivalent to +type:note +body:"a t*"
+ BooleanQuery q = new BooleanQuery();
+ q.add(new TermQuery(new Term("type", "note")), BooleanClause.Occur.MUST);
+
+ MultiPhraseQuery trouble = new MultiPhraseQuery();
+ trouble.add(new Term("body", "a"));
+ trouble
+ .add(new Term[] {new Term("body", "test"), new Term("body", "this")});
+ q.add(trouble, BooleanClause.Occur.MUST);
+
+ // exception will be thrown here without fix for #35626:
+ ScoreDoc[] hits = searcher.search(q, null, 1000).scoreDocs;
+ assertEquals("Wrong number of hits", 0, hits.length);
+ writer.close();
+ searcher.close();
+ reader.close();
+ indexStore.close();
+ }
+
+ public void testNoDocs() throws Exception {
+ MockRAMDirectory indexStore = new MockRAMDirectory();
+ RandomIndexWriter writer = new RandomIndexWriter(newRandom(), indexStore,
+ new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer()));
+ add("a note", "note", writer);
+
+ IndexReader reader = writer.getReader();
+ IndexSearcher searcher = new IndexSearcher(reader);
+
+ MultiPhraseQuery q = new MultiPhraseQuery();
+ q.add(new Term("body", "a"));
+ q.add(new Term[] {new Term("body", "nope"), new Term("body", "nope")});
+ assertEquals("Wrong number of hits", 0,
+ searcher.search(q, null, 1).totalHits);
+
+ // just make sure no exc:
+ searcher.explain(q, 0);
+
+ writer.close();
+ searcher.close();
+ reader.close();
+ indexStore.close();
+ }
+
+ public void testHashCodeAndEquals() {
MultiPhraseQuery query1 = new MultiPhraseQuery();
MultiPhraseQuery query2 = new MultiPhraseQuery();
assertEquals(query1.hashCode(), query2.hashCode());
- assertEquals(query1,query2);
+ assertEquals(query1, query2);
- Term term1= new Term("someField","someText");
+ Term term1 = new Term("someField", "someText");
query1.add(term1);
query2.add(term1);
assertEquals(query1.hashCode(), query2.hashCode());
- assertEquals(query1,query2);
+ assertEquals(query1, query2);
- Term term2= new Term("someField","someMoreText");
+ Term term2 = new Term("someField", "someMoreText");
query1.add(term2);
- assertFalse(query1.hashCode()==query2.hashCode());
+ assertFalse(query1.hashCode() == query2.hashCode());
assertFalse(query1.equals(query2));
query2.add(term2);
assertEquals(query1.hashCode(), query2.hashCode());
- assertEquals(query1,query2);
+ assertEquals(query1, query2);
}
-
- private void add(String s, String type, IndexWriter writer) throws IOException {
+ private void add(String s, String type, RandomIndexWriter writer)
+ throws IOException {
Document doc = new Document();
doc.add(new Field("body", s, Field.Store.YES, Field.Index.ANALYZED));
doc.add(new Field("type", type, Field.Store.YES, Field.Index.NOT_ANALYZED));
writer.addDocument(doc);
}
-
+
// LUCENE-2526
public void testEmptyToString() {
new MultiPhraseQuery().toString();
}
-
+
}
diff --git a/lucene/src/test/org/apache/lucene/search/TestMultiTermConstantScore.java b/lucene/src/test/org/apache/lucene/search/TestMultiTermConstantScore.java
index d76dc7133fd..9f81b48e912 100644
--- a/lucene/src/test/org/apache/lucene/search/TestMultiTermConstantScore.java
+++ b/lucene/src/test/org/apache/lucene/search/TestMultiTermConstantScore.java
@@ -22,8 +22,8 @@ import org.apache.lucene.analysis.MockTokenizer;
import org.apache.lucene.document.Document;
import org.apache.lucene.document.Field;
import org.apache.lucene.index.IndexReader;
-import org.apache.lucene.index.IndexWriter;
import org.apache.lucene.index.IndexWriterConfig;
+import org.apache.lucene.index.RandomIndexWriter;
import org.apache.lucene.index.Term;
import org.apache.lucene.store.Directory;
import org.apache.lucene.store.RAMDirectory;
@@ -38,15 +38,8 @@ public class TestMultiTermConstantScore extends BaseTestRangeFilter {
/** threshold for comparing floats */
public static final float SCORE_COMP_THRESH = 1e-6f;
- public TestMultiTermConstantScore(String name) {
- super(name);
- }
-
- public TestMultiTermConstantScore() {
- super();
- }
-
Directory small;
+ IndexReader reader;
void assertEquals(String m, float e, float a) {
assertEquals(m, e, a, SCORE_COMP_THRESH);
@@ -59,13 +52,13 @@ public class TestMultiTermConstantScore extends BaseTestRangeFilter {
@Override
protected void setUp() throws Exception {
super.setUp();
-
String[] data = new String[] { "A 1 2 3 4 5 6", "Z 4 5 6", null,
"B 2 4 5 6", "Y 3 5 6", null, "C 3 6",
"X 4 5 6" };
small = new RAMDirectory();
- IndexWriter writer = new IndexWriter(small, new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(MockTokenizer.WHITESPACE, false)));
+ RandomIndexWriter writer = new RandomIndexWriter(rand, small,
+ new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(MockTokenizer.WHITESPACE, false)));
for (int i = 0; i < data.length; i++) {
Document doc = new Document();
@@ -81,10 +74,17 @@ public class TestMultiTermConstantScore extends BaseTestRangeFilter {
writer.addDocument(doc);
}
- writer.optimize();
+ reader = writer.getReader();
writer.close();
}
+ @Override
+ protected void tearDown() throws Exception {
+ reader.close();
+ small.close();
+ super.tearDown();
+ }
+
/** macro for readability */
public static Query csrq(String f, String l, String h, boolean il, boolean ih) {
TermRangeQuery query = new TermRangeQuery(f, l, h, il, ih);
@@ -146,7 +146,6 @@ public class TestMultiTermConstantScore extends BaseTestRangeFilter {
public void testEqualScores() throws IOException {
// NOTE: uses index build in *this* setUp
- IndexReader reader = IndexReader.open(small, true);
IndexSearcher search = new IndexSearcher(reader);
ScoreDoc[] result;
@@ -175,7 +174,6 @@ public class TestMultiTermConstantScore extends BaseTestRangeFilter {
public void testBoost() throws IOException {
// NOTE: uses index build in *this* setUp
- IndexReader reader = IndexReader.open(small, true);
IndexSearcher search = new IndexSearcher(reader);
// test for correct application of query normalization
@@ -247,7 +245,6 @@ public class TestMultiTermConstantScore extends BaseTestRangeFilter {
public void testBooleanOrderUnAffected() throws IOException {
// NOTE: uses index build in *this* setUp
- IndexReader reader = IndexReader.open(small, true);
IndexSearcher search = new IndexSearcher(reader);
// first do a regular TermRangeQuery which uses term expansion so
@@ -278,7 +275,7 @@ public class TestMultiTermConstantScore extends BaseTestRangeFilter {
public void testRangeQueryId() throws IOException {
// NOTE: uses index build in *super* setUp
- IndexReader reader = IndexReader.open(signedIndex.index, true);
+ IndexReader reader = signedIndexReader;
IndexSearcher search = new IndexSearcher(reader);
int medId = ((maxId - minId) / 2);
@@ -405,7 +402,7 @@ public class TestMultiTermConstantScore extends BaseTestRangeFilter {
public void testRangeQueryIdCollating() throws IOException {
// NOTE: uses index build in *super* setUp
- IndexReader reader = IndexReader.open(signedIndex.index, true);
+ IndexReader reader = signedIndexReader;
IndexSearcher search = new IndexSearcher(reader);
int medId = ((maxId - minId) / 2);
@@ -488,11 +485,11 @@ public class TestMultiTermConstantScore extends BaseTestRangeFilter {
public void testRangeQueryRand() throws IOException {
// NOTE: uses index build in *super* setUp
- IndexReader reader = IndexReader.open(signedIndex.index, true);
+ IndexReader reader = signedIndexReader;
IndexSearcher search = new IndexSearcher(reader);
- String minRP = pad(signedIndex.minR);
- String maxRP = pad(signedIndex.maxR);
+ String minRP = pad(signedIndexDir.minR);
+ String maxRP = pad(signedIndexDir.maxR);
int numDocs = reader.numDocs();
@@ -551,11 +548,11 @@ public class TestMultiTermConstantScore extends BaseTestRangeFilter {
// NOTE: uses index build in *super* setUp
// using the unsigned index because collation seems to ignore hyphens
- IndexReader reader = IndexReader.open(unsignedIndex.index, true);
+ IndexReader reader = unsignedIndexReader;
IndexSearcher search = new IndexSearcher(reader);
- String minRP = pad(unsignedIndex.minR);
- String maxRP = pad(unsignedIndex.maxR);
+ String minRP = pad(unsignedIndexDir.minR);
+ String maxRP = pad(unsignedIndexDir.maxR);
int numDocs = reader.numDocs();
@@ -615,8 +612,8 @@ public class TestMultiTermConstantScore extends BaseTestRangeFilter {
/* build an index */
RAMDirectory farsiIndex = new RAMDirectory();
- IndexWriter writer = new IndexWriter(farsiIndex, new IndexWriterConfig(
- TEST_VERSION_CURRENT, new MockAnalyzer(MockTokenizer.SIMPLE, true)));
+ RandomIndexWriter writer = new RandomIndexWriter(rand, farsiIndex,
+ new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(MockTokenizer.SIMPLE, true)));
Document doc = new Document();
doc.add(new Field("content", "\u0633\u0627\u0628", Field.Store.YES,
Field.Index.NOT_ANALYZED));
@@ -625,10 +622,9 @@ public class TestMultiTermConstantScore extends BaseTestRangeFilter {
Field.Index.NOT_ANALYZED));
writer.addDocument(doc);
- writer.optimize();
+ IndexReader reader = writer.getReader();
writer.close();
- IndexReader reader = IndexReader.open(farsiIndex, true);
IndexSearcher search = new IndexSearcher(reader);
// Neither Java 1.4.2 nor 1.5.0 has Farsi Locale collation available in
@@ -649,14 +645,16 @@ public class TestMultiTermConstantScore extends BaseTestRangeFilter {
1000).scoreDocs;
assertEquals("The index Term should be included.", 1, result.length);
search.close();
+ reader.close();
+ farsiIndex.close();
}
public void testDanish() throws Exception {
/* build an index */
RAMDirectory danishIndex = new RAMDirectory();
- IndexWriter writer = new IndexWriter(danishIndex, new IndexWriterConfig(
- TEST_VERSION_CURRENT, new MockAnalyzer(MockTokenizer.SIMPLE, true)));
+ RandomIndexWriter writer = new RandomIndexWriter(rand, danishIndex,
+ new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(MockTokenizer.SIMPLE, true)));
// Danish collation orders the words below in the given order
// (example taken from TestSort.testInternationalSort() ).
@@ -669,10 +667,9 @@ public class TestMultiTermConstantScore extends BaseTestRangeFilter {
Field.Store.YES, Field.Index.NOT_ANALYZED));
writer.addDocument(doc);
}
- writer.optimize();
+ IndexReader reader = writer.getReader();
writer.close();
- IndexReader reader = IndexReader.open(danishIndex, true);
IndexSearcher search = new IndexSearcher(reader);
Collator c = Collator.getInstance(new Locale("da", "dk"));
@@ -687,5 +684,7 @@ public class TestMultiTermConstantScore extends BaseTestRangeFilter {
(csrq("content", "H\u00C5T", "MAND", F, F, c), null, 1000).scoreDocs;
assertEquals("The index Term should not be included.", 0, result.length);
search.close();
+ reader.close();
+ danishIndex.close();
}
}
diff --git a/lucene/src/test/org/apache/lucene/search/TestMultiValuedNumericRangeQuery.java b/lucene/src/test/org/apache/lucene/search/TestMultiValuedNumericRangeQuery.java
index c28831a04ec..eae12c42aff 100644
--- a/lucene/src/test/org/apache/lucene/search/TestMultiValuedNumericRangeQuery.java
+++ b/lucene/src/test/org/apache/lucene/search/TestMultiValuedNumericRangeQuery.java
@@ -26,8 +26,9 @@ import org.apache.lucene.analysis.MockAnalyzer;
import org.apache.lucene.document.Document;
import org.apache.lucene.document.Field;
import org.apache.lucene.document.NumericField;
-import org.apache.lucene.index.IndexWriter;
+import org.apache.lucene.index.IndexReader;
import org.apache.lucene.index.IndexWriterConfig;
+import org.apache.lucene.index.RandomIndexWriter;
import org.apache.lucene.store.RAMDirectory;
import org.apache.lucene.util.LuceneTestCase;
import org.apache.lucene.util._TestUtil;
@@ -44,7 +45,8 @@ public class TestMultiValuedNumericRangeQuery extends LuceneTestCase {
final Random rnd = newRandom();
RAMDirectory directory = new RAMDirectory();
- IndexWriter writer = new IndexWriter(directory, new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer()));
+ RandomIndexWriter writer = new RandomIndexWriter(rnd, directory,
+ new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer()));
DecimalFormat format = new DecimalFormat("00000000000", new DecimalFormatSymbols(Locale.US));
@@ -56,10 +58,11 @@ public class TestMultiValuedNumericRangeQuery extends LuceneTestCase {
doc.add(new NumericField("trie", Field.Store.NO, true).setIntValue(value));
}
writer.addDocument(doc);
- }
+ }
+ IndexReader reader = writer.getReader();
writer.close();
- Searcher searcher=new IndexSearcher(directory, true);
+ Searcher searcher=new IndexSearcher(reader);
for (int i=0; i<50*_TestUtil.getRandomMultiplier(); i++) {
int lower=rnd.nextInt(Integer.MAX_VALUE);
int upper=rnd.nextInt(Integer.MAX_VALUE);
@@ -73,7 +76,7 @@ public class TestMultiValuedNumericRangeQuery extends LuceneTestCase {
assertEquals("Returned count for NumericRangeQuery and TermRangeQuery must be equal", trTopDocs.totalHits, nrTopDocs.totalHits );
}
searcher.close();
-
+ reader.close();
directory.close();
}
diff --git a/lucene/src/test/org/apache/lucene/search/TestNot.java b/lucene/src/test/org/apache/lucene/search/TestNot.java
index 8eed0647a8a..77b6630ebda 100644
--- a/lucene/src/test/org/apache/lucene/search/TestNot.java
+++ b/lucene/src/test/org/apache/lucene/search/TestNot.java
@@ -19,8 +19,9 @@ package org.apache.lucene.search;
import org.apache.lucene.util.LuceneTestCase;
-import org.apache.lucene.index.IndexWriter;
+import org.apache.lucene.index.IndexReader;
import org.apache.lucene.index.IndexWriterConfig;
+import org.apache.lucene.index.RandomIndexWriter;
import org.apache.lucene.queryParser.QueryParser;
import org.apache.lucene.store.RAMDirectory;
import org.apache.lucene.analysis.MockAnalyzer;
@@ -39,21 +40,24 @@ public class TestNot extends LuceneTestCase {
public void testNot() throws Exception {
RAMDirectory store = new RAMDirectory();
- IndexWriter writer = new IndexWriter(store, new IndexWriterConfig(
- TEST_VERSION_CURRENT, new MockAnalyzer()));
+ RandomIndexWriter writer = new RandomIndexWriter(newRandom(), store,
+ new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer()));
Document d1 = new Document();
d1.add(new Field("field", "a b", Field.Store.YES, Field.Index.ANALYZED));
writer.addDocument(d1);
- writer.optimize();
- writer.close();
+ IndexReader reader = writer.getReader();
- Searcher searcher = new IndexSearcher(store, true);
+ Searcher searcher = new IndexSearcher(reader);
QueryParser parser = new QueryParser(TEST_VERSION_CURRENT, "field", new MockAnalyzer());
Query query = parser.parse("a NOT b");
//System.out.println(query);
ScoreDoc[] hits = searcher.search(query, null, 1000).scoreDocs;
assertEquals(0, hits.length);
+ writer.close();
+ searcher.close();
+ reader.close();
+ store.close();
}
}
diff --git a/lucene/src/test/org/apache/lucene/search/TestNumericRangeQuery32.java b/lucene/src/test/org/apache/lucene/search/TestNumericRangeQuery32.java
index bfefa7da94d..92287fac5e9 100644
--- a/lucene/src/test/org/apache/lucene/search/TestNumericRangeQuery32.java
+++ b/lucene/src/test/org/apache/lucene/search/TestNumericRangeQuery32.java
@@ -23,8 +23,10 @@ import org.apache.lucene.analysis.MockAnalyzer;
import org.apache.lucene.document.Document;
import org.apache.lucene.document.Field;
import org.apache.lucene.document.NumericField;
+import org.apache.lucene.index.IndexReader;
import org.apache.lucene.index.IndexWriter;
import org.apache.lucene.index.IndexWriterConfig;
+import org.apache.lucene.index.RandomIndexWriter;
import org.apache.lucene.index.TermsEnum;
import org.apache.lucene.store.RAMDirectory;
import org.apache.lucene.util.BytesRef;
@@ -46,12 +48,15 @@ public class TestNumericRangeQuery32 extends LuceneTestCaseJ4 {
private static final int noDocs = 10000*_TestUtil.getRandomMultiplier();
private static RAMDirectory directory = null;
+ private static IndexReader reader = null;
private static IndexSearcher searcher = null;
-
+
@BeforeClass
public static void beforeClass() throws Exception {
directory = new RAMDirectory();
- IndexWriter writer = new IndexWriter(directory, new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer()));
+ Random random = newStaticRandom(TestNumericRangeQuery32.class);
+ RandomIndexWriter writer = new RandomIndexWriter(random, directory,
+ new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer()));
NumericField
field8 = new NumericField("field8", 8, Field.Store.YES, true),
@@ -83,15 +88,17 @@ public class TestNumericRangeQuery32 extends LuceneTestCaseJ4 {
writer.addDocument(doc);
}
- writer.optimize();
+ reader = writer.getReader();
+ searcher=new IndexSearcher(reader);
writer.close();
- searcher=new IndexSearcher(directory, true);
}
@AfterClass
public static void afterClass() throws Exception {
searcher.close();
searcher = null;
+ reader.close();
+ reader = null;
directory.close();
directory = null;
}
@@ -147,7 +154,7 @@ public class TestNumericRangeQuery32 extends LuceneTestCaseJ4 {
assertEquals("First doc"+type, 2*distance+startOffset, Integer.parseInt(doc.get(field)) );
doc=searcher.doc(sd[sd.length-1].doc);
assertEquals("Last doc"+type, (1+count)*distance+startOffset, Integer.parseInt(doc.get(field)) );
- if (i>0) {
+ if (i>0 && searcher.getIndexReader().getSequentialSubReaders().length == 1) {
assertEquals("Distinct term number is equal for all query types", lastTerms, terms);
}
lastTerms = terms;
@@ -372,7 +379,7 @@ public class TestNumericRangeQuery32 extends LuceneTestCaseJ4 {
termCountT += tq.getTotalNumberOfTerms();
termCountC += cq.getTotalNumberOfTerms();
}
- if (precisionStep == Integer.MAX_VALUE) {
+ if (precisionStep == Integer.MAX_VALUE && searcher.getIndexReader().getSequentialSubReaders().length == 1) {
assertEquals("Total number of terms should be equal for unlimited precStep", termCountT, termCountC);
} else if (VERBOSE) {
System.out.println("Average number of terms during random search on '" + field + "':");
diff --git a/lucene/src/test/org/apache/lucene/search/TestNumericRangeQuery64.java b/lucene/src/test/org/apache/lucene/search/TestNumericRangeQuery64.java
index da431d0aec7..8081ca7ea90 100644
--- a/lucene/src/test/org/apache/lucene/search/TestNumericRangeQuery64.java
+++ b/lucene/src/test/org/apache/lucene/search/TestNumericRangeQuery64.java
@@ -23,8 +23,10 @@ import org.apache.lucene.analysis.MockAnalyzer;
import org.apache.lucene.document.Document;
import org.apache.lucene.document.Field;
import org.apache.lucene.document.NumericField;
+import org.apache.lucene.index.IndexReader;
import org.apache.lucene.index.IndexWriter;
import org.apache.lucene.index.IndexWriterConfig;
+import org.apache.lucene.index.RandomIndexWriter;
import org.apache.lucene.store.RAMDirectory;
import org.apache.lucene.util.BytesRef;
import org.apache.lucene.util.LuceneTestCaseJ4;
@@ -45,12 +47,15 @@ public class TestNumericRangeQuery64 extends LuceneTestCaseJ4 {
private static final int noDocs = 10000*_TestUtil.getRandomMultiplier();
private static RAMDirectory directory = null;
+ private static IndexReader reader = null;
private static IndexSearcher searcher = null;
@BeforeClass
public static void beforeClass() throws Exception {
directory = new RAMDirectory();
- IndexWriter writer = new IndexWriter(directory, new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer()));
+ Random random = newStaticRandom(TestNumericRangeQuery64.class);
+ RandomIndexWriter writer = new RandomIndexWriter(random, directory,
+ new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer()));
NumericField
field8 = new NumericField("field8", 8, Field.Store.YES, true),
@@ -86,15 +91,17 @@ public class TestNumericRangeQuery64 extends LuceneTestCaseJ4 {
writer.addDocument(doc);
}
- writer.optimize();
+ reader = writer.getReader();
+ searcher=new IndexSearcher(reader);
writer.close();
- searcher=new IndexSearcher(directory, true);
}
@AfterClass
public static void afterClass() throws Exception {
searcher.close();
searcher = null;
+ reader.close();
+ reader = null;
directory.close();
directory = null;
}
@@ -150,7 +157,7 @@ public class TestNumericRangeQuery64 extends LuceneTestCaseJ4 {
assertEquals("First doc"+type, 2*distance+startOffset, Long.parseLong(doc.get(field)) );
doc=searcher.doc(sd[sd.length-1].doc);
assertEquals("Last doc"+type, (1+count)*distance+startOffset, Long.parseLong(doc.get(field)) );
- if (i>0) {
+ if (i>0 && searcher.getIndexReader().getSequentialSubReaders().length == 1) {
assertEquals("Distinct term number is equal for all query types", lastTerms, terms);
}
lastTerms = terms;
@@ -391,7 +398,7 @@ public class TestNumericRangeQuery64 extends LuceneTestCaseJ4 {
termCountT += tq.getTotalNumberOfTerms();
termCountC += cq.getTotalNumberOfTerms();
}
- if (precisionStep == Integer.MAX_VALUE) {
+ if (precisionStep == Integer.MAX_VALUE && searcher.getIndexReader().getSequentialSubReaders().length == 1) {
assertEquals("Total number of terms should be equal for unlimited precStep", termCountT, termCountC);
} else if (VERBOSE) {
System.out.println("Average number of terms during random search on '" + field + "':");
diff --git a/lucene/src/test/org/apache/lucene/search/TestPhrasePrefixQuery.java b/lucene/src/test/org/apache/lucene/search/TestPhrasePrefixQuery.java
index 1464c5e7f3d..a39a9d8db94 100644
--- a/lucene/src/test/org/apache/lucene/search/TestPhrasePrefixQuery.java
+++ b/lucene/src/test/org/apache/lucene/search/TestPhrasePrefixQuery.java
@@ -21,9 +21,9 @@ import org.apache.lucene.util.LuceneTestCase;
import org.apache.lucene.analysis.MockAnalyzer;
import org.apache.lucene.document.Document;
import org.apache.lucene.document.Field;
+import org.apache.lucene.index.RandomIndexWriter;
import org.apache.lucene.index.TermsEnum;
import org.apache.lucene.index.IndexReader;
-import org.apache.lucene.index.IndexWriter;
import org.apache.lucene.index.IndexWriterConfig;
import org.apache.lucene.index.Term;
import org.apache.lucene.index.MultiFields;
@@ -40,66 +40,72 @@ public class TestPhrasePrefixQuery extends LuceneTestCase {
public TestPhrasePrefixQuery(String name) {
super(name);
}
-
- /**
+
+ /**
*
*/
- public void testPhrasePrefix()
- throws IOException
- {
- RAMDirectory indexStore = new RAMDirectory();
- IndexWriter writer = new IndexWriter(indexStore, new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer()));
- Document doc1 = new Document();
- Document doc2 = new Document();
- Document doc3 = new Document();
- Document doc4 = new Document();
- Document doc5 = new Document();
- doc1.add(new Field("body", "blueberry pie", Field.Store.YES, Field.Index.ANALYZED));
- doc2.add(new Field("body", "blueberry strudel", Field.Store.YES, Field.Index.ANALYZED));
- doc3.add(new Field("body", "blueberry pizza", Field.Store.YES, Field.Index.ANALYZED));
- doc4.add(new Field("body", "blueberry chewing gum", Field.Store.YES, Field.Index.ANALYZED));
- doc5.add(new Field("body", "piccadilly circus", Field.Store.YES, Field.Index.ANALYZED));
- writer.addDocument(doc1);
- writer.addDocument(doc2);
- writer.addDocument(doc3);
- writer.addDocument(doc4);
- writer.addDocument(doc5);
- writer.optimize();
- writer.close();
-
- IndexSearcher searcher = new IndexSearcher(indexStore, true);
-
- //PhrasePrefixQuery query1 = new PhrasePrefixQuery();
- MultiPhraseQuery query1 = new MultiPhraseQuery();
- //PhrasePrefixQuery query2 = new PhrasePrefixQuery();
- MultiPhraseQuery query2 = new MultiPhraseQuery();
- query1.add(new Term("body", "blueberry"));
- query2.add(new Term("body", "strawberry"));
-
- LinkedList termsWithPrefix = new LinkedList();
- IndexReader ir = IndexReader.open(indexStore, true);
-
- // this TermEnum gives "piccadilly", "pie" and "pizza".
- String prefix = "pi";
- TermsEnum te = MultiFields.getFields(ir).terms("body").iterator();
- te.seek(new BytesRef(prefix));
- do {
- String s = te.term().utf8ToString();
- if (s.startsWith(prefix)) {
- termsWithPrefix.add(new Term("body", s));
- } else {
- break;
- }
- } while (te.next() != null);
-
- query1.add(termsWithPrefix.toArray(new Term[0]));
- query2.add(termsWithPrefix.toArray(new Term[0]));
-
- ScoreDoc[] result;
- result = searcher.search(query1, null, 1000).scoreDocs;
- assertEquals(2, result.length);
-
- result = searcher.search(query2, null, 1000).scoreDocs;
- assertEquals(0, result.length);
- }
+ public void testPhrasePrefix() throws IOException {
+ RAMDirectory indexStore = new RAMDirectory();
+ RandomIndexWriter writer = new RandomIndexWriter(newRandom(), indexStore,
+ new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer()));
+ Document doc1 = new Document();
+ Document doc2 = new Document();
+ Document doc3 = new Document();
+ Document doc4 = new Document();
+ Document doc5 = new Document();
+ doc1.add(new Field("body", "blueberry pie", Field.Store.YES,
+ Field.Index.ANALYZED));
+ doc2.add(new Field("body", "blueberry strudel", Field.Store.YES,
+ Field.Index.ANALYZED));
+ doc3.add(new Field("body", "blueberry pizza", Field.Store.YES,
+ Field.Index.ANALYZED));
+ doc4.add(new Field("body", "blueberry chewing gum", Field.Store.YES,
+ Field.Index.ANALYZED));
+ doc5.add(new Field("body", "piccadilly circus", Field.Store.YES,
+ Field.Index.ANALYZED));
+ writer.addDocument(doc1);
+ writer.addDocument(doc2);
+ writer.addDocument(doc3);
+ writer.addDocument(doc4);
+ writer.addDocument(doc5);
+ IndexReader reader = writer.getReader();
+ writer.close();
+
+ IndexSearcher searcher = new IndexSearcher(reader);
+
+ // PhrasePrefixQuery query1 = new PhrasePrefixQuery();
+ MultiPhraseQuery query1 = new MultiPhraseQuery();
+ // PhrasePrefixQuery query2 = new PhrasePrefixQuery();
+ MultiPhraseQuery query2 = new MultiPhraseQuery();
+ query1.add(new Term("body", "blueberry"));
+ query2.add(new Term("body", "strawberry"));
+
+ LinkedList termsWithPrefix = new LinkedList();
+
+ // this TermEnum gives "piccadilly", "pie" and "pizza".
+ String prefix = "pi";
+ TermsEnum te = MultiFields.getFields(reader).terms("body").iterator();
+ te.seek(new BytesRef(prefix));
+ do {
+ String s = te.term().utf8ToString();
+ if (s.startsWith(prefix)) {
+ termsWithPrefix.add(new Term("body", s));
+ } else {
+ break;
+ }
+ } while (te.next() != null);
+
+ query1.add(termsWithPrefix.toArray(new Term[0]));
+ query2.add(termsWithPrefix.toArray(new Term[0]));
+
+ ScoreDoc[] result;
+ result = searcher.search(query1, null, 1000).scoreDocs;
+ assertEquals(2, result.length);
+
+ result = searcher.search(query2, null, 1000).scoreDocs;
+ assertEquals(0, result.length);
+ searcher.close();
+ reader.close();
+ indexStore.close();
+ }
}
diff --git a/lucene/src/test/org/apache/lucene/search/TestPhraseQuery.java b/lucene/src/test/org/apache/lucene/search/TestPhraseQuery.java
index 8d1cc5dcfd3..2356330814f 100644
--- a/lucene/src/test/org/apache/lucene/search/TestPhraseQuery.java
+++ b/lucene/src/test/org/apache/lucene/search/TestPhraseQuery.java
@@ -46,12 +46,15 @@ public class TestPhraseQuery extends LuceneTestCase {
public static final float SCORE_COMP_THRESH = 1e-6f;
private IndexSearcher searcher;
+ private IndexReader reader;
private PhraseQuery query;
private RAMDirectory directory;
+ private Random random;
@Override
public void setUp() throws Exception {
super.setUp();
+ random = newRandom();
directory = new RAMDirectory();
Analyzer analyzer = new Analyzer() {
@Override
@@ -64,7 +67,8 @@ public class TestPhraseQuery extends LuceneTestCase {
return 100;
}
};
- IndexWriter writer = new IndexWriter(directory, new IndexWriterConfig(TEST_VERSION_CURRENT, analyzer));
+ RandomIndexWriter writer = new RandomIndexWriter(random, directory,
+ new IndexWriterConfig(TEST_VERSION_CURRENT, analyzer));
Document doc = new Document();
doc.add(new Field("field", "one two three four five", Field.Store.YES, Field.Index.ANALYZED));
@@ -82,16 +86,17 @@ public class TestPhraseQuery extends LuceneTestCase {
doc.add(new Field("nonexist", "phrase exist notexist exist found", Field.Store.YES, Field.Index.ANALYZED));
writer.addDocument(doc);
- writer.optimize();
+ reader = writer.getReader();
writer.close();
- searcher = new IndexSearcher(directory, true);
+ searcher = new IndexSearcher(reader);
query = new PhraseQuery();
}
@Override
protected void tearDown() throws Exception {
searcher.close();
+ reader.close();
directory.close();
super.tearDown();
}
@@ -211,14 +216,15 @@ public class TestPhraseQuery extends LuceneTestCase {
public void testPhraseQueryWithStopAnalyzer() throws Exception {
RAMDirectory directory = new RAMDirectory();
Analyzer stopAnalyzer = new MockAnalyzer(MockTokenizer.SIMPLE, true, MockTokenFilter.ENGLISH_STOPSET, false);
- IndexWriter writer = new IndexWriter(directory, new IndexWriterConfig(
- Version.LUCENE_24, stopAnalyzer));
+ RandomIndexWriter writer = new RandomIndexWriter(random, directory,
+ new IndexWriterConfig(Version.LUCENE_24, stopAnalyzer));
Document doc = new Document();
doc.add(new Field("field", "the stop words are here", Field.Store.YES, Field.Index.ANALYZED));
writer.addDocument(doc);
+ IndexReader reader = writer.getReader();
writer.close();
- IndexSearcher searcher = new IndexSearcher(directory, true);
+ IndexSearcher searcher = new IndexSearcher(reader);
// valid exact phrase query
PhraseQuery query = new PhraseQuery();
@@ -239,11 +245,14 @@ public class TestPhraseQuery extends LuceneTestCase {
searcher.close();
+ reader.close();
+ directory.close();
}
public void testPhraseQueryInConjunctionScorer() throws Exception {
RAMDirectory directory = new RAMDirectory();
- IndexWriter writer = new IndexWriter(directory, new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer()));
+ RandomIndexWriter writer = new RandomIndexWriter(random, directory,
+ new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer()));
Document doc = new Document();
doc.add(new Field("source", "marketing info", Field.Store.YES, Field.Index.ANALYZED));
@@ -254,10 +263,10 @@ public class TestPhraseQuery extends LuceneTestCase {
doc.add(new Field("source", "marketing info", Field.Store.YES, Field.Index.ANALYZED));
writer.addDocument(doc);
- writer.optimize();
+ IndexReader reader = writer.getReader();
writer.close();
- IndexSearcher searcher = new IndexSearcher(directory, true);
+ IndexSearcher searcher = new IndexSearcher(reader);
PhraseQuery phraseQuery = new PhraseQuery();
phraseQuery.add(new Term("source", "marketing"));
@@ -277,8 +286,10 @@ public class TestPhraseQuery extends LuceneTestCase {
searcher.close();
+ reader.close();
- writer = new IndexWriter(directory, new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer()).setOpenMode(OpenMode.CREATE));
+ writer = new RandomIndexWriter(random, directory,
+ new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer()).setOpenMode(OpenMode.CREATE));
doc = new Document();
doc.add(new Field("contents", "map entry woo", Field.Store.YES, Field.Index.ANALYZED));
writer.addDocument(doc);
@@ -291,10 +302,10 @@ public class TestPhraseQuery extends LuceneTestCase {
doc.add(new Field("contents", "map foobarword entry woo", Field.Store.YES, Field.Index.ANALYZED));
writer.addDocument(doc);
- writer.optimize();
+ reader = writer.getReader();
writer.close();
- searcher = new IndexSearcher(directory, true);
+ searcher = new IndexSearcher(reader);
termQuery = new TermQuery(new Term("contents","woo"));
phraseQuery = new PhraseQuery();
@@ -322,12 +333,14 @@ public class TestPhraseQuery extends LuceneTestCase {
searcher.close();
+ reader.close();
directory.close();
}
public void testSlopScoring() throws IOException {
Directory directory = new RAMDirectory();
- IndexWriter writer = new IndexWriter(directory, new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer()));
+ RandomIndexWriter writer = new RandomIndexWriter(random, directory,
+ new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer()));
Document doc = new Document();
doc.add(new Field("field", "foo firstname lastname foo", Field.Store.YES, Field.Index.ANALYZED));
@@ -341,10 +354,10 @@ public class TestPhraseQuery extends LuceneTestCase {
doc3.add(new Field("field", "foo firstname zzz yyy lastname foo", Field.Store.YES, Field.Index.ANALYZED));
writer.addDocument(doc3);
- writer.optimize();
+ IndexReader reader = writer.getReader();
writer.close();
- Searcher searcher = new IndexSearcher(directory, true);
+ Searcher searcher = new IndexSearcher(reader);
PhraseQuery query = new PhraseQuery();
query.add(new Term("field", "firstname"));
query.add(new Term("field", "lastname"));
@@ -359,7 +372,10 @@ public class TestPhraseQuery extends LuceneTestCase {
assertEquals(1, hits[1].doc);
assertEquals(0.31, hits[2].score, 0.01);
assertEquals(2, hits[2].doc);
- QueryUtils.check(query,searcher);
+ QueryUtils.check(query,searcher);
+ searcher.close();
+ reader.close();
+ directory.close();
}
public void testToString() throws Exception {
@@ -587,13 +603,14 @@ public class TestPhraseQuery extends LuceneTestCase {
Directory dir = new MockRAMDirectory();
Analyzer analyzer = new MockAnalyzer();
- IndexWriter w = new IndexWriter(dir, new IndexWriterConfig(TEST_VERSION_CURRENT, analyzer));
+ RandomIndexWriter w = new RandomIndexWriter(random, dir,
+ new IndexWriterConfig(TEST_VERSION_CURRENT, analyzer));
List<List<Term>> docs = new ArrayList<List<Term>>();
Document d = new Document();
Field f = new Field("f", "", Field.Store.NO, Field.Index.ANALYZED);
d.add(f);
- Random r = newRandom();
+ Random r = random;
int NUM_DOCS = 10*_TestUtil.getRandomMultiplier();
for(int i=0;i<NUM_DOCS;i++) {
- * NOTE: at the moment, this class only tests for 'positive' results,
- * it does not verify the results to ensure there are no 'false positives',
- * nor does it adequately test 'negative' results. It also does not test
- * that garbage in results in an Exception.
+ * NOTE: at the moment, this class only tests for 'positive' results, it does
+ * not verify the results to ensure there are no 'false positives', nor does it
+ * adequately test 'negative' results. It also does not test that garbage in
+ * results in an Exception.
*/
public class TestTermRangeFilter extends BaseTestRangeFilter {
-
- public TestTermRangeFilter(String name) {
- super(name);
- }
- public TestTermRangeFilter() {
- super();
- }
-
- public void testRangeFilterId() throws IOException {
-
- IndexReader reader = IndexReader.open(signedIndex.index, true);
- IndexSearcher search = new IndexSearcher(reader);
-
- int medId = ((maxId - minId) / 2);
-
- String minIP = pad(minId);
- String maxIP = pad(maxId);
- String medIP = pad(medId);
+
+ public void testRangeFilterId() throws IOException {
- int numDocs = reader.numDocs();
-
- assertEquals("num of docs", numDocs, 1+ maxId - minId);
-
- ScoreDoc[] result;
- Query q = new TermQuery(new Term("body","body"));
-
- // test id, bounded on both ends
-
- result = search.search(q,new TermRangeFilter("id",minIP,maxIP,T,T), numDocs).scoreDocs;
- assertEquals("find all", numDocs, result.length);
-
- result = search.search(q,new TermRangeFilter("id",minIP,maxIP,T,F), numDocs).scoreDocs;
- assertEquals("all but last", numDocs-1, result.length);
-
- result = search.search(q,new TermRangeFilter("id",minIP,maxIP,F,T), numDocs).scoreDocs;
- assertEquals("all but first", numDocs-1, result.length);
-
- result = search.search(q,new TermRangeFilter("id",minIP,maxIP,F,F), numDocs).scoreDocs;
- assertEquals("all but ends", numDocs-2, result.length);
+ IndexReader reader = signedIndexReader;
+ IndexSearcher search = new IndexSearcher(reader);
- result = search.search(q,new TermRangeFilter("id",medIP,maxIP,T,T), numDocs).scoreDocs;
- assertEquals("med and up", 1+ maxId-medId, result.length);
-
- result = search.search(q,new TermRangeFilter("id",minIP,medIP,T,T), numDocs).scoreDocs;
- assertEquals("up to med", 1+ medId-minId, result.length);
-
- // unbounded id
-
- result = search.search(q,new TermRangeFilter("id",minIP,null,T,F), numDocs).scoreDocs;
- assertEquals("min and up", numDocs, result.length);
-
- result = search.search(q,new TermRangeFilter("id",null,maxIP,F,T), numDocs).scoreDocs;
- assertEquals("max and down", numDocs, result.length);
-
- result = search.search(q,new TermRangeFilter("id",minIP,null,F,F), numDocs).scoreDocs;
- assertEquals("not min, but up", numDocs-1, result.length);
-
- result = search.search(q,new TermRangeFilter("id",null,maxIP,F,F), numDocs).scoreDocs;
- assertEquals("not max, but down", numDocs-1, result.length);
-
- result = search.search(q,new TermRangeFilter("id",medIP,maxIP,T,F), numDocs).scoreDocs;
- assertEquals("med and up, not max", maxId-medId, result.length);
-
- result = search.search(q,new TermRangeFilter("id",minIP,medIP,F,T), numDocs).scoreDocs;
- assertEquals("not min, up to med", medId-minId, result.length);
-
- // very small sets
-
- result = search.search(q,new TermRangeFilter("id",minIP,minIP,F,F), numDocs).scoreDocs;
- assertEquals("min,min,F,F", 0, result.length);
- result = search.search(q,new TermRangeFilter("id",medIP,medIP,F,F), numDocs).scoreDocs;
- assertEquals("med,med,F,F", 0, result.length);
- result = search.search(q,new TermRangeFilter("id",maxIP,maxIP,F,F), numDocs).scoreDocs;
- assertEquals("max,max,F,F", 0, result.length);
-
- result = search.search(q,new TermRangeFilter("id",minIP,minIP,T,T), numDocs).scoreDocs;
- assertEquals("min,min,T,T", 1, result.length);
- result = search.search(q,new TermRangeFilter("id",null,minIP,F,T), numDocs).scoreDocs;
- assertEquals("nul,min,F,T", 1, result.length);
-
- result = search.search(q,new TermRangeFilter("id",maxIP,maxIP,T,T), numDocs).scoreDocs;
- assertEquals("max,max,T,T", 1, result.length);
- result = search.search(q,new TermRangeFilter("id",maxIP,null,T,F), numDocs).scoreDocs;
- assertEquals("max,nul,T,T", 1, result.length);
-
- result = search.search(q,new TermRangeFilter("id",medIP,medIP,T,T), numDocs).scoreDocs;
- assertEquals("med,med,T,T", 1, result.length);
-
- }
-
- public void testRangeFilterIdCollating() throws IOException {
-
- IndexReader reader = IndexReader.open(signedIndex.index, true);
- IndexSearcher search = new IndexSearcher(reader);
-
- Collator c = Collator.getInstance(Locale.ENGLISH);
-
- int medId = ((maxId - minId) / 2);
-
- String minIP = pad(minId);
- String maxIP = pad(maxId);
- String medIP = pad(medId);
-
- int numDocs = reader.numDocs();
-
- assertEquals("num of docs", numDocs, 1+ maxId - minId);
-
- Query q = new TermQuery(new Term("body","body"));
-
- // test id, bounded on both ends
- int numHits = search.search(q,new TermRangeFilter("id",minIP,maxIP,T,T,c), 1000).totalHits;
- assertEquals("find all", numDocs, numHits);
-
- numHits = search.search(q,new TermRangeFilter("id",minIP,maxIP,T,F,c), 1000).totalHits;
- assertEquals("all but last", numDocs-1, numHits);
-
- numHits = search.search(q,new TermRangeFilter("id",minIP,maxIP,F,T,c), 1000).totalHits;
- assertEquals("all but first", numDocs-1, numHits);
-
- numHits = search.search(q,new TermRangeFilter("id",minIP,maxIP,F,F,c), 1000).totalHits;
- assertEquals("all but ends", numDocs-2, numHits);
-
- numHits = search.search(q,new TermRangeFilter("id",medIP,maxIP,T,T,c), 1000).totalHits;
- assertEquals("med and up", 1+ maxId-medId, numHits);
-
- numHits = search.search(q,new TermRangeFilter("id",minIP,medIP,T,T,c), 1000).totalHits;
- assertEquals("up to med", 1+ medId-minId, numHits);
-
- // unbounded id
-
- numHits = search.search(q,new TermRangeFilter("id",minIP,null,T,F,c), 1000).totalHits;
- assertEquals("min and up", numDocs, numHits);
-
- numHits = search.search(q,new TermRangeFilter("id",null,maxIP,F,T,c), 1000).totalHits;
- assertEquals("max and down", numDocs, numHits);
-
- numHits = search.search(q,new TermRangeFilter("id",minIP,null,F,F,c), 1000).totalHits;
- assertEquals("not min, but up", numDocs-1, numHits);
-
- numHits = search.search(q,new TermRangeFilter("id",null,maxIP,F,F,c), 1000).totalHits;
- assertEquals("not max, but down", numDocs-1, numHits);
-
- numHits = search.search(q,new TermRangeFilter("id",medIP,maxIP,T,F,c), 1000).totalHits;
- assertEquals("med and up, not max", maxId-medId, numHits);
-
- numHits = search.search(q,new TermRangeFilter("id",minIP,medIP,F,T,c), 1000).totalHits;
- assertEquals("not min, up to med", medId-minId, numHits);
-
- // very small sets
-
- numHits = search.search(q,new TermRangeFilter("id",minIP,minIP,F,F,c), 1000).totalHits;
- assertEquals("min,min,F,F", 0, numHits);
- numHits = search.search(q,new TermRangeFilter("id",medIP,medIP,F,F,c), 1000).totalHits;
- assertEquals("med,med,F,F", 0, numHits);
- numHits = search.search(q,new TermRangeFilter("id",maxIP,maxIP,F,F,c), 1000).totalHits;
- assertEquals("max,max,F,F", 0, numHits);
-
- numHits = search.search(q,new TermRangeFilter("id",minIP,minIP,T,T,c), 1000).totalHits;
- assertEquals("min,min,T,T", 1, numHits);
- numHits = search.search(q,new TermRangeFilter("id",null,minIP,F,T,c), 1000).totalHits;
- assertEquals("nul,min,F,T", 1, numHits);
-
- numHits = search.search(q,new TermRangeFilter("id",maxIP,maxIP,T,T,c), 1000).totalHits;
- assertEquals("max,max,T,T", 1, numHits);
- numHits = search.search(q,new TermRangeFilter("id",maxIP,null,T,F,c), 1000).totalHits;
- assertEquals("max,nul,T,T", 1, numHits);
-
- numHits = search.search(q,new TermRangeFilter("id",medIP,medIP,T,T,c), 1000).totalHits;
- assertEquals("med,med,T,T", 1, numHits);
- }
-
- public void testRangeFilterRand() throws IOException {
-
- IndexReader reader = IndexReader.open(signedIndex.index, true);
- IndexSearcher search = new IndexSearcher(reader);
-
- String minRP = pad(signedIndex.minR);
- String maxRP = pad(signedIndex.maxR);
+ int medId = ((maxId - minId) / 2);
- int numDocs = reader.numDocs();
-
- assertEquals("num of docs", numDocs, 1+ maxId - minId);
-
- ScoreDoc[] result;
- Query q = new TermQuery(new Term("body","body"));
-
- // test extremes, bounded on both ends
-
- result = search.search(q,new TermRangeFilter("rand",minRP,maxRP,T,T), numDocs).scoreDocs;
- assertEquals("find all", numDocs, result.length);
-
- result = search.search(q,new TermRangeFilter("rand",minRP,maxRP,T,F), numDocs).scoreDocs;
- assertEquals("all but biggest", numDocs-1, result.length);
-
- result = search.search(q,new TermRangeFilter("rand",minRP,maxRP,F,T), numDocs).scoreDocs;
- assertEquals("all but smallest", numDocs-1, result.length);
-
- result = search.search(q,new TermRangeFilter("rand",minRP,maxRP,F,F), numDocs).scoreDocs;
- assertEquals("all but extremes", numDocs-2, result.length);
+ String minIP = pad(minId);
+ String maxIP = pad(maxId);
+ String medIP = pad(medId);
- // unbounded
-
- result = search.search(q,new TermRangeFilter("rand",minRP,null,T,F), numDocs).scoreDocs;
- assertEquals("smallest and up", numDocs, result.length);
-
- result = search.search(q,new TermRangeFilter("rand",null,maxRP,F,T), numDocs).scoreDocs;
- assertEquals("biggest and down", numDocs, result.length);
-
- result = search.search(q,new TermRangeFilter("rand",minRP,null,F,F), numDocs).scoreDocs;
- assertEquals("not smallest, but up", numDocs-1, result.length);
-
- result = search.search(q,new TermRangeFilter("rand",null,maxRP,F,F), numDocs).scoreDocs;
- assertEquals("not biggest, but down", numDocs-1, result.length);
-
- // very small sets
-
- result = search.search(q,new TermRangeFilter("rand",minRP,minRP,F,F), numDocs).scoreDocs;
- assertEquals("min,min,F,F", 0, result.length);
- result = search.search(q,new TermRangeFilter("rand",maxRP,maxRP,F,F), numDocs).scoreDocs;
- assertEquals("max,max,F,F", 0, result.length);
-
- result = search.search(q,new TermRangeFilter("rand",minRP,minRP,T,T), numDocs).scoreDocs;
- assertEquals("min,min,T,T", 1, result.length);
- result = search.search(q,new TermRangeFilter("rand",null,minRP,F,T), numDocs).scoreDocs;
- assertEquals("nul,min,F,T", 1, result.length);
-
- result = search.search(q,new TermRangeFilter("rand",maxRP,maxRP,T,T), numDocs).scoreDocs;
- assertEquals("max,max,T,T", 1, result.length);
- result = search.search(q,new TermRangeFilter("rand",maxRP,null,T,F), numDocs).scoreDocs;
- assertEquals("max,nul,T,T", 1, result.length);
-
- }
-
- public void testRangeFilterRandCollating() throws IOException {
-
- // using the unsigned index because collation seems to ignore hyphens
- IndexReader reader = IndexReader.open(unsignedIndex.index, true);
- IndexSearcher search = new IndexSearcher(reader);
-
- Collator c = Collator.getInstance(Locale.ENGLISH);
-
- String minRP = pad(unsignedIndex.minR);
- String maxRP = pad(unsignedIndex.maxR);
-
- int numDocs = reader.numDocs();
-
- assertEquals("num of docs", numDocs, 1+ maxId - minId);
-
- Query q = new TermQuery(new Term("body","body"));
-
- // test extremes, bounded on both ends
-
- int numHits = search.search(q,new TermRangeFilter("rand",minRP,maxRP,T,T,c), 1000).totalHits;
- assertEquals("find all", numDocs, numHits);
-
- numHits = search.search(q,new TermRangeFilter("rand",minRP,maxRP,T,F,c), 1000).totalHits;
- assertEquals("all but biggest", numDocs-1, numHits);
-
- numHits = search.search(q,new TermRangeFilter("rand",minRP,maxRP,F,T,c), 1000).totalHits;
- assertEquals("all but smallest", numDocs-1, numHits);
-
- numHits = search.search(q,new TermRangeFilter("rand",minRP,maxRP,F,F,c), 1000).totalHits;
- assertEquals("all but extremes", numDocs-2, numHits);
-
- // unbounded
-
- numHits = search.search(q,new TermRangeFilter("rand",minRP,null,T,F,c), 1000).totalHits;
- assertEquals("smallest and up", numDocs, numHits);
-
- numHits = search.search(q,new TermRangeFilter("rand",null,maxRP,F,T,c), 1000).totalHits;
- assertEquals("biggest and down", numDocs, numHits);
-
- numHits = search.search(q,new TermRangeFilter("rand",minRP,null,F,F,c), 1000).totalHits;
- assertEquals("not smallest, but up", numDocs-1, numHits);
-
- numHits = search.search(q,new TermRangeFilter("rand",null,maxRP,F,F,c), 1000).totalHits;
- assertEquals("not biggest, but down", numDocs-1, numHits);
-
- // very small sets
-
- numHits = search.search(q,new TermRangeFilter("rand",minRP,minRP,F,F,c), 1000).totalHits;
- assertEquals("min,min,F,F", 0, numHits);
- numHits = search.search(q,new TermRangeFilter("rand",maxRP,maxRP,F,F,c), 1000).totalHits;
- assertEquals("max,max,F,F", 0, numHits);
-
- numHits = search.search(q,new TermRangeFilter("rand",minRP,minRP,T,T,c), 1000).totalHits;
- assertEquals("min,min,T,T", 1, numHits);
- numHits = search.search(q,new TermRangeFilter("rand",null,minRP,F,T,c), 1000).totalHits;
- assertEquals("nul,min,F,T", 1, numHits);
-
- numHits = search.search(q,new TermRangeFilter("rand",maxRP,maxRP,T,T,c), 1000).totalHits;
- assertEquals("max,max,T,T", 1, numHits);
- numHits = search.search(q,new TermRangeFilter("rand",maxRP,null,T,F,c), 1000).totalHits;
- assertEquals("max,nul,T,T", 1, numHits);
- }
+ int numDocs = reader.numDocs();
- public void testFarsi() throws Exception {
-
- /* build an index */
- RAMDirectory farsiIndex = new RAMDirectory();
- IndexWriter writer = new IndexWriter(farsiIndex, new IndexWriterConfig(
- TEST_VERSION_CURRENT, new MockAnalyzer()));
- Document doc = new Document();
- doc.add(new Field("content","\u0633\u0627\u0628",
- Field.Store.YES, Field.Index.NOT_ANALYZED));
- doc.add(new Field("body", "body",
- Field.Store.YES, Field.Index.NOT_ANALYZED));
- writer.addDocument(doc);
-
- writer.optimize();
- writer.close();
-
- IndexReader reader = IndexReader.open(farsiIndex, true);
- IndexSearcher search = new IndexSearcher(reader);
- Query q = new TermQuery(new Term("body","body"));
-
- // Neither Java 1.4.2 nor 1.5.0 has Farsi Locale collation available in
- // RuleBasedCollator. However, the Arabic Locale seems to order the Farsi
- // characters properly.
- Collator collator = Collator.getInstance(new Locale("ar"));
-
- // Unicode order would include U+0633 in [ U+062F - U+0698 ], but Farsi
- // orders the U+0698 character before the U+0633 character, so the single
- // index Term below should NOT be returned by a TermRangeFilter with a Farsi
- // Collator (or an Arabic one for the case when Farsi is not supported).
- int numHits = search.search
- (q, new TermRangeFilter("content", "\u062F", "\u0698", T, T, collator), 1000).totalHits;
- assertEquals("The index Term should not be included.", 0, numHits);
-
- numHits = search.search
- (q, new TermRangeFilter("content", "\u0633", "\u0638", T, T, collator), 1000).totalHits;
- assertEquals("The index Term should be included.", 1, numHits);
- search.close();
- }
-
- public void testDanish() throws Exception {
-
- /* build an index */
- RAMDirectory danishIndex = new RAMDirectory();
- IndexWriter writer = new IndexWriter(danishIndex, new IndexWriterConfig(
- TEST_VERSION_CURRENT, new MockAnalyzer()));
- // Danish collation orders the words below in the given order
- // (example taken from TestSort.testInternationalSort() ).
- String[] words = { "H\u00D8T", "H\u00C5T", "MAND" };
- for (int docnum = 0 ; docnum < words.length ; ++docnum) {
- Document doc = new Document();
- doc.add(new Field("content", words[docnum],
- Field.Store.YES, Field.Index.NOT_ANALYZED));
- doc.add(new Field("body", "body",
- Field.Store.YES, Field.Index.NOT_ANALYZED));
- writer.addDocument(doc);
- }
- writer.optimize();
- writer.close();
-
- IndexReader reader = IndexReader.open(danishIndex, true);
- IndexSearcher search = new IndexSearcher(reader);
- Query q = new TermQuery(new Term("body","body"));
-
- Collator collator = Collator.getInstance(new Locale("da", "dk"));
-
- // Unicode order would not include "H\u00C5T" in [ "H\u00D8T", "MAND" ],
- // but Danish collation does.
- int numHits = search.search
- (q, new TermRangeFilter("content", "H\u00D8T", "MAND", F, F, collator), 1000).totalHits;
- assertEquals("The index Term should be included.", 1, numHits);
-
- numHits = search.search
- (q, new TermRangeFilter("content", "H\u00C5T", "MAND", F, F, collator), 1000).totalHits;
- assertEquals
- ("The index Term should not be included.", 0, numHits);
- search.close();
+ assertEquals("num of docs", numDocs, 1 + maxId - minId);
+
+ ScoreDoc[] result;
+ Query q = new TermQuery(new Term("body", "body"));
+
+ // test id, bounded on both ends
+
+ result = search.search(q, new TermRangeFilter("id", minIP, maxIP, T, T),
+ numDocs).scoreDocs;
+ assertEquals("find all", numDocs, result.length);
+
+ result = search.search(q, new TermRangeFilter("id", minIP, maxIP, T, F),
+ numDocs).scoreDocs;
+ assertEquals("all but last", numDocs - 1, result.length);
+
+ result = search.search(q, new TermRangeFilter("id", minIP, maxIP, F, T),
+ numDocs).scoreDocs;
+ assertEquals("all but first", numDocs - 1, result.length);
+
+ result = search.search(q, new TermRangeFilter("id", minIP, maxIP, F, F),
+ numDocs).scoreDocs;
+ assertEquals("all but ends", numDocs - 2, result.length);
+
+ result = search.search(q, new TermRangeFilter("id", medIP, maxIP, T, T),
+ numDocs).scoreDocs;
+ assertEquals("med and up", 1 + maxId - medId, result.length);
+
+ result = search.search(q, new TermRangeFilter("id", minIP, medIP, T, T),
+ numDocs).scoreDocs;
+ assertEquals("up to med", 1 + medId - minId, result.length);
+
+ // unbounded id
+
+ result = search.search(q, new TermRangeFilter("id", minIP, null, T, F),
+ numDocs).scoreDocs;
+ assertEquals("min and up", numDocs, result.length);
+
+ result = search.search(q, new TermRangeFilter("id", null, maxIP, F, T),
+ numDocs).scoreDocs;
+ assertEquals("max and down", numDocs, result.length);
+
+ result = search.search(q, new TermRangeFilter("id", minIP, null, F, F),
+ numDocs).scoreDocs;
+ assertEquals("not min, but up", numDocs - 1, result.length);
+
+ result = search.search(q, new TermRangeFilter("id", null, maxIP, F, F),
+ numDocs).scoreDocs;
+ assertEquals("not max, but down", numDocs - 1, result.length);
+
+ result = search.search(q, new TermRangeFilter("id", medIP, maxIP, T, F),
+ numDocs).scoreDocs;
+ assertEquals("med and up, not max", maxId - medId, result.length);
+
+ result = search.search(q, new TermRangeFilter("id", minIP, medIP, F, T),
+ numDocs).scoreDocs;
+ assertEquals("not min, up to med", medId - minId, result.length);
+
+ // very small sets
+
+ result = search.search(q, new TermRangeFilter("id", minIP, minIP, F, F),
+ numDocs).scoreDocs;
+ assertEquals("min,min,F,F", 0, result.length);
+ result = search.search(q, new TermRangeFilter("id", medIP, medIP, F, F),
+ numDocs).scoreDocs;
+ assertEquals("med,med,F,F", 0, result.length);
+ result = search.search(q, new TermRangeFilter("id", maxIP, maxIP, F, F),
+ numDocs).scoreDocs;
+ assertEquals("max,max,F,F", 0, result.length);
+
+ result = search.search(q, new TermRangeFilter("id", minIP, minIP, T, T),
+ numDocs).scoreDocs;
+ assertEquals("min,min,T,T", 1, result.length);
+ result = search.search(q, new TermRangeFilter("id", null, minIP, F, T),
+ numDocs).scoreDocs;
+ assertEquals("nul,min,F,T", 1, result.length);
+
+ result = search.search(q, new TermRangeFilter("id", maxIP, maxIP, T, T),
+ numDocs).scoreDocs;
+ assertEquals("max,max,T,T", 1, result.length);
+ result = search.search(q, new TermRangeFilter("id", maxIP, null, T, F),
+ numDocs).scoreDocs;
+ assertEquals("max,nul,T,T", 1, result.length);
+
+ result = search.search(q, new TermRangeFilter("id", medIP, medIP, T, T),
+ numDocs).scoreDocs;
+ assertEquals("med,med,T,T", 1, result.length);
+
+ }
+
+ public void testRangeFilterIdCollating() throws IOException {
+
+ IndexReader reader = signedIndexReader;
+ IndexSearcher search = new IndexSearcher(reader);
+
+ Collator c = Collator.getInstance(Locale.ENGLISH);
+
+ int medId = ((maxId - minId) / 2);
+
+ String minIP = pad(minId);
+ String maxIP = pad(maxId);
+ String medIP = pad(medId);
+
+ int numDocs = reader.numDocs();
+
+ assertEquals("num of docs", numDocs, 1 + maxId - minId);
+
+ Query q = new TermQuery(new Term("body", "body"));
+
+ // test id, bounded on both ends
+ int numHits = search.search(q, new TermRangeFilter("id", minIP, maxIP, T,
+ T, c), 1000).totalHits;
+ assertEquals("find all", numDocs, numHits);
+
+ numHits = search.search(q,
+ new TermRangeFilter("id", minIP, maxIP, T, F, c), 1000).totalHits;
+ assertEquals("all but last", numDocs - 1, numHits);
+
+ numHits = search.search(q,
+ new TermRangeFilter("id", minIP, maxIP, F, T, c), 1000).totalHits;
+ assertEquals("all but first", numDocs - 1, numHits);
+
+ numHits = search.search(q,
+ new TermRangeFilter("id", minIP, maxIP, F, F, c), 1000).totalHits;
+ assertEquals("all but ends", numDocs - 2, numHits);
+
+ numHits = search.search(q,
+ new TermRangeFilter("id", medIP, maxIP, T, T, c), 1000).totalHits;
+ assertEquals("med and up", 1 + maxId - medId, numHits);
+
+ numHits = search.search(q,
+ new TermRangeFilter("id", minIP, medIP, T, T, c), 1000).totalHits;
+ assertEquals("up to med", 1 + medId - minId, numHits);
+
+ // unbounded id
+
+ numHits = search.search(q, new TermRangeFilter("id", minIP, null, T, F, c),
+ 1000).totalHits;
+ assertEquals("min and up", numDocs, numHits);
+
+ numHits = search.search(q, new TermRangeFilter("id", null, maxIP, F, T, c),
+ 1000).totalHits;
+ assertEquals("max and down", numDocs, numHits);
+
+ numHits = search.search(q, new TermRangeFilter("id", minIP, null, F, F, c),
+ 1000).totalHits;
+ assertEquals("not min, but up", numDocs - 1, numHits);
+
+ numHits = search.search(q, new TermRangeFilter("id", null, maxIP, F, F, c),
+ 1000).totalHits;
+ assertEquals("not max, but down", numDocs - 1, numHits);
+
+ numHits = search.search(q,
+ new TermRangeFilter("id", medIP, maxIP, T, F, c), 1000).totalHits;
+ assertEquals("med and up, not max", maxId - medId, numHits);
+
+ numHits = search.search(q,
+ new TermRangeFilter("id", minIP, medIP, F, T, c), 1000).totalHits;
+ assertEquals("not min, up to med", medId - minId, numHits);
+
+ // very small sets
+
+ numHits = search.search(q,
+ new TermRangeFilter("id", minIP, minIP, F, F, c), 1000).totalHits;
+ assertEquals("min,min,F,F", 0, numHits);
+ numHits = search.search(q,
+ new TermRangeFilter("id", medIP, medIP, F, F, c), 1000).totalHits;
+ assertEquals("med,med,F,F", 0, numHits);
+ numHits = search.search(q,
+ new TermRangeFilter("id", maxIP, maxIP, F, F, c), 1000).totalHits;
+ assertEquals("max,max,F,F", 0, numHits);
+
+ numHits = search.search(q,
+ new TermRangeFilter("id", minIP, minIP, T, T, c), 1000).totalHits;
+ assertEquals("min,min,T,T", 1, numHits);
+ numHits = search.search(q, new TermRangeFilter("id", null, minIP, F, T, c),
+ 1000).totalHits;
+ assertEquals("nul,min,F,T", 1, numHits);
+
+ numHits = search.search(q,
+ new TermRangeFilter("id", maxIP, maxIP, T, T, c), 1000).totalHits;
+ assertEquals("max,max,T,T", 1, numHits);
+ numHits = search.search(q, new TermRangeFilter("id", maxIP, null, T, F, c),
+ 1000).totalHits;
+ assertEquals("max,nul,T,T", 1, numHits);
+
+ numHits = search.search(q,
+ new TermRangeFilter("id", medIP, medIP, T, T, c), 1000).totalHits;
+ assertEquals("med,med,T,T", 1, numHits);
+ }
+
+ public void testRangeFilterRand() throws IOException {
+
+ IndexReader reader = signedIndexReader;
+ IndexSearcher search = new IndexSearcher(reader);
+
+ String minRP = pad(signedIndexDir.minR);
+ String maxRP = pad(signedIndexDir.maxR);
+
+ int numDocs = reader.numDocs();
+
+ assertEquals("num of docs", numDocs, 1 + maxId - minId);
+
+ ScoreDoc[] result;
+ Query q = new TermQuery(new Term("body", "body"));
+
+ // test extremes, bounded on both ends
+
+ result = search.search(q, new TermRangeFilter("rand", minRP, maxRP, T, T),
+ numDocs).scoreDocs;
+ assertEquals("find all", numDocs, result.length);
+
+ result = search.search(q, new TermRangeFilter("rand", minRP, maxRP, T, F),
+ numDocs).scoreDocs;
+ assertEquals("all but biggest", numDocs - 1, result.length);
+
+ result = search.search(q, new TermRangeFilter("rand", minRP, maxRP, F, T),
+ numDocs).scoreDocs;
+ assertEquals("all but smallest", numDocs - 1, result.length);
+
+ result = search.search(q, new TermRangeFilter("rand", minRP, maxRP, F, F),
+ numDocs).scoreDocs;
+ assertEquals("all but extremes", numDocs - 2, result.length);
+
+ // unbounded
+
+ result = search.search(q, new TermRangeFilter("rand", minRP, null, T, F),
+ numDocs).scoreDocs;
+ assertEquals("smallest and up", numDocs, result.length);
+
+ result = search.search(q, new TermRangeFilter("rand", null, maxRP, F, T),
+ numDocs).scoreDocs;
+ assertEquals("biggest and down", numDocs, result.length);
+
+ result = search.search(q, new TermRangeFilter("rand", minRP, null, F, F),
+ numDocs).scoreDocs;
+ assertEquals("not smallest, but up", numDocs - 1, result.length);
+
+ result = search.search(q, new TermRangeFilter("rand", null, maxRP, F, F),
+ numDocs).scoreDocs;
+ assertEquals("not biggest, but down", numDocs - 1, result.length);
+
+ // very small sets
+
+ result = search.search(q, new TermRangeFilter("rand", minRP, minRP, F, F),
+ numDocs).scoreDocs;
+ assertEquals("min,min,F,F", 0, result.length);
+ result = search.search(q, new TermRangeFilter("rand", maxRP, maxRP, F, F),
+ numDocs).scoreDocs;
+ assertEquals("max,max,F,F", 0, result.length);
+
+ result = search.search(q, new TermRangeFilter("rand", minRP, minRP, T, T),
+ numDocs).scoreDocs;
+ assertEquals("min,min,T,T", 1, result.length);
+ result = search.search(q, new TermRangeFilter("rand", null, minRP, F, T),
+ numDocs).scoreDocs;
+ assertEquals("nul,min,F,T", 1, result.length);
+
+ result = search.search(q, new TermRangeFilter("rand", maxRP, maxRP, T, T),
+ numDocs).scoreDocs;
+ assertEquals("max,max,T,T", 1, result.length);
+ result = search.search(q, new TermRangeFilter("rand", maxRP, null, T, F),
+ numDocs).scoreDocs;
+ assertEquals("max,nul,T,T", 1, result.length);
+
+ }
+
+ public void testRangeFilterRandCollating() throws IOException {
+
+ // using the unsigned index because collation seems to ignore hyphens
+ IndexReader reader = unsignedIndexReader;
+ IndexSearcher search = new IndexSearcher(reader);
+
+ Collator c = Collator.getInstance(Locale.ENGLISH);
+
+ String minRP = pad(unsignedIndexDir.minR);
+ String maxRP = pad(unsignedIndexDir.maxR);
+
+ int numDocs = reader.numDocs();
+
+ assertEquals("num of docs", numDocs, 1 + maxId - minId);
+
+ Query q = new TermQuery(new Term("body", "body"));
+
+ // test extremes, bounded on both ends
+
+ int numHits = search.search(q, new TermRangeFilter("rand", minRP, maxRP, T,
+ T, c), 1000).totalHits;
+ assertEquals("find all", numDocs, numHits);
+
+ numHits = search.search(q, new TermRangeFilter("rand", minRP, maxRP, T, F,
+ c), 1000).totalHits;
+ assertEquals("all but biggest", numDocs - 1, numHits);
+
+ numHits = search.search(q, new TermRangeFilter("rand", minRP, maxRP, F, T,
+ c), 1000).totalHits;
+ assertEquals("all but smallest", numDocs - 1, numHits);
+
+ numHits = search.search(q, new TermRangeFilter("rand", minRP, maxRP, F, F,
+ c), 1000).totalHits;
+ assertEquals("all but extremes", numDocs - 2, numHits);
+
+ // unbounded
+
+ numHits = search.search(q,
+ new TermRangeFilter("rand", minRP, null, T, F, c), 1000).totalHits;
+ assertEquals("smallest and up", numDocs, numHits);
+
+ numHits = search.search(q,
+ new TermRangeFilter("rand", null, maxRP, F, T, c), 1000).totalHits;
+ assertEquals("biggest and down", numDocs, numHits);
+
+ numHits = search.search(q,
+ new TermRangeFilter("rand", minRP, null, F, F, c), 1000).totalHits;
+ assertEquals("not smallest, but up", numDocs - 1, numHits);
+
+ numHits = search.search(q,
+ new TermRangeFilter("rand", null, maxRP, F, F, c), 1000).totalHits;
+ assertEquals("not biggest, but down", numDocs - 1, numHits);
+
+ // very small sets
+
+ numHits = search.search(q, new TermRangeFilter("rand", minRP, minRP, F, F,
+ c), 1000).totalHits;
+ assertEquals("min,min,F,F", 0, numHits);
+ numHits = search.search(q, new TermRangeFilter("rand", maxRP, maxRP, F, F,
+ c), 1000).totalHits;
+ assertEquals("max,max,F,F", 0, numHits);
+
+ numHits = search.search(q, new TermRangeFilter("rand", minRP, minRP, T, T,
+ c), 1000).totalHits;
+ assertEquals("min,min,T,T", 1, numHits);
+ numHits = search.search(q,
+ new TermRangeFilter("rand", null, minRP, F, T, c), 1000).totalHits;
+ assertEquals("nul,min,F,T", 1, numHits);
+
+ numHits = search.search(q, new TermRangeFilter("rand", maxRP, maxRP, T, T,
+ c), 1000).totalHits;
+ assertEquals("max,max,T,T", 1, numHits);
+ numHits = search.search(q,
+ new TermRangeFilter("rand", maxRP, null, T, F, c), 1000).totalHits;
+ assertEquals("max,nul,T,T", 1, numHits);
+ }
+
+ public void testFarsi() throws Exception {
+
+ /* build an index */
+ RAMDirectory farsiIndex = new RAMDirectory();
+ RandomIndexWriter writer = new RandomIndexWriter(rand, farsiIndex,
+ new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer()));
+ Document doc = new Document();
+ doc.add(new Field("content", "\u0633\u0627\u0628", Field.Store.YES,
+ Field.Index.NOT_ANALYZED));
+ doc
+ .add(new Field("body", "body", Field.Store.YES,
+ Field.Index.NOT_ANALYZED));
+ writer.addDocument(doc);
+
+ IndexReader reader = writer.getReader();
+ writer.close();
+
+ IndexSearcher search = new IndexSearcher(reader);
+ Query q = new TermQuery(new Term("body", "body"));
+
+ // Neither Java 1.4.2 nor 1.5.0 has Farsi Locale collation available in
+ // RuleBasedCollator. However, the Arabic Locale seems to order the Farsi
+ // characters properly.
+ Collator collator = Collator.getInstance(new Locale("ar"));
+
+ // Unicode order would include U+0633 in [ U+062F - U+0698 ], but Farsi
+ // orders the U+0698 character before the U+0633 character, so the single
+ // index Term below should NOT be returned by a TermRangeFilter with a Farsi
+ // Collator (or an Arabic one for the case when Farsi is not supported).
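+ // Editorial illustration (not part of the original test): the exclusion below relies on
+ // collator.compare("\u0633", "\u0698") > 0, i.e. the indexed term's first character
+ // sorting after the upper bound U+0698 under this collator.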
+ int numHits = search.search(q, new TermRangeFilter("content", "\u062F",
+ "\u0698", T, T, collator), 1000).totalHits;
+ assertEquals("The index Term should not be included.", 0, numHits);
+
+ numHits = search.search(q, new TermRangeFilter("content", "\u0633",
+ "\u0638", T, T, collator), 1000).totalHits;
+ assertEquals("The index Term should be included.", 1, numHits);
+ search.close();
+ reader.close();
+ farsiIndex.close();
+ }
+
+ public void testDanish() throws Exception {
+
+ /* build an index */
+ RAMDirectory danishIndex = new RAMDirectory();
+ RandomIndexWriter writer = new RandomIndexWriter(rand, danishIndex,
+ new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer()));
+ // Danish collation orders the words below in the given order
+ // (example taken from TestSort.testInternationalSort() ).
+ String[] words = {"H\u00D8T", "H\u00C5T", "MAND"};
+ for (int docnum = 0; docnum < words.length; ++docnum) {
+ Document doc = new Document();
+ doc.add(new Field("content", words[docnum], Field.Store.YES,
+ Field.Index.NOT_ANALYZED));
+ doc.add(new Field("body", "body", Field.Store.YES,
+ Field.Index.NOT_ANALYZED));
+ writer.addDocument(doc);
}
+ IndexReader reader = writer.getReader();
+ writer.close();
+
+ IndexSearcher search = new IndexSearcher(reader);
+ Query q = new TermQuery(new Term("body", "body"));
+
+ Collator collator = Collator.getInstance(new Locale("da", "dk"));
+
+ // Unicode order would not include "H\u00C5T" in [ "H\u00D8T", "MAND" ],
+ // but Danish collation does.
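+ // Editorial illustration (not part of the original test): the Danish alphabet ends with
+ // \u00C6, \u00D8, \u00C5, so the da/dk collator places "H\u00C5T" after "H\u00D8T" and
+ // before "MAND", i.e. inside the open range used below.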
+ int numHits = search.search(q, new TermRangeFilter("content", "H\u00D8T",
+ "MAND", F, F, collator), 1000).totalHits;
+ assertEquals("The index Term should be included.", 1, numHits);
+
+ numHits = search.search(q, new TermRangeFilter("content", "H\u00C5T",
+ "MAND", F, F, collator), 1000).totalHits;
+ assertEquals("The index Term should not be included.", 0, numHits);
+ search.close();
+ reader.close();
+ danishIndex.close();
+ }
}
diff --git a/lucene/src/test/org/apache/lucene/search/TestTermScorer.java b/lucene/src/test/org/apache/lucene/search/TestTermScorer.java
index 29836ab3a21..17d0b9ef0fd 100644
--- a/lucene/src/test/org/apache/lucene/search/TestTermScorer.java
+++ b/lucene/src/test/org/apache/lucene/search/TestTermScorer.java
@@ -26,152 +26,155 @@ import org.apache.lucene.analysis.MockAnalyzer;
import org.apache.lucene.document.Document;
import org.apache.lucene.document.Field;
import org.apache.lucene.index.IndexReader;
-import org.apache.lucene.index.IndexWriter;
import org.apache.lucene.index.IndexWriterConfig;
+import org.apache.lucene.index.RandomIndexWriter;
import org.apache.lucene.index.Term;
import org.apache.lucene.store.RAMDirectory;
-public class TestTermScorer extends LuceneTestCase
-{
- protected RAMDirectory directory;
- private static final String FIELD = "field";
-
- protected String[] values = new String[]{"all", "dogs dogs", "like", "playing", "fetch", "all"};
- protected IndexSearcher indexSearcher;
- protected IndexReader indexReader;
-
-
- public TestTermScorer(String s)
- {
- super(s);
+public class TestTermScorer extends LuceneTestCase {
+ protected RAMDirectory directory;
+ private static final String FIELD = "field";
+
+ protected String[] values = new String[] {"all", "dogs dogs", "like",
+ "playing", "fetch", "all"};
+ protected IndexSearcher indexSearcher;
+ protected IndexReader indexReader;
+
+ public TestTermScorer(String s) {
+ super(s);
+ }
+
+ @Override
+ protected void setUp() throws Exception {
+ super.setUp();
+ directory = new RAMDirectory();
+
+ RandomIndexWriter writer = new RandomIndexWriter(newRandom(), directory,
+ new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer()));
+ for (int i = 0; i < values.length; i++) {
+ Document doc = new Document();
+ doc
+ .add(new Field(FIELD, values[i], Field.Store.YES,
+ Field.Index.ANALYZED));
+ writer.addDocument(doc);
}
+ indexReader = writer.getReader();
+ writer.close();
+ indexSearcher = new IndexSearcher(indexReader);
+ }
+
+ @Override
+ protected void tearDown() throws Exception {
+ indexSearcher.close();
+ indexReader.close();
+ directory.close();
+ }
+ public void test() throws IOException {
+
+ Term allTerm = new Term(FIELD, "all");
+ TermQuery termQuery = new TermQuery(allTerm);
+
+ Weight weight = termQuery.weight(indexSearcher);
+
+ Scorer ts = weight.scorer(indexSearcher.getIndexReader(), true, true);
+ // we have 2 documents with the term all in them, one document for all the
+ // other values
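+ // Editorial note: given values = {"all", "dogs dogs", "like", "playing", "fetch", "all"},
+ // the matching documents are 0 and 5, which the collector below asserts.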
+ final List<TestHit> docs = new ArrayList<TestHit>();
+ // must call next first
+
+ ts.score(new Collector() {
+ private int base = 0;
+ private Scorer scorer;
+
+ @Override
+ public void setScorer(Scorer scorer) throws IOException {
+ this.scorer = scorer;
+ }
+
+ @Override
+ public void collect(int doc) throws IOException {
+ float score = scorer.score();
+ doc = doc + base;
+ docs.add(new TestHit(doc, score));
+ assertTrue("score " + score + " is not greater than 0", score > 0);
+ assertTrue("Doc: " + doc + " does not equal 0 or doc does not equal 5",
+ doc == 0 || doc == 5);
+ }
+
+ @Override
+ public void setNextReader(IndexReader reader, int docBase) {
+ base = docBase;
+ }
+
+ @Override
+ public boolean acceptsDocsOutOfOrder() {
+ return true;
+ }
+ });
+ assertTrue("docs Size: " + docs.size() + " is not: " + 2, docs.size() == 2);
+ TestHit doc0 = docs.get(0);
+ TestHit doc5 = docs.get(1);
+ // The scores should be the same
+ assertTrue(doc0.score + " does not equal: " + doc5.score,
+ doc0.score == doc5.score);
+ /*
+ * Score should be (based on Default Sim.; all floats are approximate):
+ * tf = 1
+ * numDocs = 6
+ * docFreq(all) = 2
+ * idf = ln(6/3) + 1 = 1.693147
+ * idf ^ 2 = 2.8667
+ * boost = 1
+ * lengthNorm = 1 (there is 1 term in every document)
+ * coord = 1
+ * sumOfSquaredWeights = (idf * boost) ^ 2 = 1.693147 ^ 2 = 2.8667
+ * queryNorm = 1 / (sumOfSquaredWeights)^0.5 = 1 / 1.693147 = 0.590
+ *
+ * score = 1 * 2.8667 * 1 * 1 * 0.590 = 1.69
+ */
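+ // Editorial note: since queryNorm = 1/idf here, the factors reduce to
+ // score = idf^2 * queryNorm = idf = 1.6931472, the exact value asserted below.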
+ assertTrue(doc0.score + " does not equal: " + 1.6931472f,
+ doc0.score == 1.6931472f);
+ }
+
+ public void testNext() throws Exception {
+
+ Term allTerm = new Term(FIELD, "all");
+ TermQuery termQuery = new TermQuery(allTerm);
+
+ Weight weight = termQuery.weight(indexSearcher);
+
+ Scorer ts = weight.scorer(indexSearcher.getIndexReader(), true, true);
+ assertTrue("next did not return a doc",
+ ts.nextDoc() != DocIdSetIterator.NO_MORE_DOCS);
+ assertTrue("score is not correct", ts.score() == 1.6931472f);
+ assertTrue("next did not return a doc",
+ ts.nextDoc() != DocIdSetIterator.NO_MORE_DOCS);
+ assertTrue("score is not correct", ts.score() == 1.6931472f);
+ assertTrue("next returned a doc and it should not have",
+ ts.nextDoc() == DocIdSetIterator.NO_MORE_DOCS);
+ }
+
+ public void testAdvance() throws Exception {
+
+ Term allTerm = new Term(FIELD, "all");
+ TermQuery termQuery = new TermQuery(allTerm);
+
+ Weight weight = termQuery.weight(indexSearcher);
+
+ Scorer ts = weight.scorer(indexSearcher.getIndexReader(), true, true);
+ assertTrue("Didn't skip", ts.advance(3) != DocIdSetIterator.NO_MORE_DOCS);
+ // The next doc should be doc 5
+ assertTrue("doc should be number 5", ts.docID() == 5);
+ }
+
+ private class TestHit {
+ public int doc;
+ public float score;
+
+ public TestHit(int doc, float score) {
+ this.doc = doc;
+ this.score = score;
+ }
+
@Override
- protected void setUp() throws Exception {
- super.setUp();
- directory = new RAMDirectory();
-
- IndexWriter writer = new IndexWriter(directory, new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer()));
- for (int i = 0; i < values.length; i++) {
- Document doc = new Document();
- doc.add(new Field(FIELD, values[i], Field.Store.YES, Field.Index.ANALYZED));
- writer.addDocument(doc);
- }
- writer.close();
- indexSearcher = new IndexSearcher(directory, false);
- indexReader = indexSearcher.getIndexReader();
-
-
+ public String toString() {
+ return "TestHit{" + "doc=" + doc + ", score=" + score + "}";
}
-
- public void test() throws IOException {
-
- Term allTerm = new Term(FIELD, "all");
- TermQuery termQuery = new TermQuery(allTerm);
-
- Weight weight = termQuery.weight(indexSearcher);
-
- Scorer ts = weight.scorer(indexSearcher.getIndexReader(),
- true, true);
- //we have 2 documents with the term all in them, one document for all the other values
- final List<TestHit> docs = new ArrayList<TestHit>();
- //must call next first
-
-
- ts.score(new Collector() {
- private int base = 0;
- private Scorer scorer;
- @Override
- public void setScorer(Scorer scorer) throws IOException {
- this.scorer = scorer;
- }
-
- @Override
- public void collect(int doc) throws IOException {
- float score = scorer.score();
- doc = doc + base;
- docs.add(new TestHit(doc, score));
- assertTrue("score " + score + " is not greater than 0", score > 0);
- assertTrue("Doc: " + doc + " does not equal 0 or doc does not equal 5",
- doc == 0 || doc == 5);
- }
- @Override
- public void setNextReader(IndexReader reader, int docBase) {
- base = docBase;
- }
- @Override
- public boolean acceptsDocsOutOfOrder() {
- return true;
- }
- });
- assertTrue("docs Size: " + docs.size() + " is not: " + 2, docs.size() == 2);
- TestHit doc0 = docs.get(0);
- TestHit doc5 = docs.get(1);
- //The scores should be the same
- assertTrue(doc0.score + " does not equal: " + doc5.score, doc0.score == doc5.score);
- /*
- Score should be (based on Default Sim.:
- All floats are approximate
- tf = 1
- numDocs = 6
- docFreq(all) = 2
- idf = ln(6/3) + 1 = 1.693147
- idf ^ 2 = 2.8667
- boost = 1
- lengthNorm = 1 //there is 1 term in every document
- coord = 1
- sumOfSquaredWeights = (idf * boost) ^ 2 = 1.693147 ^ 2 = 2.8667
- queryNorm = 1 / (sumOfSquaredWeights)^0.5 = 1 /(1.693147) = 0.590
-
- score = 1 * 2.8667 * 1 * 1 * 0.590 = 1.69
-
- */
- assertTrue(doc0.score + " does not equal: " + 1.6931472f, doc0.score == 1.6931472f);
- }
-
- public void testNext() throws Exception {
-
- Term allTerm = new Term(FIELD, "all");
- TermQuery termQuery = new TermQuery(allTerm);
-
- Weight weight = termQuery.weight(indexSearcher);
-
- Scorer ts = weight.scorer(indexSearcher.getIndexReader(),
- true, true);
- assertTrue("next did not return a doc", ts.nextDoc() != DocIdSetIterator.NO_MORE_DOCS);
- assertTrue("score is not correct", ts.score() == 1.6931472f);
- assertTrue("next did not return a doc", ts.nextDoc() != DocIdSetIterator.NO_MORE_DOCS);
- assertTrue("score is not correct", ts.score() == 1.6931472f);
- assertTrue("next returned a doc and it should not have", ts.nextDoc() == DocIdSetIterator.NO_MORE_DOCS);
- }
-
- public void testAdvance() throws Exception {
-
- Term allTerm = new Term(FIELD, "all");
- TermQuery termQuery = new TermQuery(allTerm);
-
- Weight weight = termQuery.weight(indexSearcher);
-
- Scorer ts = weight.scorer(indexSearcher.getIndexReader(),
- true, true);
- assertTrue("Didn't skip", ts.advance(3) != DocIdSetIterator.NO_MORE_DOCS);
- //The next doc should be doc 5
- assertTrue("doc should be number 5", ts.docID() == 5);
- }
-
- private class TestHit {
- public int doc;
- public float score;
-
- public TestHit(int doc, float score) {
- this.doc = doc;
- this.score = score;
- }
-
- @Override
- public String toString() {
- return "TestHit{" + "doc=" + doc + ", score=" + score + "}";
- }
- }
-
+ }
+
}
diff --git a/lucene/src/test/org/apache/lucene/search/TestTermVectors.java b/lucene/src/test/org/apache/lucene/search/TestTermVectors.java
index 6c11a280bc1..556eec44580 100644
--- a/lucene/src/test/org/apache/lucene/search/TestTermVectors.java
+++ b/lucene/src/test/org/apache/lucene/search/TestTermVectors.java
@@ -32,11 +32,16 @@ import org.apache.lucene.util.English;
import java.io.IOException;
import java.util.HashMap;
import java.util.Map;
+import java.util.Random;
import java.util.SortedSet;
public class TestTermVectors extends LuceneTestCase {
private IndexSearcher searcher;
+ private IndexReader reader;
private Directory directory = new MockRAMDirectory();
+
+ private Random random;
+
public TestTermVectors(String s) {
super(s);
}
@@ -44,8 +49,9 @@ public class TestTermVectors extends LuceneTestCase {
@Override
protected void setUp() throws Exception {
super.setUp();
- IndexWriter writer = new IndexWriter(directory, new IndexWriterConfig(
- TEST_VERSION_CURRENT, new MockAnalyzer(MockTokenizer.SIMPLE, true)));
+ random = newRandom();
+ RandomIndexWriter writer = new RandomIndexWriter(random, directory,
+ new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(MockTokenizer.SIMPLE, true)));
//writer.setUseCompoundFile(true);
//writer.infoStream = System.out;
for (int i = 0; i < 1000; i++) {
@@ -72,51 +78,55 @@ public class TestTermVectors extends LuceneTestCase {
Field.Store.YES, Field.Index.ANALYZED));
writer.addDocument(doc);
}
+ reader = writer.getReader();
writer.close();
- searcher = new IndexSearcher(directory, true);
+ searcher = new IndexSearcher(reader);
+ }
+
+ @Override
+ protected void tearDown() throws Exception {
+ searcher.close();
+ reader.close();
+ directory.close();
+ super.tearDown();
}
public void test() {
assertTrue(searcher != null);
}
- public void testTermVectors() {
+ public void testTermVectors() throws IOException {
Query query = new TermQuery(new Term("field", "seventy"));
- try {
- ScoreDoc[] hits = searcher.search(query, null, 1000).scoreDocs;
- assertEquals(100, hits.length);
+ ScoreDoc[] hits = searcher.search(query, null, 1000).scoreDocs;
+ assertEquals(100, hits.length);
- for (int i = 0; i < hits.length; i++)
- {
- TermFreqVector [] vector = searcher.reader.getTermFreqVectors(hits[i].doc);
- assertTrue(vector != null);
- assertTrue(vector.length == 1);
- }
- TermFreqVector vector;
- vector = searcher.reader.getTermFreqVector(hits[0].doc, "noTV");
- assertNull(vector);
-
- TestTermVectorMapper mapper = new TestTermVectorMapper();
- searcher.reader.getTermFreqVector(hits[0].doc, "noTV", mapper);
- assertNull(mapper.field);
-
- } catch (IOException e) {
- assertTrue(false);
+ for (int i = 0; i < hits.length; i++)
+ {
+ TermFreqVector [] vector = searcher.reader.getTermFreqVectors(hits[i].doc);
+ assertTrue(vector != null);
+ assertTrue(vector.length == 1);
}
+ TermFreqVector vector;
+ vector = searcher.reader.getTermFreqVector(hits[0].doc, "noTV");
+ assertNull(vector);
+
+ TestTermVectorMapper mapper = new TestTermVectorMapper();
+ searcher.reader.getTermFreqVector(hits[0].doc, "noTV", mapper);
+ assertNull(mapper.field);
}
public void testTermVectorsFieldOrder() throws IOException {
Directory dir = new MockRAMDirectory();
- IndexWriter writer = new IndexWriter(dir, new IndexWriterConfig(
- TEST_VERSION_CURRENT, new MockAnalyzer(MockTokenizer.SIMPLE, true)));
+ RandomIndexWriter writer = new RandomIndexWriter(random, dir,
+ new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(MockTokenizer.SIMPLE, true)));
Document doc = new Document();
doc.add(new Field("c", "some content here", Field.Store.YES, Field.Index.ANALYZED, Field.TermVector.WITH_POSITIONS_OFFSETS));
doc.add(new Field("a", "some content here", Field.Store.YES, Field.Index.ANALYZED, Field.TermVector.WITH_POSITIONS_OFFSETS));
doc.add(new Field("b", "some content here", Field.Store.YES, Field.Index.ANALYZED, Field.TermVector.WITH_POSITIONS_OFFSETS));
doc.add(new Field("x", "some content here", Field.Store.YES, Field.Index.ANALYZED, Field.TermVector.WITH_POSITIONS_OFFSETS));
writer.addDocument(doc);
+ IndexReader reader = writer.getReader();
writer.close();
- IndexReader reader = IndexReader.open(dir, true);
TermFreqVector[] v = reader.getTermFreqVectors(0);
assertEquals(4, v.length);
String[] expectedFields = new String[]{"a", "b", "c", "x"};
@@ -135,65 +145,57 @@ public class TestTermVectors extends LuceneTestCase {
assertEquals(expectedPositions[j], positions[0]);
}
}
+ reader.close();
+ dir.close();
}
- public void testTermPositionVectors() {
+ public void testTermPositionVectors() throws IOException {
Query query = new TermQuery(new Term("field", "zero"));
- try {
- ScoreDoc[] hits = searcher.search(query, null, 1000).scoreDocs;
- assertEquals(1, hits.length);
+ ScoreDoc[] hits = searcher.search(query, null, 1000).scoreDocs;
+ assertEquals(1, hits.length);
+
+ for (int i = 0; i < hits.length; i++) {
+ TermFreqVector[] vector = searcher.reader.getTermFreqVectors(hits[i].doc);
+ assertTrue(vector != null);
+ assertTrue(vector.length == 1);
- for (int i = 0; i < hits.length; i++)
- {
- TermFreqVector [] vector = searcher.reader.getTermFreqVectors(hits[i].doc);
- assertTrue(vector != null);
- assertTrue(vector.length == 1);
+ boolean shouldBePosVector = (hits[i].doc % 2 == 0) ? true : false;
+ assertTrue((shouldBePosVector == false)
+ || (shouldBePosVector == true && (vector[0] instanceof TermPositionVector == true)));
+
+ boolean shouldBeOffVector = (hits[i].doc % 3 == 0) ? true : false;
+ assertTrue((shouldBeOffVector == false)
+ || (shouldBeOffVector == true && (vector[0] instanceof TermPositionVector == true)));
+
+ if (shouldBePosVector || shouldBeOffVector) {
+ TermPositionVector posVec = (TermPositionVector) vector[0];
+ BytesRef[] terms = posVec.getTerms();
+ assertTrue(terms != null && terms.length > 0);
- boolean shouldBePosVector = (hits[i].doc % 2 == 0) ? true : false;
- assertTrue((shouldBePosVector == false) || (shouldBePosVector == true && (vector[0] instanceof TermPositionVector == true)));
-
- boolean shouldBeOffVector = (hits[i].doc % 3 == 0) ? true : false;
- assertTrue((shouldBeOffVector == false) || (shouldBeOffVector == true && (vector[0] instanceof TermPositionVector == true)));
-
- if(shouldBePosVector || shouldBeOffVector){
- TermPositionVector posVec = (TermPositionVector)vector[0];
- BytesRef [] terms = posVec.getTerms();
+ for (int j = 0; j < terms.length; j++) {
+ int[] positions = posVec.getTermPositions(j);
+ TermVectorOffsetInfo[] offsets = posVec.getOffsets(j);
+
+ if (shouldBePosVector) {
+ assertTrue(positions != null);
+ assertTrue(positions.length > 0);
+ } else assertTrue(positions == null);
+
+ if (shouldBeOffVector) {
+ assertTrue(offsets != null);
+ assertTrue(offsets.length > 0);
+ } else assertTrue(offsets == null);
+ }
+ } else {
+ try {
+ assertTrue(false);
+ } catch (ClassCastException ignore) {
+ TermFreqVector freqVec = vector[0];
+ BytesRef[] terms = freqVec.getTerms();
assertTrue(terms != null && terms.length > 0);
-
- for (int j = 0; j < terms.length; j++) {
- int [] positions = posVec.getTermPositions(j);
- TermVectorOffsetInfo [] offsets = posVec.getOffsets(j);
-
- if(shouldBePosVector){
- assertTrue(positions != null);
- assertTrue(positions.length > 0);
- }
- else
- assertTrue(positions == null);
-
- if(shouldBeOffVector){
- assertTrue(offsets != null);
- assertTrue(offsets.length > 0);
- }
- else
- assertTrue(offsets == null);
- }
}
- else{
- try{
- assertTrue(false);
- }
- catch(ClassCastException ignore){
- TermFreqVector freqVec = vector[0];
- BytesRef [] terms = freqVec.getTerms();
- assertTrue(terms != null && terms.length > 0);
- }
-
- }
-
+
}
- } catch (IOException e) {
- assertTrue(false);
}
}
@@ -216,7 +218,7 @@ public class TestTermVectors extends LuceneTestCase {
}
}
- public void testKnownSetOfDocuments() {
+ public void testKnownSetOfDocuments() throws IOException {
String test1 = "eating chocolate in a computer lab"; //6 terms
String test2 = "computer in a computer lab"; //5 terms
String test3 = "a chocolate lab grows old"; //5 terms
@@ -242,112 +244,109 @@ public class TestTermVectors extends LuceneTestCase {
setupDoc(testDoc3, test3);
Document testDoc4 = new Document();
setupDoc(testDoc4, test4);
-
+
Directory dir = new MockRAMDirectory();
- try {
- IndexWriter writer = new IndexWriter(dir, new IndexWriterConfig(
- TEST_VERSION_CURRENT,
- new MockAnalyzer(MockTokenizer.SIMPLE, true))
- .setOpenMode(OpenMode.CREATE));
- writer.addDocument(testDoc1);
- writer.addDocument(testDoc2);
- writer.addDocument(testDoc3);
- writer.addDocument(testDoc4);
- writer.close();
- IndexSearcher knownSearcher = new IndexSearcher(dir, true);
- FieldsEnum fields = MultiFields.getFields(knownSearcher.reader).iterator();
-
- DocsEnum docs = null;
- while(fields.next() != null) {
- TermsEnum terms = fields.terms();
- while(terms.next() != null) {
- String text = terms.term().utf8ToString();
- docs = terms.docs(MultiFields.getDeletedDocs(knownSearcher.reader), docs);
-
- while (docs.nextDoc() != DocsEnum.NO_MORE_DOCS) {
- int docId = docs.docID();
- int freq = docs.freq();
- //System.out.println("Doc Id: " + docId + " freq " + freq);
- TermFreqVector vector = knownSearcher.reader.getTermFreqVector(docId, "field");
- //float tf = sim.tf(freq);
- //float idf = sim.idf(knownSearcher.docFreq(term), knownSearcher.maxDoc());
- //float qNorm = sim.queryNorm()
- //This is fine since we don't have stop words
- //float lNorm = sim.lengthNorm("field", vector.getTerms().length);
- //float coord = sim.coord()
- //System.out.println("TF: " + tf + " IDF: " + idf + " LenNorm: " + lNorm);
- assertTrue(vector != null);
- BytesRef[] vTerms = vector.getTerms();
- int [] freqs = vector.getTermFrequencies();
- for (int i = 0; i < vTerms.length; i++)
- {
- if (text.equals(vTerms[i].utf8ToString()))
- {
- assertTrue(freqs[i] == freq);
- }
- }
+ RandomIndexWriter writer = new RandomIndexWriter(random, dir,
+ new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(MockTokenizer.SIMPLE, true))
+ .setOpenMode(OpenMode.CREATE));
+ writer.addDocument(testDoc1);
+ writer.addDocument(testDoc2);
+ writer.addDocument(testDoc3);
+ writer.addDocument(testDoc4);
+ IndexReader reader = writer.getReader();
+ writer.close();
+ IndexSearcher knownSearcher = new IndexSearcher(reader);
+ FieldsEnum fields = MultiFields.getFields(knownSearcher.reader).iterator();
+
+ DocsEnum docs = null;
+ while(fields.next() != null) {
+ TermsEnum terms = fields.terms();
+ while(terms.next() != null) {
+ String text = terms.term().utf8ToString();
+ docs = terms.docs(MultiFields.getDeletedDocs(knownSearcher.reader), docs);
+
+ while (docs.nextDoc() != DocsEnum.NO_MORE_DOCS) {
+ int docId = docs.docID();
+ int freq = docs.freq();
+ //System.out.println("Doc Id: " + docId + " freq " + freq);
+ TermFreqVector vector = knownSearcher.reader.getTermFreqVector(docId, "field");
+ //float tf = sim.tf(freq);
+ //float idf = sim.idf(knownSearcher.docFreq(term), knownSearcher.maxDoc());
+ //float qNorm = sim.queryNorm()
+ //This is fine since we don't have stop words
+ //float lNorm = sim.lengthNorm("field", vector.getTerms().length);
+ //float coord = sim.coord()
+ //System.out.println("TF: " + tf + " IDF: " + idf + " LenNorm: " + lNorm);
+ assertTrue(vector != null);
+ BytesRef[] vTerms = vector.getTerms();
+ int [] freqs = vector.getTermFrequencies();
+ for (int i = 0; i < vTerms.length; i++)
+ {
+ if (text.equals(vTerms[i].utf8ToString()))
+ {
+ assertTrue(freqs[i] == freq);
+ }
}
}
- //System.out.println("--------");
}
- Query query = new TermQuery(new Term("field", "chocolate"));
- ScoreDoc[] hits = knownSearcher.search(query, null, 1000).scoreDocs;
- //doc 3 should be the first hit b/c it is the shortest match
- assertTrue(hits.length == 3);
- /*System.out.println("Hit 0: " + hits.id(0) + " Score: " + hits.score(0) + " String: " + hits.doc(0).toString());
+ //System.out.println("--------");
+ }
+ Query query = new TermQuery(new Term("field", "chocolate"));
+ ScoreDoc[] hits = knownSearcher.search(query, null, 1000).scoreDocs;
+ //doc 3 should be the first hit b/c it is the shortest match
+ assertTrue(hits.length == 3);
+ /*System.out.println("Hit 0: " + hits.id(0) + " Score: " + hits.score(0) + " String: " + hits.doc(0).toString());
System.out.println("Explain: " + knownSearcher.explain(query, hits.id(0)));
System.out.println("Hit 1: " + hits.id(1) + " Score: " + hits.score(1) + " String: " + hits.doc(1).toString());
System.out.println("Explain: " + knownSearcher.explain(query, hits.id(1)));
System.out.println("Hit 2: " + hits.id(2) + " Score: " + hits.score(2) + " String: " + hits.doc(2).toString());
System.out.println("Explain: " + knownSearcher.explain(query, hits.id(2)));*/
- assertTrue(hits[0].doc == 2);
- assertTrue(hits[1].doc == 3);
- assertTrue(hits[2].doc == 0);
- TermFreqVector vector = knownSearcher.reader.getTermFreqVector(hits[1].doc, "field");
- assertTrue(vector != null);
- //System.out.println("Vector: " + vector);
- BytesRef[] terms = vector.getTerms();
- int [] freqs = vector.getTermFrequencies();
- assertTrue(terms != null && terms.length == 10);
- for (int i = 0; i < terms.length; i++) {
- String term = terms[i].utf8ToString();
- //System.out.println("Term: " + term);
- int freq = freqs[i];
- assertTrue(test4.indexOf(term) != -1);
- Integer freqInt = test4Map.get(term);
- assertTrue(freqInt != null);
- assertTrue(freqInt.intValue() == freq);
- }
- SortedTermVectorMapper mapper = new SortedTermVectorMapper(new TermVectorEntryFreqSortedComparator());
- knownSearcher.reader.getTermFreqVector(hits[1].doc, mapper);
- SortedSet<TermVectorEntry> vectorEntrySet = mapper.getTermVectorEntrySet();
- assertTrue("mapper.getTermVectorEntrySet() Size: " + vectorEntrySet.size() + " is not: " + 10, vectorEntrySet.size() == 10);
- TermVectorEntry last = null;
- for (final TermVectorEntry tve : vectorEntrySet) {
- if (tve != null && last != null)
- {
- assertTrue("terms are not properly sorted", last.getFrequency() >= tve.getFrequency());
- Integer expectedFreq = test4Map.get(tve.getTerm().utf8ToString());
- //we expect double the expectedFreq, since there are two fields with the exact same text and we are collapsing all fields
- assertTrue("Frequency is not correct:", tve.getFrequency() == 2*expectedFreq.intValue());
- }
- last = tve;
-
- }
-
- FieldSortedTermVectorMapper fieldMapper = new FieldSortedTermVectorMapper(new TermVectorEntryFreqSortedComparator());
- knownSearcher.reader.getTermFreqVector(hits[1].doc, fieldMapper);
- Map<String, SortedSet<TermVectorEntry>> map = fieldMapper.getFieldToTerms();
- assertTrue("map Size: " + map.size() + " is not: " + 2, map.size() == 2);
- vectorEntrySet = map.get("field");
- assertTrue("vectorEntrySet is null and it shouldn't be", vectorEntrySet != null);
- assertTrue("vectorEntrySet Size: " + vectorEntrySet.size() + " is not: " + 10, vectorEntrySet.size() == 10);
- knownSearcher.close();
- } catch (IOException e) {
- e.printStackTrace();
- assertTrue(false);
+ assertTrue(hits[0].doc == 2);
+ assertTrue(hits[1].doc == 3);
+ assertTrue(hits[2].doc == 0);
+ TermFreqVector vector = knownSearcher.reader.getTermFreqVector(hits[1].doc, "field");
+ assertTrue(vector != null);
+ //System.out.println("Vector: " + vector);
+ BytesRef[] terms = vector.getTerms();
+ int [] freqs = vector.getTermFrequencies();
+ assertTrue(terms != null && terms.length == 10);
+ for (int i = 0; i < terms.length; i++) {
+ String term = terms[i].utf8ToString();
+ //System.out.println("Term: " + term);
+ int freq = freqs[i];
+ assertTrue(test4.indexOf(term) != -1);
+ Integer freqInt = test4Map.get(term);
+ assertTrue(freqInt != null);
+ assertTrue(freqInt.intValue() == freq);
}
+ SortedTermVectorMapper mapper = new SortedTermVectorMapper(new TermVectorEntryFreqSortedComparator());
+ knownSearcher.reader.getTermFreqVector(hits[1].doc, mapper);
+ SortedSet<TermVectorEntry> vectorEntrySet = mapper.getTermVectorEntrySet();
+ assertTrue("mapper.getTermVectorEntrySet() Size: " + vectorEntrySet.size() + " is not: " + 10, vectorEntrySet.size() == 10);
+ TermVectorEntry last = null;
+ for (final TermVectorEntry tve : vectorEntrySet) {
+ if (tve != null && last != null)
+ {
+ assertTrue("terms are not properly sorted", last.getFrequency() >= tve.getFrequency());
+ Integer expectedFreq = test4Map.get(tve.getTerm().utf8ToString());
+ //we expect double the expectedFreq, since there are two fields with the exact same text and we are collapsing all fields
+ assertTrue("Frequency is not correct:", tve.getFrequency() == 2*expectedFreq.intValue());
+ }
+ last = tve;
+
+ }
+
+ FieldSortedTermVectorMapper fieldMapper = new FieldSortedTermVectorMapper(new TermVectorEntryFreqSortedComparator());
+ knownSearcher.reader.getTermFreqVector(hits[1].doc, fieldMapper);
+ Map<String, SortedSet<TermVectorEntry>> map = fieldMapper.getFieldToTerms();
+ assertTrue("map Size: " + map.size() + " is not: " + 2, map.size() == 2);
+ vectorEntrySet = map.get("field");
+ assertTrue("vectorEntrySet is null and it shouldn't be", vectorEntrySet != null);
+ assertTrue("vectorEntrySet Size: " + vectorEntrySet.size() + " is not: " + 10, vectorEntrySet.size() == 10);
+ knownSearcher.close();
+ reader.close();
+ dir.close();
}
private void setupDoc(Document doc, String text)
@@ -361,8 +360,8 @@ public class TestTermVectors extends LuceneTestCase {
// Test only a few docs having vectors
public void testRareVectors() throws IOException {
- IndexWriter writer = new IndexWriter(directory, new IndexWriterConfig(
- TEST_VERSION_CURRENT, new MockAnalyzer(MockTokenizer.SIMPLE, true))
+ RandomIndexWriter writer = new RandomIndexWriter(random, directory,
+ new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(MockTokenizer.SIMPLE, true))
.setOpenMode(OpenMode.CREATE));
for (int i = 0; i < 100; i++) {
Document doc = new Document();
@@ -377,8 +376,9 @@ public class TestTermVectors extends LuceneTestCase {
writer.addDocument(doc);
}
+ IndexReader reader = writer.getReader();
writer.close();
- searcher = new IndexSearcher(directory, true);
+ searcher = new IndexSearcher(reader);
Query query = new TermQuery(new Term("field", "hundred"));
ScoreDoc[] hits = searcher.search(query, null, 1000).scoreDocs;
@@ -388,14 +388,15 @@ public class TestTermVectors extends LuceneTestCase {
assertTrue(vector != null);
assertTrue(vector.length == 1);
}
+ reader.close();
}
// In a single doc, for the same field, mix the term
// vectors up
public void testMixedVectrosVectors() throws IOException {
- IndexWriter writer = new IndexWriter(directory, new IndexWriterConfig(
- TEST_VERSION_CURRENT,
+ RandomIndexWriter writer = new RandomIndexWriter(random, directory,
+ new IndexWriterConfig(TEST_VERSION_CURRENT,
new MockAnalyzer(MockTokenizer.SIMPLE, true)).setOpenMode(OpenMode.CREATE));
Document doc = new Document();
doc.add(new Field("field", "one",
@@ -409,9 +410,10 @@ public class TestTermVectors extends LuceneTestCase {
doc.add(new Field("field", "one",
Field.Store.YES, Field.Index.ANALYZED, Field.TermVector.WITH_POSITIONS_OFFSETS));
writer.addDocument(doc);
+ IndexReader reader = writer.getReader();
writer.close();
- searcher = new IndexSearcher(directory, true);
+ searcher = new IndexSearcher(reader);
Query query = new TermQuery(new Term("field", "one"));
ScoreDoc[] hits = searcher.search(query, null, 1000).scoreDocs;
@@ -437,6 +439,7 @@ public class TestTermVectors extends LuceneTestCase {
assertEquals(4*i, offsets[i].getStartOffset());
assertEquals(4*i+3, offsets[i].getEndOffset());
}
+ reader.close();
}
private static class TestTermVectorMapper extends TermVectorMapper {
diff --git a/lucene/src/test/org/apache/lucene/search/TestTimeLimitingCollector.java b/lucene/src/test/org/apache/lucene/search/TestTimeLimitingCollector.java
index b6306080dc5..e1524598da2 100644
--- a/lucene/src/test/org/apache/lucene/search/TestTimeLimitingCollector.java
+++ b/lucene/src/test/org/apache/lucene/search/TestTimeLimitingCollector.java
@@ -24,8 +24,8 @@ import org.apache.lucene.analysis.MockAnalyzer;
import org.apache.lucene.document.Document;
import org.apache.lucene.document.Field;
import org.apache.lucene.index.IndexReader;
-import org.apache.lucene.index.IndexWriter;
import org.apache.lucene.index.IndexWriterConfig;
+import org.apache.lucene.index.RandomIndexWriter;
import org.apache.lucene.queryParser.QueryParser;
import org.apache.lucene.search.TimeLimitingCollector.TimeExceededException;
import org.apache.lucene.store.Directory;
@@ -51,6 +51,9 @@ public class TestTimeLimitingCollector extends LuceneTestCase {
private static final int N_THREADS = 50;
private Searcher searcher;
+ private Directory directory;
+ private IndexReader reader;
+
private final String FIELD_NAME = "body";
private Query query;
@@ -74,14 +77,16 @@ public class TestTimeLimitingCollector extends LuceneTestCase {
"blueberry strudel",
"blueberry pizza",
};
- Directory directory = new RAMDirectory();
- IndexWriter iw = new IndexWriter(directory, new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer()));
+ directory = new RAMDirectory();
+ RandomIndexWriter iw = new RandomIndexWriter(newRandom(), directory,
+ new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer()));
for (int i=0; i doSearch(int numResults) throws IOException {
Query q = new MatchAllDocsQuery();
- IndexSearcher searcher = new IndexSearcher(dir, true);
+ IndexSearcher searcher = new IndexSearcher(reader);
TopDocsCollector tdc = new MyTopsDocCollector(numResults);
searcher.search(q, tdc);
searcher.close();
@@ -109,15 +110,17 @@ public class TestTopDocsCollector extends LuceneTestCase {
// populate an index with 30 documents, this should be enough for the test.
// The documents have no content - the test uses MatchAllDocsQuery().
- IndexWriter writer = new IndexWriter(dir, new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer()));
+ RandomIndexWriter writer = new RandomIndexWriter(newRandom(), dir, new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer()));
for (int i = 0; i < 30; i++) {
writer.addDocument(new Document());
}
+ reader = writer.getReader();
writer.close();
}
@Override
protected void tearDown() throws Exception {
+ reader.close();
dir.close();
dir = null;
super.tearDown();
diff --git a/lucene/src/test/org/apache/lucene/search/TestTopScoreDocCollector.java b/lucene/src/test/org/apache/lucene/search/TestTopScoreDocCollector.java
index f3f803922f5..045db95a630 100644
--- a/lucene/src/test/org/apache/lucene/search/TestTopScoreDocCollector.java
+++ b/lucene/src/test/org/apache/lucene/search/TestTopScoreDocCollector.java
@@ -17,10 +17,13 @@ package org.apache.lucene.search;
* limitations under the License.
*/
+import java.util.Random;
+
import org.apache.lucene.analysis.MockAnalyzer;
import org.apache.lucene.document.Document;
-import org.apache.lucene.index.IndexWriter;
+import org.apache.lucene.index.IndexReader;
import org.apache.lucene.index.IndexWriterConfig;
+import org.apache.lucene.index.RandomIndexWriter;
import org.apache.lucene.search.BooleanClause.Occur;
import org.apache.lucene.store.Directory;
import org.apache.lucene.store.RAMDirectory;
@@ -38,12 +41,12 @@ public class TestTopScoreDocCollector extends LuceneTestCase {
public void testOutOfOrderCollection() throws Exception {
Directory dir = new RAMDirectory();
- IndexWriter writer = new IndexWriter(dir, new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer()));
+ Random random = newRandom();
+ RandomIndexWriter writer = new RandomIndexWriter(random, dir,
+ new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer()));
for (int i = 0; i < 10; i++) {
writer.addDocument(new Document());
}
- writer.commit();
- writer.close();
boolean[] inOrder = new boolean[] { false, true };
String[] actualTSDCClass = new String[] {
@@ -58,7 +61,8 @@ public class TestTopScoreDocCollector extends LuceneTestCase {
// Set minNrShouldMatch to 1 so that BQ will not optimize rewrite to return
// the clause instead of BQ.
bq.setMinimumNumberShouldMatch(1);
- IndexSearcher searcher = new IndexSearcher(dir, true);
+ IndexReader reader = writer.getReader();
+ IndexSearcher searcher = new IndexSearcher(reader);
for (int i = 0; i < inOrder.length; i++) {
TopDocsCollector tdc = TopScoreDocCollector.create(3, inOrder[i]);
assertEquals("org.apache.lucene.search.TopScoreDocCollector$" + actualTSDCClass[i], tdc.getClass().getName());
@@ -71,6 +75,10 @@ public class TestTopScoreDocCollector extends LuceneTestCase {
assertEquals("expected doc Id " + j + " found " + sd[j].doc, j, sd[j].doc);
}
}
+ writer.close();
+ searcher.close();
+ reader.close();
+ dir.close();
}
}
diff --git a/lucene/src/test/org/apache/lucene/search/TestWildcard.java b/lucene/src/test/org/apache/lucene/search/TestWildcard.java
index a45cf94d630..3733a136487 100644
--- a/lucene/src/test/org/apache/lucene/search/TestWildcard.java
+++ b/lucene/src/test/org/apache/lucene/search/TestWildcard.java
@@ -23,19 +23,28 @@ import org.apache.lucene.document.Document;
import org.apache.lucene.document.Field;
import org.apache.lucene.document.Field.Store;
import org.apache.lucene.document.Field.Index;
-import org.apache.lucene.index.IndexWriter;
import org.apache.lucene.index.IndexWriterConfig;
+import org.apache.lucene.index.RandomIndexWriter;
import org.apache.lucene.index.Term;
import org.apache.lucene.queryParser.QueryParser;
import org.apache.lucene.store.RAMDirectory;
import java.io.IOException;
+import java.util.Random;
/**
* TestWildcard tests the '*' and '?' wildcard characters.
*/
public class TestWildcard
extends LuceneTestCase {
+ private Random random;
+
+ @Override
+ public void setUp() throws Exception {
+ super.setUp();
+ random = newRandom();
+ }
+
public void testEquals() {
WildcardQuery wq1 = new WildcardQuery(new Term("field", "b*a"));
WildcardQuery wq2 = new WildcardQuery(new Term("field", "b*a"));
@@ -193,14 +202,13 @@ public class TestWildcard
private RAMDirectory getIndexStore(String field, String[] contents)
throws IOException {
RAMDirectory indexStore = new RAMDirectory();
- IndexWriter writer = new IndexWriter(indexStore, new IndexWriterConfig(
+ RandomIndexWriter writer = new RandomIndexWriter(random, indexStore, new IndexWriterConfig(
TEST_VERSION_CURRENT, new MockAnalyzer()));
for (int i = 0; i < contents.length; ++i) {
Document doc = new Document();
doc.add(new Field(field, contents[i], Field.Store.YES, Field.Index.ANALYZED));
writer.addDocument(doc);
}
- writer.optimize();
writer.close();
return indexStore;
@@ -251,7 +259,8 @@ public class TestWildcard
// prepare the index
RAMDirectory dir = new RAMDirectory();
- IndexWriter iw = new IndexWriter(dir, new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer()));
+ RandomIndexWriter iw = new RandomIndexWriter(random, dir,
+ new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer()));
for (int i = 0; i < docs.length; i++) {
Document doc = new Document();
doc.add(new Field(field,docs[i],Store.NO,Index.ANALYZED));
diff --git a/lucene/src/test/org/apache/lucene/search/TestWildcardRandom.java b/lucene/src/test/org/apache/lucene/search/TestWildcardRandom.java
index c7891815f6d..a906c6a43cb 100644
--- a/lucene/src/test/org/apache/lucene/search/TestWildcardRandom.java
+++ b/lucene/src/test/org/apache/lucene/search/TestWildcardRandom.java
@@ -26,8 +26,11 @@ import java.util.Random;
import org.apache.lucene.analysis.MockAnalyzer;
import org.apache.lucene.document.Document;
import org.apache.lucene.document.Field;
-import org.apache.lucene.index.IndexWriter;
+import org.apache.lucene.index.IndexReader;
+import org.apache.lucene.index.IndexWriterConfig;
+import org.apache.lucene.index.RandomIndexWriter;
import org.apache.lucene.index.Term;
+import org.apache.lucene.store.Directory;
import org.apache.lucene.store.RAMDirectory;
import org.apache.lucene.util.LuceneTestCase;
import org.apache.lucene.util._TestUtil;
@@ -40,13 +43,15 @@ import org.apache.lucene.util._TestUtil;
public class TestWildcardRandom extends LuceneTestCase {
private Searcher searcher;
private Random random;
+ private Directory dir;
@Override
protected void setUp() throws Exception {
super.setUp();
- RAMDirectory dir = new RAMDirectory();
- IndexWriter writer = new IndexWriter(dir, new MockAnalyzer(),
- IndexWriter.MaxFieldLength.UNLIMITED);
+ random = newRandom();
+ dir = new RAMDirectory();
+ RandomIndexWriter writer = new RandomIndexWriter(random, dir,
+ new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer()));
Document doc = new Document();
Field field = new Field("field", "", Field.Store.NO, Field.Index.ANALYZED);
@@ -58,9 +63,9 @@ public class TestWildcardRandom extends LuceneTestCase {
writer.addDocument(doc);
}
- writer.optimize();
+ IndexReader reader = writer.getReader();
+ searcher = new IndexSearcher(reader);
writer.close();
- searcher = new IndexSearcher(dir);
}
private char N() {
@@ -82,6 +87,7 @@ public class TestWildcardRandom extends LuceneTestCase {
}
private void assertPatternHits(String pattern, int numHits) throws Exception {
+ // TODO: run with different rewrites
Query wq = new WildcardQuery(new Term("field", fillPattern(pattern)));
TopDocs docs = searcher.search(wq, 25);
assertEquals("Incorrect hits for pattern: " + pattern, numHits, docs.totalHits);
@@ -90,11 +96,11 @@ public class TestWildcardRandom extends LuceneTestCase {
@Override
protected void tearDown() throws Exception {
searcher.close();
+ dir.close();
super.tearDown();
}
- public void testWildcards() throws Exception {
- random = newRandom(System.nanoTime());
+ public void testWildcards() throws Exception {
for (int i = 0; i < 100*_TestUtil.getRandomMultiplier(); i++) {
assertPatternHits("NNNN", 1);
assertPatternHits("?NNN", 10);
diff --git a/lucene/src/test/org/apache/lucene/search/payloads/TestPayloadNearQuery.java b/lucene/src/test/org/apache/lucene/search/payloads/TestPayloadNearQuery.java
index 52b7ba84f52..51f74b81a2b 100644
--- a/lucene/src/test/org/apache/lucene/search/payloads/TestPayloadNearQuery.java
+++ b/lucene/src/test/org/apache/lucene/search/payloads/TestPayloadNearQuery.java
@@ -26,9 +26,10 @@ import org.apache.lucene.analysis.TokenStream;
import org.apache.lucene.analysis.tokenattributes.PayloadAttribute;
import org.apache.lucene.document.Document;
import org.apache.lucene.document.Field;
-import org.apache.lucene.index.IndexWriter;
+import org.apache.lucene.index.IndexReader;
import org.apache.lucene.index.IndexWriterConfig;
import org.apache.lucene.index.Payload;
+import org.apache.lucene.index.RandomIndexWriter;
import org.apache.lucene.index.Term;
import org.apache.lucene.search.DefaultSimilarity;
import org.apache.lucene.search.IndexSearcher;
@@ -39,6 +40,7 @@ import org.apache.lucene.search.TopDocs;
import org.apache.lucene.search.spans.SpanQuery;
import org.apache.lucene.search.spans.SpanNearQuery;
import org.apache.lucene.search.spans.SpanTermQuery;
+import org.apache.lucene.store.Directory;
import org.apache.lucene.store.RAMDirectory;
import org.apache.lucene.util.English;
import org.apache.lucene.util.LuceneTestCase;
@@ -47,6 +49,8 @@ import org.apache.lucene.search.Explanation.IDFExplanation;
public class TestPayloadNearQuery extends LuceneTestCase {
private IndexSearcher searcher;
+ private IndexReader reader;
+ private Directory directory;
private BoostingSimilarity similarity = new BoostingSimilarity();
private byte[] payload2 = new byte[]{2};
private byte[] payload4 = new byte[]{4};
@@ -101,9 +105,10 @@ public class TestPayloadNearQuery extends LuceneTestCase {
@Override
protected void setUp() throws Exception {
super.setUp();
- RAMDirectory directory = new RAMDirectory();
- IndexWriter writer = new IndexWriter(directory, new IndexWriterConfig(
- TEST_VERSION_CURRENT, new PayloadAnalyzer()).setSimilarity(similarity));
+ directory = new RAMDirectory();
+ RandomIndexWriter writer = new RandomIndexWriter(newRandom(), directory,
+ new IndexWriterConfig(TEST_VERSION_CURRENT, new PayloadAnalyzer())
+ .setSimilarity(similarity));
//writer.infoStream = System.out;
for (int i = 0; i < 1000; i++) {
Document doc = new Document();
@@ -112,13 +117,21 @@ public class TestPayloadNearQuery extends LuceneTestCase {
doc.add(new Field("field2", txt, Field.Store.YES, Field.Index.ANALYZED));
writer.addDocument(doc);
}
- writer.optimize();
+ reader = writer.getReader();
writer.close();
- searcher = new IndexSearcher(directory, true);
+ searcher = new IndexSearcher(reader);
searcher.setSimilarity(similarity);
}
+ @Override
+ protected void tearDown() throws Exception {
+ searcher.close();
+ reader.close();
+ directory.close();
+ super.tearDown();
+ }
+
public void test() throws IOException {
PayloadNearQuery query;
TopDocs hits;
diff --git a/lucene/src/test/org/apache/lucene/search/payloads/TestPayloadTermQuery.java b/lucene/src/test/org/apache/lucene/search/payloads/TestPayloadTermQuery.java
index 7af93bd2d2e..1b00c3e8238 100644
--- a/lucene/src/test/org/apache/lucene/search/payloads/TestPayloadTermQuery.java
+++ b/lucene/src/test/org/apache/lucene/search/payloads/TestPayloadTermQuery.java
@@ -34,9 +34,10 @@ import org.apache.lucene.analysis.MockTokenizer;
import org.apache.lucene.analysis.TokenStream;
import org.apache.lucene.analysis.TokenFilter;
import org.apache.lucene.analysis.tokenattributes.PayloadAttribute;
+import org.apache.lucene.index.IndexReader;
import org.apache.lucene.index.IndexWriterConfig;
import org.apache.lucene.index.Payload;
-import org.apache.lucene.index.IndexWriter;
+import org.apache.lucene.index.RandomIndexWriter;
import org.apache.lucene.index.Term;
import org.apache.lucene.store.RAMDirectory;
import org.apache.lucene.document.Document;
@@ -52,6 +53,7 @@ import java.io.IOException;
**/
public class TestPayloadTermQuery extends LuceneTestCase {
private IndexSearcher searcher;
+ private IndexReader reader;
private BoostingSimilarity similarity = new BoostingSimilarity();
private byte[] payloadField = new byte[]{1};
private byte[] payloadMultiField1 = new byte[]{2};
@@ -110,9 +112,9 @@ public class TestPayloadTermQuery extends LuceneTestCase {
protected void setUp() throws Exception {
super.setUp();
directory = new RAMDirectory();
- IndexWriter writer = new IndexWriter(directory, new IndexWriterConfig(
- TEST_VERSION_CURRENT, new PayloadAnalyzer()).setSimilarity(
- similarity));
+ RandomIndexWriter writer = new RandomIndexWriter(newRandom(), directory,
+ new IndexWriterConfig(TEST_VERSION_CURRENT, new PayloadAnalyzer())
+ .setSimilarity(similarity));
//writer.infoStream = System.out;
for (int i = 0; i < 1000; i++) {
Document doc = new Document();
@@ -123,13 +125,21 @@ public class TestPayloadTermQuery extends LuceneTestCase {
doc.add(new Field("multiField", English.intToEnglish(i) + " " + English.intToEnglish(i), Field.Store.YES, Field.Index.ANALYZED));
writer.addDocument(doc);
}
- writer.optimize();
+ reader = writer.getReader();
writer.close();
- searcher = new IndexSearcher(directory, true);
+ searcher = new IndexSearcher(reader);
searcher.setSimilarity(similarity);
}
+ @Override
+ protected void tearDown() throws Exception {
+ searcher.close();
+ reader.close();
+ directory.close();
+ super.tearDown();
+ }
+
public void test() throws IOException {
PayloadTermQuery query = new PayloadTermQuery(new Term("field", "seventy"),
new MaxPayloadFunction());
diff --git a/lucene/src/test/org/apache/lucene/search/spans/TestBasics.java b/lucene/src/test/org/apache/lucene/search/spans/TestBasics.java
index d91cb1dbb80..6b78efc0d38 100644
--- a/lucene/src/test/org/apache/lucene/search/spans/TestBasics.java
+++ b/lucene/src/test/org/apache/lucene/search/spans/TestBasics.java
@@ -23,8 +23,9 @@ import org.apache.lucene.analysis.MockAnalyzer;
import org.apache.lucene.analysis.MockTokenizer;
import org.apache.lucene.document.Document;
import org.apache.lucene.document.Field;
-import org.apache.lucene.index.IndexWriter;
+import org.apache.lucene.index.IndexReader;
import org.apache.lucene.index.IndexWriterConfig;
+import org.apache.lucene.index.RandomIndexWriter;
import org.apache.lucene.index.Term;
import org.apache.lucene.search.BooleanClause;
import org.apache.lucene.search.BooleanQuery;
@@ -34,6 +35,7 @@ import org.apache.lucene.search.PhraseQuery;
import org.apache.lucene.search.Query;
import org.apache.lucene.search.QueryUtils;
import org.apache.lucene.search.TermQuery;
+import org.apache.lucene.store.Directory;
import org.apache.lucene.store.RAMDirectory;
import org.apache.lucene.util.English;
import org.apache.lucene.util.LuceneTestCase;
@@ -52,25 +54,35 @@ import org.apache.lucene.util.LuceneTestCase;
*/
public class TestBasics extends LuceneTestCase {
private IndexSearcher searcher;
+ private IndexReader reader;
+ private Directory directory;
@Override
protected void setUp() throws Exception {
super.setUp();
- RAMDirectory directory = new RAMDirectory();
- IndexWriter writer = new IndexWriter(directory, new IndexWriterConfig(
- TEST_VERSION_CURRENT, new MockAnalyzer(MockTokenizer.SIMPLE, true)));
+ directory = new RAMDirectory();
+ RandomIndexWriter writer = new RandomIndexWriter(newRandom(), directory,
+ new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(MockTokenizer.SIMPLE, true)));
//writer.infoStream = System.out;
for (int i = 0; i < 1000; i++) {
Document doc = new Document();
doc.add(new Field("field", English.intToEnglish(i), Field.Store.YES, Field.Index.ANALYZED));
writer.addDocument(doc);
}
-
+ reader = writer.getReader();
+ searcher = new IndexSearcher(reader);
writer.close();
-
- searcher = new IndexSearcher(directory, true);
}
-
+
+ @Override
+ protected void tearDown() throws Exception {
+ searcher.close();
+ reader.close();
+ directory.close();
+ super.tearDown();
+ }
+
+
public void testTerm() throws Exception {
Query query = new TermQuery(new Term("field", "seventy"));
checkHits(query, new int[]
diff --git a/lucene/src/test/org/apache/lucene/search/spans/TestFieldMaskingSpanQuery.java b/lucene/src/test/org/apache/lucene/search/spans/TestFieldMaskingSpanQuery.java
index 28cc7a2374f..94b17f34552 100644
--- a/lucene/src/test/org/apache/lucene/search/spans/TestFieldMaskingSpanQuery.java
+++ b/lucene/src/test/org/apache/lucene/search/spans/TestFieldMaskingSpanQuery.java
@@ -24,13 +24,14 @@ import org.apache.lucene.analysis.MockAnalyzer;
import org.apache.lucene.document.Document;
import org.apache.lucene.document.Field;
import org.apache.lucene.index.IndexReader;
-import org.apache.lucene.index.IndexWriter;
import org.apache.lucene.index.IndexWriterConfig;
+import org.apache.lucene.index.RandomIndexWriter;
import org.apache.lucene.index.Term;
import org.apache.lucene.search.CheckHits;
import org.apache.lucene.search.IndexSearcher;
import org.apache.lucene.search.Query;
import org.apache.lucene.search.QueryUtils;
+import org.apache.lucene.store.Directory;
import org.apache.lucene.store.RAMDirectory;
import org.apache.lucene.util.LuceneTestCase;
@@ -49,12 +50,15 @@ public class TestFieldMaskingSpanQuery extends LuceneTestCase {
}
protected IndexSearcher searcher;
+ protected Directory directory;
+ protected IndexReader reader;
@Override
protected void setUp() throws Exception {
super.setUp();
- RAMDirectory directory = new RAMDirectory();
- IndexWriter writer= new IndexWriter(directory, new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer()));
+ directory = new RAMDirectory();
+ RandomIndexWriter writer= new RandomIndexWriter(newRandom(), directory,
+ new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer()));
writer.addDocument(doc(new Field[] { field("id", "0")
,
@@ -109,14 +113,16 @@ public class TestFieldMaskingSpanQuery extends LuceneTestCase {
field("gender", "male"),
field("first", "bubba"),
field("last", "jones") }));
-
+ reader = writer.getReader();
writer.close();
- searcher = new IndexSearcher(directory, true);
+ searcher = new IndexSearcher(reader);
}
@Override
protected void tearDown() throws Exception {
searcher.close();
+ reader.close();
+ directory.close();
super.tearDown();
}
diff --git a/lucene/src/test/org/apache/lucene/search/spans/TestNearSpansOrdered.java b/lucene/src/test/org/apache/lucene/search/spans/TestNearSpansOrdered.java
index bc8bd2fd34c..02bdb5cd490 100644
--- a/lucene/src/test/org/apache/lucene/search/spans/TestNearSpansOrdered.java
+++ b/lucene/src/test/org/apache/lucene/search/spans/TestNearSpansOrdered.java
@@ -20,8 +20,9 @@ package org.apache.lucene.search.spans;
import org.apache.lucene.analysis.MockAnalyzer;
import org.apache.lucene.document.Document;
import org.apache.lucene.document.Field;
-import org.apache.lucene.index.IndexWriter;
+import org.apache.lucene.index.IndexReader;
import org.apache.lucene.index.IndexWriterConfig;
+import org.apache.lucene.index.RandomIndexWriter;
import org.apache.lucene.index.Term;
import org.apache.lucene.queryParser.QueryParser;
import org.apache.lucene.search.CheckHits;
@@ -29,11 +30,14 @@ import org.apache.lucene.search.Explanation;
import org.apache.lucene.search.IndexSearcher;
import org.apache.lucene.search.Weight;
import org.apache.lucene.search.Scorer;
+import org.apache.lucene.store.Directory;
import org.apache.lucene.store.RAMDirectory;
import org.apache.lucene.util.LuceneTestCase;
public class TestNearSpansOrdered extends LuceneTestCase {
protected IndexSearcher searcher;
+ protected Directory directory;
+ protected IndexReader reader;
public static final String FIELD = "field";
public static final QueryParser qp =
@@ -42,21 +46,25 @@ public class TestNearSpansOrdered extends LuceneTestCase {
@Override
protected void tearDown() throws Exception {
searcher.close();
+ reader.close();
+ directory.close();
super.tearDown();
}
@Override
protected void setUp() throws Exception {
super.setUp();
- RAMDirectory directory = new RAMDirectory();
- IndexWriter writer= new IndexWriter(directory, new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer()));
+ directory = new RAMDirectory();
+ RandomIndexWriter writer= new RandomIndexWriter(newRandom(), directory,
+ new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer()));
for (int i = 0; i < docFields.length; i++) {
Document doc = new Document();
doc.add(new Field(FIELD, docFields[i], Field.Store.NO, Field.Index.ANALYZED));
writer.addDocument(doc);
}
+ reader = writer.getReader();
writer.close();
- searcher = new IndexSearcher(directory, true);
+ searcher = new IndexSearcher(reader);
}
protected String[] docFields = {
diff --git a/lucene/src/test/org/apache/lucene/search/spans/TestSpans.java b/lucene/src/test/org/apache/lucene/search/spans/TestSpans.java
index 4901e61137b..9b4baf07549 100644
--- a/lucene/src/test/org/apache/lucene/search/spans/TestSpans.java
+++ b/lucene/src/test/org/apache/lucene/search/spans/TestSpans.java
@@ -33,32 +33,44 @@ import org.apache.lucene.analysis.MockAnalyzer;
import org.apache.lucene.index.IndexWriter;
import org.apache.lucene.index.IndexReader;
import org.apache.lucene.index.IndexWriterConfig;
+import org.apache.lucene.index.RandomIndexWriter;
import org.apache.lucene.index.Term;
import org.apache.lucene.document.Document;
import org.apache.lucene.document.Field;
import org.apache.lucene.util.LuceneTestCase;
import java.io.IOException;
-import java.util.Collections;
public class TestSpans extends LuceneTestCase {
private IndexSearcher searcher;
+ private IndexReader reader;
+ private Directory directory;
public static final String field = "field";
@Override
protected void setUp() throws Exception {
super.setUp();
- RAMDirectory directory = new RAMDirectory();
- IndexWriter writer= new IndexWriter(directory, new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer()));
+ directory = new RAMDirectory();
+ RandomIndexWriter writer= new RandomIndexWriter(newRandom(), directory,
+ new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer()));
for (int i = 0; i < docFields.length; i++) {
Document doc = new Document();
doc.add(new Field(field, docFields[i], Field.Store.YES, Field.Index.ANALYZED));
writer.addDocument(doc);
}
+ reader = writer.getReader();
writer.close();
- searcher = new IndexSearcher(directory, true);
+ searcher = new IndexSearcher(reader);
}
-
+
+ @Override
+ protected void tearDown() throws Exception {
+ searcher.close();
+ reader.close();
+ directory.close();
+ super.tearDown();
+ }
+
private String[] docFields = {
"w1 w2 w3 w4 w5",
"w1 w3 w2 w3",
diff --git a/lucene/src/test/org/apache/lucene/search/spans/TestSpansAdvanced.java b/lucene/src/test/org/apache/lucene/search/spans/TestSpansAdvanced.java
index 96ba6228e64..ab3b38fdde7 100644
--- a/lucene/src/test/org/apache/lucene/search/spans/TestSpansAdvanced.java
+++ b/lucene/src/test/org/apache/lucene/search/spans/TestSpansAdvanced.java
@@ -18,6 +18,7 @@ package org.apache.lucene.search.spans;
*/
import java.io.IOException;
+import java.util.Random;
import org.apache.lucene.util.LuceneTestCase;
@@ -26,8 +27,9 @@ import org.apache.lucene.analysis.MockTokenFilter;
import org.apache.lucene.analysis.MockTokenizer;
import org.apache.lucene.document.Document;
import org.apache.lucene.document.Field;
-import org.apache.lucene.index.IndexWriter;
+import org.apache.lucene.index.IndexReader;
import org.apache.lucene.index.IndexWriterConfig;
+import org.apache.lucene.index.RandomIndexWriter;
import org.apache.lucene.index.Term;
import org.apache.lucene.search.*;
import org.apache.lucene.store.Directory;
@@ -36,136 +38,145 @@ import org.apache.lucene.store.RAMDirectory;
/*******************************************************************************
* Tests the span query bug in Lucene. It demonstrates that SpanTermQuerys don't
* work correctly in a BooleanQuery.
- *
+ *
*/
public class TestSpansAdvanced extends LuceneTestCase {
-
- // location to the index
- protected Directory mDirectory;
-
- protected IndexSearcher searcher;
-
- // field names in the index
- private final static String FIELD_ID = "ID";
- protected final static String FIELD_TEXT = "TEXT";
-
- /**
- * Initializes the tests by adding 4 identical documents to the index.
- */
- @Override
- protected void setUp() throws Exception {
- super.setUp();
-
- // create test index
- mDirectory = new RAMDirectory();
- final IndexWriter writer = new IndexWriter(mDirectory,
- new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(MockTokenizer.SIMPLE, true, MockTokenFilter.ENGLISH_STOPSET, true)));
- addDocument(writer, "1", "I think it should work.");
- addDocument(writer, "2", "I think it should work.");
- addDocument(writer, "3", "I think it should work.");
- addDocument(writer, "4", "I think it should work.");
- writer.close();
- searcher = new IndexSearcher(mDirectory, true);
+
+ // location to the index
+ protected Directory mDirectory;
+ protected IndexReader reader;
+ protected IndexSearcher searcher;
+ protected Random random;
+
+ // field names in the index
+ private final static String FIELD_ID = "ID";
+ protected final static String FIELD_TEXT = "TEXT";
+
+ /**
+ * Initializes the tests by adding 4 identical documents to the index.
+ */
+ @Override
+ protected void setUp() throws Exception {
+ super.setUp();
+ random = newRandom();
+ // create test index
+ mDirectory = new RAMDirectory();
+ final RandomIndexWriter writer = new RandomIndexWriter(random,
+ mDirectory, new IndexWriterConfig(TEST_VERSION_CURRENT,
+ new MockAnalyzer(MockTokenizer.SIMPLE, true,
+ MockTokenFilter.ENGLISH_STOPSET, true)));
+ addDocument(writer, "1", "I think it should work.");
+ addDocument(writer, "2", "I think it should work.");
+ addDocument(writer, "3", "I think it should work.");
+ addDocument(writer, "4", "I think it should work.");
+ reader = writer.getReader();
+ writer.close();
+ searcher = new IndexSearcher(reader);
+ }
+
+ @Override
+ protected void tearDown() throws Exception {
+ searcher.close();
+ reader.close();
+ mDirectory.close();
+ mDirectory = null;
+ super.tearDown();
+ }
+
+ /**
+ * Adds the document to the index.
+ *
+ * @param writer the Lucene index writer
+ * @param id the unique id of the document
+ * @param text the text of the document
+ * @throws IOException
+ */
+ protected void addDocument(final RandomIndexWriter writer, final String id,
+ final String text) throws IOException {
+
+ final Document document = new Document();
+ document.add(new Field(FIELD_ID, id, Field.Store.YES,
+ Field.Index.NOT_ANALYZED));
+ document.add(new Field(FIELD_TEXT, text, Field.Store.YES,
+ Field.Index.ANALYZED));
+ writer.addDocument(document);
+ }
+
+ /**
+ * Tests two span queries.
+ *
+ * @throws IOException
+ */
+ public void testBooleanQueryWithSpanQueries() throws IOException {
+
+ doTestBooleanQueryWithSpanQueries(searcher, 0.3884282f);
+ }
+
+ /**
+ * Tests two span queries.
+ *
+ * @throws IOException
+ */
+ protected void doTestBooleanQueryWithSpanQueries(IndexSearcher s,
+ final float expectedScore) throws IOException {
+
+ final Query spanQuery = new SpanTermQuery(new Term(FIELD_TEXT, "work"));
+ final BooleanQuery query = new BooleanQuery();
+ query.add(spanQuery, BooleanClause.Occur.MUST);
+ query.add(spanQuery, BooleanClause.Occur.MUST);
+ final String[] expectedIds = new String[] {"1", "2", "3", "4"};
+ final float[] expectedScores = new float[] {expectedScore, expectedScore,
+ expectedScore, expectedScore};
+ assertHits(s, query, "two span queries", expectedIds, expectedScores);
+ }
+
+ /**
+ * Checks to see if the hits are what we expected.
+ *
+ * @param query the query to execute
+ * @param description the description of the search
+ * @param expectedIds the expected document ids of the hits
+ * @param expectedScores the expected scores of the hits
+ *
+ * @throws IOException
+ */
+ protected static void assertHits(Searcher s, Query query,
+ final String description, final String[] expectedIds,
+ final float[] expectedScores) throws IOException {
+ QueryUtils.check(query, s);
+
+ final float tolerance = 1e-5f;
+
+ // Hits hits = searcher.search(query);
+ // hits normalizes and throws things off if one score is greater than 1.0
+ TopDocs topdocs = s.search(query, null, 10000);
+
+ /*****
+ * // display the hits System.out.println(hits.length() +
+ * " hits for search: \"" + description + '\"'); for (int i = 0; i <
+ * hits.length(); i++) { System.out.println(" " + FIELD_ID + ':' +
+ * hits.doc(i).get(FIELD_ID) + " (score:" + hits.score(i) + ')'); }
+ *****/
+
+ // did we get the hits we expected
+ assertEquals(expectedIds.length, topdocs.totalHits);
+ for (int i = 0; i < topdocs.totalHits; i++) {
+ // System.out.println(i + " exp: " + expectedIds[i]);
+ // System.out.println(i + " field: " + hits.doc(i).get(FIELD_ID));
+
+ int id = topdocs.scoreDocs[i].doc;
+ float score = topdocs.scoreDocs[i].score;
+ Document doc = s.doc(id);
+ assertEquals(expectedIds[i], doc.get(FIELD_ID));
+ boolean scoreEq = Math.abs(expectedScores[i] - score) < tolerance;
+ if (!scoreEq) {
+ System.out.println(i + " warning, expected score: " + expectedScores[i]
+ + ", actual " + score);
+ System.out.println(s.explain(query, id));
+ }
+ assertEquals(expectedScores[i], score, tolerance);
+ assertEquals(s.explain(query, id).getValue(), score, tolerance);
}
-
- @Override
- protected void tearDown() throws Exception {
- searcher.close();
- mDirectory.close();
- mDirectory = null;
- super.tearDown();
- }
-
- /**
- * Adds the document to the index.
- *
- * @param writer the Lucene index writer
- * @param id the unique id of the document
- * @param text the text of the document
- * @throws IOException
- */
- protected void addDocument(final IndexWriter writer, final String id, final String text) throws IOException {
-
- final Document document = new Document();
- document.add(new Field(FIELD_ID, id, Field.Store.YES, Field.Index.NOT_ANALYZED));
- document.add(new Field(FIELD_TEXT, text, Field.Store.YES, Field.Index.ANALYZED));
- writer.addDocument(document);
- }
-
- /**
- * Tests two span queries.
- *
- * @throws IOException
- */
- public void testBooleanQueryWithSpanQueries() throws IOException {
-
- doTestBooleanQueryWithSpanQueries(searcher,0.3884282f);
- }
-
- /**
- * Tests two span queries.
- *
- * @throws IOException
- */
- protected void doTestBooleanQueryWithSpanQueries(IndexSearcher s, final float expectedScore) throws IOException {
-
- final Query spanQuery = new SpanTermQuery(new Term(FIELD_TEXT, "work"));
- final BooleanQuery query = new BooleanQuery();
- query.add(spanQuery, BooleanClause.Occur.MUST);
- query.add(spanQuery, BooleanClause.Occur.MUST);
- final String[] expectedIds = new String[] { "1", "2", "3", "4" };
- final float[] expectedScores = new float[] { expectedScore, expectedScore, expectedScore, expectedScore };
- assertHits(s, query, "two span queries", expectedIds, expectedScores);
- }
-
-
- /**
- * Checks to see if the hits are what we expected.
- *
- * @param query the query to execute
- * @param description the description of the search
- * @param expectedIds the expected document ids of the hits
- * @param expectedScores the expected scores of the hits
- *
- * @throws IOException
- */
- protected static void assertHits(Searcher s, Query query, final String description, final String[] expectedIds,
- final float[] expectedScores) throws IOException {
- QueryUtils.check(query,s);
-
- final float tolerance = 1e-5f;
-
- // Hits hits = searcher.search(query);
- // hits normalizes and throws things off if one score is greater than 1.0
- TopDocs topdocs = s.search(query,null,10000);
-
- /*****
- // display the hits
- System.out.println(hits.length() + " hits for search: \"" + description + '\"');
- for (int i = 0; i < hits.length(); i++) {
- System.out.println(" " + FIELD_ID + ':' + hits.doc(i).get(FIELD_ID) + " (score:" + hits.score(i) + ')');
- }
- *****/
-
- // did we get the hits we expected
- assertEquals(expectedIds.length, topdocs.totalHits);
- for (int i = 0; i < topdocs.totalHits; i++) {
- //System.out.println(i + " exp: " + expectedIds[i]);
- //System.out.println(i + " field: " + hits.doc(i).get(FIELD_ID));
-
- int id = topdocs.scoreDocs[i].doc;
- float score = topdocs.scoreDocs[i].score;
- Document doc = s.doc(id);
- assertEquals(expectedIds[i], doc.get(FIELD_ID));
- boolean scoreEq = Math.abs(expectedScores[i] - score) < tolerance;
- if (!scoreEq) {
- System.out.println(i + " warning, expected score: " + expectedScores[i] + ", actual " + score);
- System.out.println(s.explain(query,id));
- }
- assertEquals(expectedScores[i], score, tolerance);
- assertEquals(s.explain(query,id).getValue(), score, tolerance);
- }
- }
-
-
+ }
+
}
\ No newline at end of file
diff --git a/lucene/src/test/org/apache/lucene/search/spans/TestSpansAdvanced2.java b/lucene/src/test/org/apache/lucene/search/spans/TestSpansAdvanced2.java
index 2b35dfb27ef..6a69d26c7b9 100644
--- a/lucene/src/test/org/apache/lucene/search/spans/TestSpansAdvanced2.java
+++ b/lucene/src/test/org/apache/lucene/search/spans/TestSpansAdvanced2.java
@@ -23,8 +23,8 @@ import org.apache.lucene.analysis.MockAnalyzer;
import org.apache.lucene.analysis.MockTokenFilter;
import org.apache.lucene.analysis.MockTokenizer;
import org.apache.lucene.index.IndexReader;
-import org.apache.lucene.index.IndexWriter;
import org.apache.lucene.index.IndexWriterConfig;
+import org.apache.lucene.index.RandomIndexWriter;
import org.apache.lucene.index.Term;
import org.apache.lucene.index.IndexWriterConfig.OpenMode;
import org.apache.lucene.search.*;
@@ -32,84 +32,97 @@ import org.apache.lucene.search.*;
/*******************************************************************************
* Some expanded tests to make sure my patch doesn't break other SpanTermQuery
* functionality.
- *
+ *
*/
public class TestSpansAdvanced2 extends TestSpansAdvanced {
- IndexSearcher searcher2;
- /**
- * Initializes the tests by adding documents to the index.
- */
- @Override
- protected void setUp() throws Exception {
- super.setUp();
-
- // create test index
- final IndexWriter writer = new IndexWriter(mDirectory,
- new IndexWriterConfig(TEST_VERSION_CURRENT,
- new MockAnalyzer(MockTokenizer.SIMPLE, true, MockTokenFilter.ENGLISH_STOPSET, true)).setOpenMode(
- OpenMode.APPEND));
- addDocument(writer, "A", "Should we, could we, would we?");
- addDocument(writer, "B", "It should. Should it?");
- addDocument(writer, "C", "It shouldn't.");
- addDocument(writer, "D", "Should we, should we, should we.");
- writer.close();
-
- // re-open the searcher since we added more docs
- searcher2 = new IndexSearcher(mDirectory, true);
- }
-
- /**
- * Verifies that the index has the correct number of documents.
- *
- * @throws Exception
- */
- public void testVerifyIndex() throws Exception {
- final IndexReader reader = IndexReader.open(mDirectory, true);
- assertEquals(8, reader.numDocs());
- reader.close();
- }
-
- /**
- * Tests a single span query that matches multiple documents.
- *
- * @throws IOException
- */
- public void testSingleSpanQuery() throws IOException {
-
- final Query spanQuery = new SpanTermQuery(new Term(FIELD_TEXT, "should"));
- final String[] expectedIds = new String[] { "B", "D", "1", "2", "3", "4", "A" };
- final float[] expectedScores = new float[] { 0.625f, 0.45927936f, 0.35355338f, 0.35355338f, 0.35355338f,
- 0.35355338f, 0.26516503f, };
- assertHits(searcher2, spanQuery, "single span query", expectedIds, expectedScores);
- }
-
- /**
- * Tests a single span query that matches multiple documents.
- *
- * @throws IOException
- */
- public void testMultipleDifferentSpanQueries() throws IOException {
-
- final Query spanQuery1 = new SpanTermQuery(new Term(FIELD_TEXT, "should"));
- final Query spanQuery2 = new SpanTermQuery(new Term(FIELD_TEXT, "we"));
- final BooleanQuery query = new BooleanQuery();
- query.add(spanQuery1, BooleanClause.Occur.MUST);
- query.add(spanQuery2, BooleanClause.Occur.MUST);
- final String[] expectedIds = new String[] { "D", "A" };
- // these values were pre LUCENE-413
- // final float[] expectedScores = new float[] { 0.93163157f, 0.20698164f };
- final float[] expectedScores = new float[] { 1.0191123f, 0.93163157f };
- assertHits(searcher2, query, "multiple different span queries", expectedIds, expectedScores);
- }
-
- /**
- * Tests two span queries.
- *
- * @throws IOException
- */
- @Override
- public void testBooleanQueryWithSpanQueries() throws IOException {
-
- doTestBooleanQueryWithSpanQueries(searcher2, 0.73500174f);
- }
+ IndexSearcher searcher2;
+ IndexReader reader2;
+
+ /**
+ * Initializes the tests by adding documents to the index.
+ */
+ @Override
+ protected void setUp() throws Exception {
+ super.setUp();
+
+ // create test index
+ final RandomIndexWriter writer = new RandomIndexWriter(random, mDirectory,
+ new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(
+ MockTokenizer.SIMPLE, true, MockTokenFilter.ENGLISH_STOPSET, true))
+ .setOpenMode(OpenMode.APPEND));
+ addDocument(writer, "A", "Should we, could we, would we?");
+ addDocument(writer, "B", "It should. Should it?");
+ addDocument(writer, "C", "It shouldn't.");
+ addDocument(writer, "D", "Should we, should we, should we.");
+ reader2 = writer.getReader();
+ writer.close();
+
+ // re-open the searcher since we added more docs
+ searcher2 = new IndexSearcher(reader2);
+ }
+
+ @Override
+ protected void tearDown() throws Exception {
+ searcher2.close();
+ reader2.close();
+ super.tearDown();
+ }
+
+ /**
+ * Verifies that the index has the correct number of documents.
+ *
+ * @throws Exception
+ */
+ public void testVerifyIndex() throws Exception {
+ final IndexReader reader = IndexReader.open(mDirectory, true);
+ assertEquals(8, reader.numDocs());
+ reader.close();
+ }
+
+ /**
+ * Tests a single span query that matches multiple documents.
+ *
+ * @throws IOException
+ */
+ public void testSingleSpanQuery() throws IOException {
+
+ final Query spanQuery = new SpanTermQuery(new Term(FIELD_TEXT, "should"));
+ final String[] expectedIds = new String[] {"B", "D", "1", "2", "3", "4",
+ "A"};
+ final float[] expectedScores = new float[] {0.625f, 0.45927936f,
+ 0.35355338f, 0.35355338f, 0.35355338f, 0.35355338f, 0.26516503f,};
+ assertHits(searcher2, spanQuery, "single span query", expectedIds,
+ expectedScores);
+ }
+
+ /**
+ * Tests a single span query that matches multiple documents.
+ *
+ * @throws IOException
+ */
+ public void testMultipleDifferentSpanQueries() throws IOException {
+
+ final Query spanQuery1 = new SpanTermQuery(new Term(FIELD_TEXT, "should"));
+ final Query spanQuery2 = new SpanTermQuery(new Term(FIELD_TEXT, "we"));
+ final BooleanQuery query = new BooleanQuery();
+ query.add(spanQuery1, BooleanClause.Occur.MUST);
+ query.add(spanQuery2, BooleanClause.Occur.MUST);
+ final String[] expectedIds = new String[] {"D", "A"};
+ // these values were pre LUCENE-413
+ // final float[] expectedScores = new float[] { 0.93163157f, 0.20698164f };
+ final float[] expectedScores = new float[] {1.0191123f, 0.93163157f};
+ assertHits(searcher2, query, "multiple different span queries",
+ expectedIds, expectedScores);
+ }
+
+ /**
+ * Tests two span queries.
+ *
+ * @throws IOException
+ */
+ @Override
+ public void testBooleanQueryWithSpanQueries() throws IOException {
+
+ doTestBooleanQueryWithSpanQueries(searcher2, 0.73500174f);
+ }
}
diff --git a/lucene/src/test/org/apache/lucene/util/LuceneTestCaseJ4.java b/lucene/src/test/org/apache/lucene/util/LuceneTestCaseJ4.java
index 1ab00626dce..396c1b9e906 100644
--- a/lucene/src/test/org/apache/lucene/util/LuceneTestCaseJ4.java
+++ b/lucene/src/test/org/apache/lucene/util/LuceneTestCaseJ4.java
@@ -33,6 +33,7 @@ import java.io.File;
import java.io.PrintStream;
import java.io.IOException;
import java.util.Arrays;
+import java.util.Hashtable;
import java.util.Iterator;
import java.util.Random;
import java.util.ArrayList;
@@ -330,6 +331,30 @@ public class LuceneTestCaseJ4 {
return new Random(seed);
}
+ private static Hashtable<Class<?>,Long> staticSeeds = new Hashtable<Class<?>,Long>();
+
+ /**
+ * Returns a {@link Random} instance for generating random numbers from a
+ * {@code @BeforeClass} annotated method.
+ * The random seed is logged during test execution and printed to System.out on any failure
+ * for reproducing the test using {@link #newStaticRandom(Class, long)} with the
+ * recorded seed.
+ */
+ public static Random newStaticRandom(Class<?> clazz) {
+ return newStaticRandom(clazz, seedRnd.nextLong());
+ }
+
+ /**
+ * Returns a {@link Random} instance for generating random numbers from a
+ * {@code @BeforeClass} annotated method.
+ * If an error occurs in the test that is not reproducible, you can use this method to
+ * initialize the number generator with the seed that was printed out during the failing test.
+ */
+ public static Random newStaticRandom(Class<?> clazz, long seed) {
+ staticSeeds.put(clazz, Long.valueOf(seed));
+ return new Random(seed);
+ }
+
public String getName() {
return this.name;
}
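A minimal sketch of how a test class might consume the new static-seed helper from a @BeforeClass method (the test class and method names here are hypothetical; only newStaticRandom(Class) and the failure reporting below are part of this patch):

    public class TestSomethingByClass extends LuceneTestCaseJ4 {
      private static Random staticRandom;

      @BeforeClass
      public static void beforeClassSetUp() {
        // The seed is remembered per test class; on a failure,
        // reportAdditionalFailureInfo() prints it so the run can be
        // reproduced via newStaticRandom(TestSomethingByClass.class, seed).
        staticRandom = newStaticRandom(TestSomethingByClass.class);
      }
    }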
@@ -348,6 +373,11 @@ public class LuceneTestCaseJ4 {
// We get here from InterceptTestCaseEvents on the 'failed' event....
public void reportAdditionalFailureInfo() {
+ Long staticSeed = staticSeeds.get(getClass());
+ if (staticSeed != null) {
+ System.out.println("NOTE: random static seed of testclass '" + getName() + "' was: " + staticSeed);
+ }
+
if (seed != null) {
System.out.println("NOTE: random seed of testcase '" + getName() + "' was: " + seed);
}