LUCENE-3559: remove IndexSearcher.maxDoc/docFreq

git-svn-id: https://svn.apache.org/repos/asf/lucene/dev/trunk@1197458 13f79535-47bb-0310-9956-ffa450edef68
Robert Muir 2011-11-04 09:35:15 +00:00
parent b19a207c86
commit 38fa222128
18 changed files with 50 additions and 54 deletions


@@ -194,6 +194,10 @@ Changes in backwards compatibility policy
   to the Bits interface. This method should never do I/O for performance reasons.
   (Mike McCandless, Uwe Schindler, Robert Muir, Chris Male, Yonik Seeley,
   Jason Rutherglen, Paul Elschot)
 
+* LUCENE-3559: The methods "docFreq" and "maxDoc" on IndexSearcher were removed,
+  as these are no longer used by the scoring system. See MIGRATE.txt for more
+  details. (Robert Muir)
+
 Changes in Runtime Behavior


@@ -395,6 +395,16 @@ LUCENE-1458, LUCENE-2111: Flexible Indexing
   The scorePayload method now takes a BytesRef. It is never null.
 
+* LUCENE-3559: The methods "docFreq" and "maxDoc" on IndexSearcher were removed,
+  as these are no longer used by the scoring system.
+
+  If you were using these casually in your code for reasons unrelated to scoring,
+  call them on the IndexSearcher's reader instead: getIndexReader().
+
+  If you were subclassing IndexSearcher and overriding these methods to alter
+  scoring, override IndexSearcher's termStatistics() and collectionStatistics()
+  methods instead.
+
 * LUCENE-3283: Lucene's core o.a.l.queryParser QueryParsers have been consolidated into module/queryparser,
   where other QueryParsers from the codebase will also be placed. The following classes were moved:
   - o.a.l.queryParser.CharStream -> o.a.l.queryparser.classic.CharStream
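
The migration is mechanical in both cases. A minimal sketch of the two paths above,
assuming the 4.0-era trunk statistics API (the exact signatures shown are illustrative):

  // Casual use: ask the searcher's reader directly.
  int maxDoc  = searcher.getIndexReader().maxDoc();                      // was: searcher.maxDoc()
  int docFreq = searcher.getIndexReader().docFreq(new Term("f", "foo")); // was: searcher.docFreq(term)

  // Scoring use: override the statistics hooks instead of maxDoc()/docFreq().
  IndexSearcher custom = new IndexSearcher(reader) {
    @Override
    public TermStatistics termStatistics(Term term, TermContext context) throws IOException {
      // e.g. substitute globally aggregated statistics for distributed scoring
      return super.termStatistics(term, context);
    }
  };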


@@ -215,36 +215,6 @@ public class IndexSearcher implements Closeable {
     return reader;
   }
 
-  /** Expert: Returns one greater than the largest possible document number.
-   *
-   * @see org.apache.lucene.index.IndexReader#maxDoc()
-   */
-  public int maxDoc() {
-    return reader.maxDoc();
-  }
-
-  /** Returns total docFreq for this term. */
-  public int docFreq(final Term term) throws IOException {
-    if (executor == null) {
-      return reader.docFreq(term);
-    } else {
-      final ExecutionHelper<Integer> runner = new ExecutionHelper<Integer>(executor);
-      for(int i = 0; i < leafContexts.length; i++) {
-        final IndexReader leaf = leafContexts[i].reader;
-        runner.submit(new Callable<Integer>() {
-          public Integer call() throws IOException {
-            return Integer.valueOf(leaf.docFreq(term));
-          }
-        });
-      }
-      int docFreq = 0;
-      for (Integer num : runner) {
-        docFreq += num.intValue();
-      }
-      return docFreq;
-    }
-  }
-
   /* Sugar for <code>.getIndexReader().document(docID)</code> */
   public Document doc(int docID) throws CorruptIndexException, IOException {
     return reader.document(docID);
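
Worth noting: the removed executor path only parallelized a per-leaf summation, so callers
that switch to the reader see the same total. A hedged sketch of the equivalent direct loop,
reusing the leafContexts shape visible in the removed code:

  // Same total that the composite reader.docFreq(term) returns, minus the executor:
  int docFreq = 0;
  for (AtomicReaderContext leaf : leafContexts) {
    docFreq += leaf.reader.docFreq(term);
  }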


@@ -571,11 +571,11 @@ public abstract class TFIDFSimilarity extends Similarity {
    * idf(docFreq, searcher.maxDoc());
    * </pre>
    *
-   * Note that {@link IndexSearcher#maxDoc()} is used instead of
+   * Note that {@link CollectionStatistics#maxDoc()} is used instead of
    * {@link org.apache.lucene.index.IndexReader#numDocs() IndexReader#numDocs()} because also
-   * {@link IndexSearcher#docFreq(Term)} is used, and when the latter
-   * is inaccurate, so is {@link IndexSearcher#maxDoc()}, and in the same direction.
-   * In addition, {@link IndexSearcher#maxDoc()} is more efficient to compute
+   * {@link TermStatistics#docFreq()} is used, and when the latter
+   * is inaccurate, so is {@link CollectionStatistics#maxDoc()}, and in the same direction.
+   * In addition, {@link CollectionStatistics#maxDoc()} is more efficient to compute
    *
    * @param collectionStats collection-level statistics
    * @param termStats term-level statistics for the term
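
With the statistics objects above, an idfExplain-style computation reads the same numbers
without touching the searcher. A minimal sketch (the formula is DefaultSimilarity's idf;
the variable names are illustrative):

  long df     = termStats.docFreq();       // was: searcher.docFreq(term)
  long maxDoc = collectionStats.maxDoc();  // was: searcher.maxDoc()
  float idf   = (float) (Math.log(maxDoc / (double) (df + 1)) + 1.0);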


@@ -61,7 +61,7 @@ public class CheckHits {
       ignore.add(Integer.valueOf(results[i]));
     }
 
-    int maxDoc = searcher.maxDoc();
+    int maxDoc = searcher.getIndexReader().maxDoc();
     for (int doc = 0; doc < maxDoc; doc++) {
       if (ignore.contains(Integer.valueOf(doc))) continue;


@@ -717,7 +717,7 @@ public class TestDeletionPolicy extends LuceneTestCase {
           expectedCount -= 17;
         }
       }
-      assertEquals("maxDoc=" + searcher.maxDoc() + " numDocs=" + searcher.getIndexReader().numDocs(), expectedCount, hits.length);
+      assertEquals("maxDoc=" + searcher.getIndexReader().maxDoc() + " numDocs=" + searcher.getIndexReader().numDocs(), expectedCount, hits.length);
       searcher.close();
       reader.close();
       if (i == N) {


@@ -73,8 +73,9 @@ public class TestSearchAfter extends LuceneTestCase {
   }
 
   void assertQuery(Query query, Filter filter) throws Exception {
-    TopDocs all = searcher.search(query, filter, searcher.maxDoc());
-    int pageSize = _TestUtil.nextInt(random, 1, searcher.maxDoc()*2);
+    int maxDoc = searcher.getIndexReader().maxDoc();
+    TopDocs all = searcher.search(query, filter, maxDoc);
+    int pageSize = _TestUtil.nextInt(random, 1, maxDoc*2);
     int pageStart = 0;
     ScoreDoc lastBottom = null;
     while (pageStart < all.totalHits) {


@@ -134,7 +134,7 @@ public class TestSubScorerFreqs extends LuceneTestCase {
     CountingCollector c = new CountingCollector(TopScoreDocCollector.create(10,
         true));
     s.search(q, null, c);
-    final int maxDocs = s.maxDoc();
+    final int maxDocs = s.getIndexReader().maxDoc();
     assertEquals(maxDocs, c.docCounts.size());
     for (int i = 0; i < maxDocs; i++) {
       Map<Query, Float> doc0 = c.docCounts.get(i);
@@ -171,7 +171,7 @@ public class TestSubScorerFreqs extends LuceneTestCase {
     CountingCollector c = new CountingCollector(TopScoreDocCollector.create(
         10, true), occur);
     s.search(query, null, c);
-    final int maxDocs = s.maxDoc();
+    final int maxDocs = s.getIndexReader().maxDoc();
     assertEquals(maxDocs, c.docCounts.size());
     boolean includeOptional = occur.contains(Occur.SHOULD.toString());
     for (int i = 0; i < maxDocs; i++) {
@@ -201,7 +201,7 @@ public class TestSubScorerFreqs extends LuceneTestCase {
     CountingCollector c = new CountingCollector(TopScoreDocCollector.create(10,
         true));
     s.search(q, null, c);
-    final int maxDocs = s.maxDoc();
+    final int maxDocs = s.getIndexReader().maxDoc();
     assertEquals(maxDocs, c.docCounts.size());
     for (int i = 0; i < maxDocs; i++) {
       Map<Query, Float> doc0 = c.docCounts.get(i);


@@ -109,7 +109,7 @@ public class TestTopKInEachNodeResultHandler extends LuceneTestCase {
       // Get all of the documents and run the query, then do different
       // facet counts and compare to control
       Query q = new TermQuery(new Term("content", "alpha"));
-      ScoredDocIdCollector scoredDoc = ScoredDocIdCollector.create(is.maxDoc(), true);
+      ScoredDocIdCollector scoredDoc = ScoredDocIdCollector.create(ir.maxDoc(), true);
 
       // Collector collector = new MultiCollector(scoredDoc);
       is.search(q, scoredDoc);


@@ -61,7 +61,7 @@ public abstract class BaseSampleTestTopK extends BaseTestTopK {
     // Get all of the documents and run the query, then do different
     // facet counts and compare to control
     Query q = new TermQuery(new Term(CONTENT_FIELD, BETA)); // 90% of the docs
-    ScoredDocIdCollector docCollector = ScoredDocIdCollector.create(searcher.maxDoc(), false);
+    ScoredDocIdCollector docCollector = ScoredDocIdCollector.create(indexReader.maxDoc(), false);
 
     FacetSearchParams expectedSearchParams = searchParamsWithRequests(K, partitionSize);
     FacetsCollector fc = new FacetsCollector(expectedSearchParams, indexReader, taxoReader);


@@ -106,9 +106,10 @@ public class AllGroupHeadsCollectorTest extends LuceneTestCase {
     doc.add(newField("id", "7", StringField.TYPE_STORED));
     w.addDocument(doc);
 
-    IndexSearcher indexSearcher = new IndexSearcher(w.getReader());
+    IndexReader reader = w.getReader();
+    IndexSearcher indexSearcher = new IndexSearcher(reader);
     w.close();
-    int maxDoc = indexSearcher.maxDoc();
+    int maxDoc = reader.maxDoc();
 
     Sort sortWithinGroup = new Sort(new SortField("id", SortField.Type.INT, true));
     AbstractAllGroupHeadsCollector c1 = createRandomCollector(groupField, sortWithinGroup);


@@ -950,7 +950,7 @@ public class TestGrouping extends LuceneTestCase {
         }
 
         if (searchIter == 14) {
-          for(int docIDX=0;docIDX<s.maxDoc();docIDX++) {
+          for(int docIDX=0;docIDX<s.getIndexReader().maxDoc();docIDX++) {
             System.out.println("ID=" + docIDToID[docIDX] + " explain=" + s.explain(query, docIDX));
           }
         }


@@ -148,7 +148,7 @@ public class DocFreqValueSource extends ValueSource {
   @Override
   public DocValues getValues(Map context, AtomicReaderContext readerContext) throws IOException {
     IndexSearcher searcher = (IndexSearcher)context.get("searcher");
-    int docfreq = searcher.docFreq(new Term(indexedField, indexedBytes));
+    int docfreq = searcher.getIndexReader().docFreq(new Term(indexedField, indexedBytes));
     return new ConstIntDocValues(docfreq, this);
   }


@@ -46,8 +46,8 @@ public class IDFValueSource extends DocFreqValueSource {
     if (!(sim instanceof TFIDFSimilarity)) {
       throw new UnsupportedOperationException("requires a TFIDFSimilarity (such as DefaultSimilarity)");
     }
-    int docfreq = searcher.docFreq(new Term(indexedField, indexedBytes));
-    float idf = ((TFIDFSimilarity)sim).idf(docfreq, searcher.maxDoc());
+    int docfreq = searcher.getIndexReader().docFreq(new Term(indexedField, indexedBytes));
+    float idf = ((TFIDFSimilarity)sim).idf(docfreq, searcher.getIndexReader().maxDoc());
     return new ConstDoubleDocValues(idf, this);
   }
 }


@@ -42,7 +42,7 @@ public class MaxDocValueSource extends ValueSource {
   @Override
   public DocValues getValues(Map context, AtomicReaderContext readerContext) throws IOException {
     IndexSearcher searcher = (IndexSearcher)context.get("searcher");
-    return new ConstIntDocValues(searcher.maxDoc(), this);
+    return new ConstIntDocValues(searcher.getIndexReader().maxDoc(), this);
   }
 
   @Override


@@ -470,7 +470,9 @@ public class SpellChecker implements java.io.Closeable {
     // obtainSearcher calls ensureOpen
     final IndexSearcher indexSearcher = obtainSearcher();
     try{
-      return indexSearcher.docFreq(new Term(F_WORD, word)) > 0;
+      // TODO: we should use ReaderUtil+seekExact, we dont care about the docFreq
+      // this is just an existence check
+      return indexSearcher.getIndexReader().docFreq(new Term(F_WORD, word)) > 0;
     } finally {
       releaseSearcher(indexSearcher);
     }
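
A hedged sketch of the existence check the new TODO suggests: seek the term instead of
counting it. This uses MultiFields rather than a per-segment ReaderUtil.Gather for brevity,
and the TermsEnum shapes follow later 4.x releases, so treat it as illustrative:

  Terms terms = MultiFields.getTerms(indexSearcher.getIndexReader(), F_WORD);
  if (terms == null) {
    return false;   // field absent: the word cannot exist
  }
  // seekExact returns true iff the exact term is present; no docFreq needed
  return terms.iterator().seekExact(new BytesRef(word));
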
@@ -494,8 +496,9 @@ public class SpellChecker implements java.io.Closeable {
     IndexSearcher indexSearcher = obtainSearcher();
     final List<TermsEnum> termsEnums = new ArrayList<TermsEnum>();
 
-    if (searcher.maxDoc() > 0) {
-      new ReaderUtil.Gather(searcher.getIndexReader()) {
+    final IndexReader reader = searcher.getIndexReader();
+    if (reader.maxDoc() > 0) {
+      new ReaderUtil.Gather(reader) {
         @Override
         protected void add(int base, IndexReader r) throws IOException {
           Terms terms = r.terms(F_WORD);

@@ -55,7 +55,7 @@ class LuceneQueryOptimizer {
   }
 
   public TopDocs optimize(BooleanQuery original,
-                          IndexSearcher searcher,
+                          SolrIndexSearcher searcher,
                           int numHits,
                           Query[] queryOut,
                           Filter[] filterOut


@@ -191,6 +191,13 @@ public class SolrIndexSearcher extends IndexSearcher implements SolrInfoMBean {
     return core;
   }
 
+  public final int maxDoc() {
+    return reader.maxDoc();
+  }
+
+  public final int docFreq(Term term) throws IOException {
+    return reader.docFreq(term);
+  }
+
   /** Register sub-objects such as caches
   */