mirror of https://github.com/apache/lucene.git
LUCENE-7872: TopDocs.totalHits is now a long.
This commit is contained in:
parent 2d26d7e871
commit 44d1f1fe3f
@@ -74,6 +74,8 @@ API Changes
   was optimized to work directly instead of being implemented on getFields.
   (David Smiley)
 
+* LUCENE-7872: TopDocs.totalHits is now a long. (Adrien Grand, hossman)
+
 Bug Fixes
 
 * LUCENE-7626: IndexWriter will no longer accept broken token offsets
@@ -121,3 +121,13 @@ use TopGroupsCollector.
 Support for legacy numerics has been removed since legacy numerics had been
 deprecated since Lucene 6.0. Points should be used instead, see
 org.apache.lucene.index.PointValues for an introduction.
+
+## TopDocs.totalHits is now a long (LUCENE-7872)
+
+TopDocs.totalHits is now a long so that TopDocs instances can be used to
+represent top hits that have more than 2B matches. This is necessary for the
+case where multiple TopDocs instances are merged together with TopDocs#merge,
+as they might have more than 2B matches in total. However, TopDocs instances
+returned by IndexSearcher will still have a total number of hits that is less
+than 2B, since Lucene indexes are still bound to at most 2B documents, so it
+can safely be cast to an int in that case.
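For migrating code, the note above reduces to one rule: keep the value as a long wherever it might come from a merge, and narrow it only when the hits come from a single searcher. A minimal sketch of both cases (searcher, query, and shardHits are illustrative names, not part of this commit):

    TopDocs docs = searcher.search(query, 10);
    // A single IndexSearcher sees at most ~2B documents, so this never throws:
    int hitsAsInt = Math.toIntExact(docs.totalHits);

    // After TopDocs.merge the per-shard counts are summed and may exceed
    // Integer.MAX_VALUE, so keep the long:
    TopDocs merged = TopDocs.merge(10, shardHits);
    long totalAcrossShards = merged.totalHits;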
@@ -122,7 +122,7 @@ public class DatasetSplitter {
 
     // iterate over existing documents
     for (GroupDocs<Object> group : topGroups.groups) {
-      int totalHits = group.totalHits;
+      long totalHits = group.totalHits;
       double testSize = totalHits * testRatio;
       int tc = 0;
       double cvSize = totalHits * crossValidationRatio;
@@ -23,7 +23,7 @@ import org.apache.lucene.util.PriorityQueue;
 public class TopDocs {
 
   /** The total number of hits for the query. */
-  public int totalHits;
+  public long totalHits;
 
   /** The top hits for the query. */
   public ScoreDoc[] scoreDocs;
@@ -45,11 +45,11 @@ public class TopDocs {
   }
 
   /** Constructs a TopDocs with a default maxScore=Float.NaN. */
-  TopDocs(int totalHits, ScoreDoc[] scoreDocs) {
+  TopDocs(long totalHits, ScoreDoc[] scoreDocs) {
     this(totalHits, scoreDocs, Float.NaN);
   }
 
-  public TopDocs(int totalHits, ScoreDoc[] scoreDocs, float maxScore) {
+  public TopDocs(long totalHits, ScoreDoc[] scoreDocs, float maxScore) {
     this.totalHits = totalHits;
     this.scoreDocs = scoreDocs;
     this.maxScore = maxScore;
@@ -266,7 +266,7 @@ public class TopDocs {
       queue = new MergeSortQueue(sort, shardHits);
     }
 
-    int totalHitCount = 0;
+    long totalHitCount = 0;
     int availHitCount = 0;
     float maxScore = Float.MIN_VALUE;
     for(int shardIDX=0;shardIDX<shardHits.length;shardIDX++) {
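The widened field is what lets TopDocs#merge report a correct total once the per-shard counts sum past Integer.MAX_VALUE. A hedged illustration of that overflow case, using invented shard counts and empty result arrays:

    ScoreDoc[] empty = new ScoreDoc[0];
    TopDocs shard1 = new TopDocs(1_500_000_000L, empty, Float.NaN);
    TopDocs shard2 = new TopDocs(1_500_000_000L, empty, Float.NaN);
    TopDocs merged = TopDocs.merge(10, new TopDocs[] { shard1, shard2 });
    long total = merged.totalHits; // 3_000_000_000, which no int can hold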
@@ -32,7 +32,7 @@ public class TopFieldDocs extends TopDocs {
   * @param fields The sort criteria used to find the top hits.
   * @param maxScore The maximum score encountered.
   */
-  public TopFieldDocs (int totalHits, ScoreDoc[] scoreDocs, SortField[] fields, float maxScore) {
+  public TopFieldDocs (long totalHits, ScoreDoc[] scoreDocs, SortField[] fields, float maxScore) {
     super (totalHits, scoreDocs, maxScore);
     this.fields = fields;
   }
@@ -261,7 +261,7 @@ public class Test2BTerms extends LuceneTestCase {
       final BytesRef term = terms.get(random().nextInt(terms.size()));
       System.out.println("TEST: search " + term);
       final long t0 = System.currentTimeMillis();
-      final int count = s.search(new TermQuery(new Term("field", term)), 1).totalHits;
+      final long count = s.search(new TermQuery(new Term("field", term)), 1).totalHits;
       if (count <= 0) {
         System.out.println(" FAILED: count=" + count);
         failed = true;
@@ -76,7 +76,7 @@ public class TestIndexWriterDelete extends LuceneTestCase {
     modifier.commit();
 
     Term term = new Term("city", "Amsterdam");
-    int hitCount = getHitCount(dir, term);
+    long hitCount = getHitCount(dir, term);
     assertEquals(1, hitCount);
     if (VERBOSE) {
       System.out.println("\nTEST: now delete by term=" + term);
@@ -177,7 +177,7 @@ public class TestIndexWriterDelete extends LuceneTestCase {
     IndexReader reader = DirectoryReader.open(dir);
     assertEquals(1, reader.numDocs());
 
-    int hitCount = getHitCount(dir, new Term("id", String.valueOf(id)));
+    long hitCount = getHitCount(dir, new Term("id", String.valueOf(id)));
     assertEquals(1, hitCount);
     reader.close();
     modifier.close();
@@ -470,10 +470,10 @@ public class TestIndexWriterDelete extends LuceneTestCase {
     modifier.addDocument(doc);
   }
 
-  private int getHitCount(Directory dir, Term term) throws IOException {
+  private long getHitCount(Directory dir, Term term) throws IOException {
     IndexReader reader = DirectoryReader.open(dir);
     IndexSearcher searcher = newSearcher(reader);
-    int hitCount = searcher.search(new TermQuery(term), 1000).totalHits;
+    long hitCount = searcher.search(new TermQuery(term), 1000).totalHits;
     reader.close();
     return hitCount;
   }
@@ -802,7 +802,7 @@ public class TestIndexWriterDelete extends LuceneTestCase {
     // one of the two files hits
 
     Term term = new Term("city", "Amsterdam");
-    int hitCount = getHitCount(dir, term);
+    long hitCount = getHitCount(dir, term);
     assertEquals(1, hitCount);
 
     // open the writer again (closed above)
@@ -768,7 +768,7 @@ public class TestIndexWriterReader extends LuceneTestCase {
       threads[i].start();
     }
 
-    int lastCount = 0;
+    long lastCount = 0;
     while(threadDone.get() == false) {
       DirectoryReader r2 = DirectoryReader.openIfChanged(r);
       if (r2 != null) {
@@ -776,7 +776,7 @@ public class TestIndexWriterReader extends LuceneTestCase {
         r = r2;
         Query q = new TermQuery(new Term("indexname", "test"));
         IndexSearcher searcher = newSearcher(r);
-        final int count = searcher.search(q, 10).totalHits;
+        final long count = searcher.search(q, 10).totalHits;
         assertTrue(count >= lastCount);
         lastCount = count;
       }
@@ -793,7 +793,7 @@ public class TestIndexWriterReader extends LuceneTestCase {
     }
     Query q = new TermQuery(new Term("indexname", "test"));
     IndexSearcher searcher = newSearcher(r);
-    final int count = searcher.search(q, 10).totalHits;
+    final long count = searcher.search(q, 10).totalHits;
     assertTrue(count >= lastCount);
 
     assertEquals(0, excs.size());
@@ -107,7 +107,7 @@ public class TestManyFields extends LuceneTestCase {
 
     IndexReader reader = DirectoryReader.open(dir);
     IndexSearcher searcher = newSearcher(reader);
-    int totalHits = searcher.search(new TermQuery(new Term("field", "aaa")), 1).totalHits;
+    long totalHits = searcher.search(new TermQuery(new Term("field", "aaa")), 1).totalHits;
     assertEquals(n*100, totalHits);
     reader.close();
 
@@ -85,7 +85,7 @@ public class TestAutomatonQuery extends LuceneTestCase {
     return new Term(FN, value);
   }
 
-  private int automatonQueryNrHits(AutomatonQuery query) throws IOException {
+  private long automatonQueryNrHits(AutomatonQuery query) throws IOException {
     if (VERBOSE) {
       System.out.println("TEST: run aq=" + query);
     }
@@ -94,7 +94,7 @@ public class TestAutomatonQueryUnicode extends LuceneTestCase {
     return new Term(FN, value);
   }
 
-  private int automatonQueryNrHits(AutomatonQuery query) throws IOException {
+  private long automatonQueryNrHits(AutomatonQuery query) throws IOException {
     return searcher.search(query, 5).totalHits;
   }
 
@@ -51,7 +51,7 @@ public class TestBooleanOr extends LuceneTestCase {
   private IndexReader reader;
 
 
-  private int search(Query q) throws IOException {
+  private long search(Query q) throws IOException {
     QueryUtils.check(random(), q,searcher);
     return searcher.search(q, 1000).totalHits;
   }
@@ -142,7 +142,7 @@ public class TestLRUQueryCache extends LuceneTestCase {
         TotalHitCountCollector collector = new TotalHitCountCollector();
         searcher.search(q, collector); // will use the cache
         final int totalHits1 = collector.getTotalHits();
-        final int totalHits2 = searcher.search(q, 1).totalHits; // will not use the cache because of scores
+        final long totalHits2 = searcher.search(q, 1).totalHits; // will not use the cache because of scores
         assertEquals(totalHits2, totalHits1);
       } finally {
         mgr.release(searcher);
@@ -68,7 +68,7 @@ public class TestRegexpQuery extends LuceneTestCase {
     return new Term(FN, value);
   }
 
-  private int regexQueryNrHits(String regex) throws IOException {
+  private long regexQueryNrHits(String regex) throws IOException {
     RegexpQuery query = new RegexpQuery(newTerm(regex));
     return searcher.search(query, 5).totalHits;
   }
@@ -271,7 +271,7 @@ public class TestTermRangeQuery extends LuceneTestCase {
     initializeIndex(new String[] {"A", "B", "", "C", "D"}, analyzer);
     IndexReader reader = DirectoryReader.open(dir);
     IndexSearcher searcher = newSearcher(reader);
-    int numHits = searcher.search(query, 1000).totalHits;
+    long numHits = searcher.search(query, 1000).totalHits;
     // When Lucene-38 is fixed, use the assert on the next line:
     assertEquals("A,B,<empty string>,C,D => A, B & <empty string> are in range", 3, numHits);
     // until Lucene-38 is fixed, use this assert:
@@ -306,7 +306,7 @@ public class TestTermRangeQuery extends LuceneTestCase {
     initializeIndex(new String[]{"A", "B", "","C", "D"}, analyzer);
     IndexReader reader = DirectoryReader.open(dir);
     IndexSearcher searcher = newSearcher(reader);
-    int numHits = searcher.search(query, 1000).totalHits;
+    long numHits = searcher.search(query, 1000).totalHits;
     // When Lucene-38 is fixed, use the assert on the next line:
     assertEquals("A,B,<empty string>,C,D => A,B,<empty string>,C in range", 4, numHits);
     // until Lucene-38 is fixed, use this assert
@@ -333,7 +333,7 @@ public class TestSpans extends LuceneTestCase {
   }
 
   // LUCENE-1404
-  private int hitCount(IndexSearcher searcher, String word) throws Throwable {
+  private long hitCount(IndexSearcher searcher, String word) throws Throwable {
     return searcher.search(new TermQuery(new Term("text", word)), 10).totalHits;
   }
 
@@ -152,7 +152,7 @@ public class SearchFiles {
     TopDocs results = searcher.search(query, 5 * hitsPerPage);
     ScoreDoc[] hits = results.scoreDocs;
 
-    int numTotalHits = results.totalHits;
+    int numTotalHits = Math.toIntExact(results.totalHits);
     System.out.println(numTotalHits + " total matching documents");
 
     int start = 0;
@@ -39,7 +39,7 @@ public class GroupDocs<T> {
   public final ScoreDoc[] scoreDocs;
 
   /** Total hits within this group */
-  public final int totalHits;
+  public final long totalHits;
 
   /** Matches the groupSort passed to {@link
    *  FirstPassGroupingCollector}. */
@@ -47,7 +47,7 @@ public class GroupDocs<T> {
 
   public GroupDocs(float score,
                    float maxScore,
-                   int totalHits,
+                   long totalHits,
                    ScoreDoc[] scoreDocs,
                    T groupValue,
                    Object[] groupSortValues) {
@@ -942,7 +942,7 @@ public class TestQueryParser extends QueryParserTestBase {
     DirectoryReader ir = DirectoryReader.open(ramDir);
     IndexSearcher is = new IndexSearcher(ir);
 
-    int hits = is.search(q, 10).totalHits;
+    long hits = is.search(q, 10).totalHits;
     ir.close();
     ramDir.close();
     if (hits == 1){
@@ -105,7 +105,7 @@ public class TestCoreParser extends LuceneTestCase {
 
   public void testCustomFieldUserQueryXML() throws ParserException, IOException {
     Query q = parse("UserInputQueryCustomField.xml");
-    int h = searcher().search(q, 1000).totalHits;
+    long h = searcher().search(q, 1000).totalHits;
     assertEquals("UserInputQueryCustomField should produce 0 result ", 0, h);
   }
 
@@ -93,7 +93,7 @@ public class TestQueryTemplateManager extends LuceneTestCase {
       Query q = builder.getQuery(doc.getDocumentElement());
 
       //Run the query
-      int h = searcher.search(q, 1000).totalHits;
+      long h = searcher.search(q, 1000).totalHits;
 
       //Check we have the expected number of results
       int expectedHits = Integer.parseInt(queryFormProperties.getProperty("expectedMatches"));
@@ -212,7 +212,7 @@ class SimpleReplicaNode extends ReplicaNode {
       IndexSearcher searcher = mgr.acquire();
       try {
         long version = ((DirectoryReader) searcher.getIndexReader()).getVersion();
-        int hitCount = searcher.search(new TermQuery(new Term("body", "the")), 1).totalHits;
+        int hitCount = searcher.count(new TermQuery(new Term("body", "the")));
         //node.message("version=" + version + " searcher=" + searcher);
         out.writeVLong(version);
         out.writeVInt(hitCount);
@@ -229,7 +229,7 @@ class SimpleReplicaNode extends ReplicaNode {
       IndexSearcher searcher = mgr.acquire();
       try {
         long version = ((DirectoryReader) searcher.getIndexReader()).getVersion();
-        int hitCount = searcher.search(new MatchAllDocsQuery(), 1).totalHits;
+        int hitCount = searcher.count(new MatchAllDocsQuery());
         //node.message("version=" + version + " searcher=" + searcher);
         out.writeVLong(version);
         out.writeVInt(hitCount);
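Note the pattern in these two hunks: where a caller only needs the number of matches, the commit switches from search(query, 1).totalHits to IndexSearcher#count, which returns an int directly and skips collecting top hits. The idiom reduced to a minimal sketch (the wrapper method is illustrative):

    import java.io.IOException;
    import org.apache.lucene.search.IndexSearcher;
    import org.apache.lucene.search.MatchAllDocsQuery;

    static int numDocsMatching(IndexSearcher searcher) throws IOException {
      // count() only tallies matches, so it stays an int and needs no cast
      return searcher.count(new MatchAllDocsQuery());
    }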
@@ -190,7 +190,7 @@ public class SpatialExample extends LuceneTestCase {
   }
 
   private void assertDocMatchedIds(IndexSearcher indexSearcher, TopDocs docs, int... ids) throws IOException {
-    int[] gotIds = new int[docs.totalHits];
+    int[] gotIds = new int[Math.toIntExact(docs.totalHits)];
     for (int i = 0; i < gotIds.length; i++) {
       gotIds[i] = indexSearcher.doc(docs.scoreDocs[i].doc).getField("id").numericValue().intValue();
     }
@@ -205,10 +205,10 @@ public abstract class SpatialTestCase extends LuceneTestCase {
 
   protected static class SearchResults {
 
-    public int numFound;
+    public long numFound;
     public List<SearchResult> results;
 
-    public SearchResults(int numFound, List<SearchResult> results) {
+    public SearchResults(long numFound, List<SearchResult> results) {
       this.numFound = numFound;
       this.results = results;
     }
@@ -126,7 +126,7 @@ public class TestSuggestField extends LuceneTestCase {
     SuggestIndexSearcher suggestIndexSearcher = new SuggestIndexSearcher(reader);
     PrefixCompletionQuery query = new PrefixCompletionQuery(analyzer, new Term("suggest_field", "ab"));
     TopSuggestDocs lookupDocs = suggestIndexSearcher.suggest(query, 3, false);
-    assertThat(lookupDocs.totalHits, equalTo(0));
+    assertThat(lookupDocs.totalHits, equalTo(0L));
     reader.close();
     iw.close();
   }
@@ -470,7 +470,7 @@ public class TestSuggestField extends LuceneTestCase {
     // calling suggest with filter that does not match any documents should early terminate
     PrefixCompletionQuery query = new PrefixCompletionQuery(analyzer, new Term("suggest_field", "abc_"), filter);
     TopSuggestDocs suggest = indexSearcher.suggest(query, num, false);
-    assertThat(suggest.totalHits, equalTo(0));
+    assertThat(suggest.totalHits, equalTo(0L));
     reader.close();
     iw.close();
   }
@@ -498,7 +498,7 @@ public class TestSuggestField extends LuceneTestCase {
     SuggestIndexSearcher indexSearcher = new SuggestIndexSearcher(reader);
     PrefixCompletionQuery query = new PrefixCompletionQuery(analyzer, new Term("suggest_field", "abc_"));
     TopSuggestDocs suggest = indexSearcher.suggest(query, num, false);
-    assertThat(suggest.totalHits, equalTo(0));
+    assertThat(suggest.totalHits, equalTo(0L));
 
     reader.close();
     iw.close();
@@ -31,6 +31,7 @@ import java.util.concurrent.Executors;
 import java.util.concurrent.TimeUnit;
 import java.util.concurrent.atomic.AtomicBoolean;
 import java.util.concurrent.atomic.AtomicInteger;
+import java.util.concurrent.atomic.AtomicLong;
 
 import org.apache.lucene.analysis.MockAnalyzer;
 import org.apache.lucene.document.Document;
@@ -323,7 +324,7 @@ public abstract class ThreadedIndexingAndSearchingTestCase extends LuceneTestCase
   protected void runSearchThreads(final long stopTimeMS) throws Exception {
     final int numThreads = TestUtil.nextInt(random(), 1, 5);
     final Thread[] searchThreads = new Thread[numThreads];
-    final AtomicInteger totHits = new AtomicInteger();
+    final AtomicLong totHits = new AtomicLong();
 
     // silly starting guess:
     final AtomicInteger totTermCount = new AtomicInteger(100);
@@ -664,11 +665,11 @@ public abstract class ThreadedIndexingAndSearchingTestCase extends LuceneTestCase
     }
   }
 
-  private int runQuery(IndexSearcher s, Query q) throws Exception {
+  private long runQuery(IndexSearcher s, Query q) throws Exception {
     s.search(q, 10);
-    int hitCount = s.search(q, 10, new Sort(new SortField("titleDV", SortField.Type.STRING))).totalHits;
+    long hitCount = s.search(q, 10, new Sort(new SortField("titleDV", SortField.Type.STRING))).totalHits;
     final Sort dvSort = new Sort(new SortField("titleDV", SortField.Type.STRING));
-    int hitCount2 = s.search(q, 10, dvSort).totalHits;
+    long hitCount2 = s.search(q, 10, dvSort).totalHits;
     assertEquals(hitCount, hitCount2);
     return hitCount;
   }
@@ -110,7 +110,7 @@ public class LTRRescorer extends Rescorer {
       }
     });
 
-    topN = Math.min(topN, firstPassTopDocs.totalHits);
+    topN = Math.toIntExact(Math.min(topN, firstPassTopDocs.totalHits));
     final ScoreDoc[] reranked = new ScoreDoc[topN];
     final List<LeafReaderContext> leaves = searcher.getIndexReader().leaves();
     final LTRScoringQuery.ModelWeight modelWeight = (LTRScoringQuery.ModelWeight) searcher
@@ -234,7 +234,7 @@ public class RecoveryStrategy implements Runnable, Closeable {
       LOG.debug(core.getCoreContainer()
           .getZkController().getNodeName()
           + " replicated "
-          + searcher.search(new MatchAllDocsQuery(), 1).totalHits
+          + searcher.count(new MatchAllDocsQuery())
           + " from "
          + leaderUrl
          + " gen:"
@@ -781,7 +781,7 @@ public class RecoveryStrategy implements Runnable, Closeable {
     RefCounted<SolrIndexSearcher> searchHolder = core.getNewestSearcher(false);
     SolrIndexSearcher searcher = searchHolder.get();
     try {
-      final int totalHits = searcher.search(new MatchAllDocsQuery(), 1).totalHits;
+      final int totalHits = searcher.count(new MatchAllDocsQuery());
       final String nodeName = core.getCoreContainer().getZkController().getNodeName();
       LOG.debug("[{}] {} [{} total hits]", nodeName, op, totalHits);
     } finally {
@@ -171,12 +171,12 @@ public class SpellCheckComponent extends SearchComponent implements SolrCoreAware
       customParams.add(getCustomParams(checkerName, params));
     }
 
-    Integer hitsInteger = (Integer) rb.rsp.getToLog().get("hits");
+    Number hitsLong = (Number) rb.rsp.getToLog().get("hits");
     long hits = 0;
-    if (hitsInteger == null) {
+    if (hitsLong == null) {
       hits = rb.getNumberDocumentsFound();
     } else {
-      hits = hitsInteger.longValue();
+      hits = hitsLong.longValue();
     }
 
     SpellingResult spellingResult = null;
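On the Solr side, the "hits" entry logged by the query component may now be boxed as either an Integer or a Long depending on which code produced it, so it is read through the common Number supertype. The same defensive idiom, reduced to its core (rb names the ResponseBuilder, as in the hunk above):

    Object raw = rb.rsp.getToLog().get("hits");
    long hits = (raw == null)
        ? rb.getNumberDocumentsFound()   // nothing logged: fall back to the doc count
        : ((Number) raw).longValue();    // works for both Integer and Long boxes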
@@ -543,7 +543,7 @@ public class SpellCheckComponent extends SearchComponent implements SolrCoreAware
       NamedList expandedCollation = (NamedList) o;
       SpellCheckCollation coll = new SpellCheckCollation();
       coll.setCollationQuery((String) expandedCollation.get("collationQuery"));
-      coll.setHits((Integer) expandedCollation.get("hits"));
+      coll.setHits(((Number) expandedCollation.get("hits")).longValue());
       if(maxCollationTries>0)
       {
         coll.setInternalRank((Integer) expandedCollation.get("collationInternalRank"));
@@ -45,7 +45,7 @@ public interface DocList extends DocSet {
   * Hence it's always true that matches() >= size()
   * @return number of matches for the search(query & any filters)
   */
-  public int matches();
+  public long matches();
 
 
  /***
@@ -36,7 +36,7 @@ public class DocSlice extends DocSetBase implements DocList {
   final int[] docs; // a slice of documents (docs 0-100 of the query)
 
   final float[] scores; // optional score list
-  final int matches;
+  final long matches;
   final float maxScore;
 
   /**
@@ -48,7 +48,7 @@ public class DocSlice extends DocSetBase implements DocList {
   * @param scores array of scores that corresponds to docs, may be null
   * @param matches total number of matches for the query
   */
-  public DocSlice(int offset, int len, int[] docs, float[] scores, int matches, float maxScore) {
+  public DocSlice(int offset, int len, int[] docs, float[] scores, long matches, float maxScore) {
     this.offset=offset;
     this.len=len;
     this.docs=docs;
@@ -87,7 +87,7 @@ public class DocSlice extends DocSetBase implements DocList {
   @Override
   public int size() { return len; }
   @Override
-  public int matches() { return matches; }
+  public long matches() { return matches; }
 
 
   @Override
@@ -605,7 +605,7 @@ public class Grouping {
   }
 
   protected DocList getDocList(GroupDocs groups) {
-    int max = groups.totalHits;
+    int max = Math.toIntExact(groups.totalHits);
     int off = groupOffset;
     int len = docsPerGroup;
     if (format == Format.simple) {
@@ -101,7 +101,7 @@ public class TopGroupsResultTransformer implements ShardResultTransformer<List<C
       String key = entry.getKey();
       NamedList commandResult = entry.getValue();
       Integer totalGroupedHitCount = (Integer) commandResult.get("totalGroupedHitCount");
-      Integer totalHits = (Integer) commandResult.get("totalHits");
+      Number totalHits = (Number) commandResult.get("totalHits"); // previously Integer now Long
       if (totalHits != null) {
         Integer matches = (Integer) commandResult.get("matches");
         Float maxScore = (Float) commandResult.get("maxScore");
@@ -114,9 +114,9 @@ public class TopGroupsResultTransformer implements ShardResultTransformer<List<C
         ScoreDoc[] scoreDocs = transformToNativeShardDoc(documents, groupSort, shard, schema);
         final TopDocs topDocs;
         if (withinGroupSort.equals(Sort.RELEVANCE)) {
-          topDocs = new TopDocs(totalHits, scoreDocs, maxScore);
+          topDocs = new TopDocs(totalHits.longValue(), scoreDocs, maxScore);
         } else {
-          topDocs = new TopFieldDocs(totalHits, scoreDocs, withinGroupSort.getSort(), maxScore);
+          topDocs = new TopFieldDocs(totalHits.longValue(), scoreDocs, withinGroupSort.getSort(), maxScore);
         }
         result.put(key, new QueryCommandResult(topDocs, matches));
         continue;
@@ -129,7 +129,7 @@ public class TopGroupsResultTransformer implements ShardResultTransformer<List<C
       String groupValue = commandResult.getName(i);
       @SuppressWarnings("unchecked")
       NamedList<Object> groupResult = (NamedList<Object>) commandResult.getVal(i);
-      Integer totalGroupHits = (Integer) groupResult.get("totalHits");
+      Number totalGroupHits = (Number) groupResult.get("totalHits"); // previously Integer now Long
       Float maxScore = (Float) groupResult.get("maxScore");
       if (maxScore == null) {
         maxScore = Float.NaN;
@@ -140,7 +140,7 @@ public class TopGroupsResultTransformer implements ShardResultTransformer<List<C
       ScoreDoc[] scoreDocs = transformToNativeShardDoc(documents, withinGroupSort, shard, schema);
 
       BytesRef groupValueRef = groupValue != null ? new BytesRef(groupValue) : null;
-      groupDocs.add(new GroupDocs<>(Float.NaN, maxScore, totalGroupHits, scoreDocs, groupValueRef, null));
+      groupDocs.add(new GroupDocs<>(Float.NaN, maxScore, totalGroupHits.longValue(), scoreDocs, groupValueRef, null));
     }
 
     @SuppressWarnings("unchecked")
@@ -20,7 +20,7 @@ import org.apache.solr.common.util.NamedList;
 
 public class SpellCheckCollation implements Comparable<SpellCheckCollation> {
   private NamedList<String> misspellingsAndCorrections;
-  private int hits;
+  private long hits;
   private int internalRank;
   private String collationQuery;
 
@@ -42,11 +42,11 @@ public class SpellCheckCollation implements Comparable<SpellCheckCollation> {
     this.misspellingsAndCorrections = misspellingsAndCorrections;
   }
 
-  public int getHits() {
+  public long getHits() {
     return hits;
   }
 
-  public void setHits(int hits) {
+  public void setHits(long hits) {
     this.hits = hits;
   }
 
@@ -93,7 +93,7 @@ public class SpellCheckCollator {
 
       PossibilityIterator.RankedSpellPossibility possibility = possibilityIter.next();
       String collationQueryStr = getCollation(originalQuery, possibility.corrections);
-      int hits = 0;
+      long hits = 0;
 
       if (verifyCandidateWithQuery) {
         tryNo++;
@@ -162,7 +162,7 @@ public class SpellCheckCollator {
             checkResponse.setFieldFlags(f |= SolrIndexSearcher.TERMINATE_EARLY);
           }
           queryComponent.process(checkResponse);
-          hits = (Integer) checkResponse.rsp.getToLog().get("hits");
+          hits = ((Number) checkResponse.rsp.getToLog().get("hits")).longValue();
         } catch (EarlyTerminatingCollectorException etce) {
           assert (docCollectionLimit > 0);
           assert 0 < etce.getNumberScanned();
@@ -171,7 +171,7 @@ public class SpellCheckCollator {
           if (etce.getNumberScanned() == maxDocId) {
             hits = etce.getNumberCollected();
           } else {
-            hits = (int) ( ((float)( maxDocId * etce.getNumberCollected() ))
+            hits = (long) ( ((float)( maxDocId * etce.getNumberCollected() ))
                           / (float)etce.getNumberScanned() );
           }
         } catch (Exception e) {
@@ -303,7 +303,7 @@ public class TestRangeQuery extends SolrTestCaseJ4 {
 
     // now build some random queries (against *any* field) and validate that using it in a DBQ changes
     // the index by the expected number of docs
-    int numDocsLeftInIndex = numDocs;
+    long numDocsLeftInIndex = numDocs;
     final int numDBQs= atLeast(10);
     for (int i=0; i < numDBQs; i++) {
       int lower = TestUtil.nextInt(random(), 2 * l, u);
@@ -337,7 +337,7 @@ public class TestRangeQuery extends SolrTestCaseJ4 {
     assertU(commit());
     try (SolrQueryRequest req = req("q","*:*","rows","0","_trace_after_dbq",dbq)) {
       SolrQueryResponse qr = h.queryAndResponse(handler, req);
-      final int allDocsFound = ((ResultContext)qr.getResponse()).getDocList().matches();
+      final long allDocsFound = ((ResultContext)qr.getResponse()).getDocList().matches();
       assertEquals(dbq, numDocsLeftInIndex, allDocsFound);
     }
   }
@@ -396,8 +396,7 @@ public class SpellCheckCollatorTest extends SolrTestCaseJ4 {
       assertTrue(!usedcollations.contains(multipleCollation));
       usedcollations.add(multipleCollation);
 
-      int hits = (Integer) expandedCollation.get("hits");
-      assertTrue(hits == 1);
+      assertEquals(new Long(1L), expandedCollation.get("hits"));
 
       NamedList misspellingsAndCorrections = (NamedList) expandedCollation.get("misspellingsAndCorrections");
       assertTrue(misspellingsAndCorrections.size() == 3);
@@ -473,7 +472,7 @@ public class SpellCheckCollatorTest extends SolrTestCaseJ4 {
      */
      "//lst[@name='spellcheck']/bool[@name='correctlySpelled']='false'",
      "//lst[@name='spellcheck']/lst[@name='collations']/lst[@name='collation']/str[@name='collationQuery']='teststop:(flew AND from AND heathrow)'",
-     "//lst[@name='spellcheck']/lst[@name='collations']/lst[@name='collation']/int[@name='hits']=1",
+     "//lst[@name='spellcheck']/lst[@name='collations']/lst[@name='collation']/long[@name='hits']=1",
      "//lst[@name='spellcheck']/lst[@name='collations']/lst[@name='collation']/lst[@name='misspellingsAndCorrections']/str[@name='form']='from'"
     );
 
@@ -497,7 +496,7 @@ public class SpellCheckCollatorTest extends SolrTestCaseJ4 {
      "//lst[@name='spellcheck']/lst[@name='suggestions']/lst[@name='june']/arr[@name='suggestion']/lst/str[@name='word']='jane'",
      "//lst[@name='spellcheck']/bool[@name='correctlySpelled']='false'",
      "//lst[@name='spellcheck']/lst[@name='collations']/lst[@name='collation']/str[@name='collationQuery']='teststop:(jane AND customs)'",
-     "//lst[@name='spellcheck']/lst[@name='collations']/lst[@name='collation']/int[@name='hits']=1",
+     "//lst[@name='spellcheck']/lst[@name='collations']/lst[@name='collation']/long[@name='hits']=1",
      "//lst[@name='spellcheck']/lst[@name='collations']/lst[@name='collation']/lst[@name='misspellingsAndCorrections']/str[@name='june']='jane'"
     );
     //SOLR-5090, alternativeTermCount==0 was being evaluated, sometimes would throw NPE
@@ -529,7 +528,7 @@ public class SpellCheckCollatorTest extends SolrTestCaseJ4 {
     assertQ(req(reusedParams,
        CommonParams.Q, "teststop:metnoia")
        , xpathPrefix + "str[@name='collationQuery']='teststop:metanoia'"
-       , xpathPrefix + "int[@name='hits']=6"
+       , xpathPrefix + "long[@name='hits']=6"
        );
 
     // specifying 0 means "exact" same as default, but specifing a value greater
@@ -540,7 +539,7 @@ public class SpellCheckCollatorTest extends SolrTestCaseJ4 {
        CommonParams.Q, "teststop:metnoia",
        SpellingParams.SPELLCHECK_COLLATE_MAX_COLLECT_DOCS, val)
        , xpathPrefix + "str[@name='collationQuery']='teststop:metanoia'"
-       , xpathPrefix + "int[@name='hits']=6"
+       , xpathPrefix + "long[@name='hits']=6"
        );
     }
 
@@ -553,7 +552,7 @@ public class SpellCheckCollatorTest extends SolrTestCaseJ4 {
        CommonParams.Q, "teststop:metnoia",
        SpellingParams.SPELLCHECK_COLLATE_MAX_COLLECT_DOCS, ""+val)
        , xpathPrefix + "str[@name='collationQuery']='teststop:metanoia'"
-       , xpathPrefix + "int[@name='hits' and . <= 17 and 0 < .]"
+       , xpathPrefix + "long[@name='hits' and . <= 17 and 0 < .]"
        );
     }
 
@@ -570,7 +569,7 @@ public class SpellCheckCollatorTest extends SolrTestCaseJ4 {
        CommonParams.Q, "teststop:everother",
        SpellingParams.SPELLCHECK_COLLATE_MAX_COLLECT_DOCS, ""+val)
        , xpathPrefix + "str[@name='collationQuery']='teststop:everyother'"
-       , xpathPrefix + "int[@name='hits' and " + min + " <= . and . <= " + max + "]"
+       , xpathPrefix + "long[@name='hits' and " + min + " <= . and . <= " + max + "]"
        );
     }
 
@@ -69,7 +69,7 @@ public class SpellCheckResponse {
       NamedList<Object> expandedCollation = (NamedList<Object>) o;
       String collationQuery
         = (String) expandedCollation.get("collationQuery");
-      int hits = (Integer) expandedCollation.get("hits");
+      long hits = ((Number) expandedCollation.get("hits")).longValue();
       @SuppressWarnings("unchecked")
       NamedList<String> misspellingsAndCorrections
         = (NamedList<String>) expandedCollation.get("misspellingsAndCorrections");