leaves = searcher.getIndexReader().leaves();
- Weight weight = searcher.createNormalizedWeight(query, ScoreMode.COMPLETE);
+ Query rewritten = searcher.rewrite(query);
+ Weight weight = searcher.createWeight(rewritten, ScoreMode.COMPLETE, 1);
// Now merge sort docIDs from hits, with reader's leaves:
int hitUpto = 0;
diff --git a/lucene/core/src/java/org/apache/lucene/search/package-info.java b/lucene/core/src/java/org/apache/lucene/search/package-info.java
index 7e53da46620..21832c775dd 100644
--- a/lucene/core/src/java/org/apache/lucene/search/package-info.java
+++ b/lucene/core/src/java/org/apache/lucene/search/package-info.java
@@ -453,8 +453,8 @@
* Assuming we are not sorting (since sorting doesn't affect the raw Lucene score),
* we call one of the search methods of the IndexSearcher, passing in the
* {@link org.apache.lucene.search.Weight Weight} object created by
- * {@link org.apache.lucene.search.IndexSearcher#createNormalizedWeight(org.apache.lucene.search.Query,ScoreMode)
- * IndexSearcher.createNormalizedWeight(Query,boolean)} and the number of results we want.
+ * {@link org.apache.lucene.search.IndexSearcher#createWeight(org.apache.lucene.search.Query,ScoreMode,float)
+ * IndexSearcher.createWeight(Query,ScoreMode,float)} and the number of results we want.
* This method returns a {@link org.apache.lucene.search.TopDocs TopDocs} object,
* which is an internal collection of search results. The IndexSearcher creates
* a {@link org.apache.lucene.search.TopScoreDocCollector TopScoreDocCollector} and
diff --git a/lucene/core/src/test/org/apache/lucene/index/TestAddIndexes.java b/lucene/core/src/test/org/apache/lucene/index/TestAddIndexes.java
index 876328a4a42..48a28e2e791 100644
--- a/lucene/core/src/test/org/apache/lucene/index/TestAddIndexes.java
+++ b/lucene/core/src/test/org/apache/lucene/index/TestAddIndexes.java
@@ -1332,4 +1332,85 @@ public class TestAddIndexes extends LuceneTestCase {
assertEquals("cannot change index sort from <int: \"foo\"> to <string: \"foo\">", message);
IOUtils.close(r1, dir1, w2, dir2);
}
+
+ public void testAddIndexesDVUpdateSameSegmentName() throws Exception {
+ Directory dir1 = newDirectory();
+ IndexWriterConfig iwc1 = newIndexWriterConfig(new MockAnalyzer(random()));
+ IndexWriter w1 = new IndexWriter(dir1, iwc1);
+ Document doc = new Document();
+ doc.add(new StringField("id", "1", Field.Store.YES));
+ doc.add(new StringField("version", "1", Field.Store.YES));
+ doc.add(new NumericDocValuesField("soft_delete", 1));
+ w1.addDocument(doc);
+ w1.flush();
+
+ w1.updateDocValues(new Term("id", "1"), new NumericDocValuesField("soft_delete", 1));
+ w1.commit();
+ w1.close();
+
+ IndexWriterConfig iwc2 = newIndexWriterConfig(new MockAnalyzer(random()));
+ Directory dir2 = newDirectory();
+ IndexWriter w2 = new IndexWriter(dir2, iwc2);
+ w2.addIndexes(dir1);
+ w2.commit();
+ w2.close();
+
+ if (VERBOSE) {
+ System.out.println("\nTEST: now open w3");
+ }
+ IndexWriterConfig iwc3 = newIndexWriterConfig(new MockAnalyzer(random()));
+ if (VERBOSE) {
+ iwc3.setInfoStream(System.out);
+ }
+ IndexWriter w3 = new IndexWriter(dir2, iwc3);
+ w3.close();
+
+ iwc3 = newIndexWriterConfig(new MockAnalyzer(random()));
+ w3 = new IndexWriter(dir2, iwc3);
+ w3.close();
+ dir1.close();
+ dir2.close();
+ }
+
+ public void testAddIndexesDVUpdateNewSegmentName() throws Exception {
+ Directory dir1 = newDirectory();
+ IndexWriterConfig iwc1 = newIndexWriterConfig(new MockAnalyzer(random()));
+ IndexWriter w1 = new IndexWriter(dir1, iwc1);
+ Document doc = new Document();
+ doc.add(new StringField("id", "1", Field.Store.YES));
+ doc.add(new StringField("version", "1", Field.Store.YES));
+ doc.add(new NumericDocValuesField("soft_delete", 1));
+ w1.addDocument(doc);
+ w1.flush();
+
+ w1.updateDocValues(new Term("id", "1"), new NumericDocValuesField("soft_delete", 1));
+ w1.commit();
+ w1.close();
+
+ IndexWriterConfig iwc2 = newIndexWriterConfig(new MockAnalyzer(random()));
+ Directory dir2 = newDirectory();
+ IndexWriter w2 = new IndexWriter(dir2, iwc2);
+ w2.addDocument(new Document());
+ w2.commit();
+
+ w2.addIndexes(dir1);
+ w2.commit();
+ w2.close();
+
+ if (VERBOSE) {
+ System.out.println("\nTEST: now open w3");
+ }
+ IndexWriterConfig iwc3 = newIndexWriterConfig(new MockAnalyzer(random()));
+ if (VERBOSE) {
+ iwc3.setInfoStream(System.out);
+ }
+ IndexWriter w3 = new IndexWriter(dir2, iwc3);
+ w3.close();
+
+ iwc3 = newIndexWriterConfig(new MockAnalyzer(random()));
+ w3 = new IndexWriter(dir2, iwc3);
+ w3.close();
+ dir1.close();
+ dir2.close();
+ }
}
diff --git a/lucene/core/src/test/org/apache/lucene/index/TestDirectoryReaderReopen.java b/lucene/core/src/test/org/apache/lucene/index/TestDirectoryReaderReopen.java
index b38696a3754..468e8e23184 100644
--- a/lucene/core/src/test/org/apache/lucene/index/TestDirectoryReaderReopen.java
+++ b/lucene/core/src/test/org/apache/lucene/index/TestDirectoryReaderReopen.java
@@ -43,6 +43,7 @@ import org.apache.lucene.store.Directory;
import org.apache.lucene.store.MockDirectoryWrapper;
import org.apache.lucene.store.MockDirectoryWrapper.FakeIOException;
import org.apache.lucene.store.RAMDirectory;
+import org.apache.lucene.util.IOUtils;
import org.apache.lucene.util.LuceneTestCase;
import org.apache.lucene.util.TestUtil;
@@ -1013,6 +1014,59 @@ public class TestDirectoryReaderReopen extends LuceneTestCase {
DirectoryReader.openIfChanged(r);
});
}
+
+ public void testReuseUnchangedLeafReaderOnDVUpdate() throws IOException {
+ Directory dir = newDirectory();
+ IndexWriterConfig indexWriterConfig = newIndexWriterConfig();
+ indexWriterConfig.setMergePolicy(NoMergePolicy.INSTANCE);
+ IndexWriter writer = new IndexWriter(dir, indexWriterConfig);
+
+ Document doc = new Document();
+ doc.add(new StringField("id", "1", Field.Store.YES));
+ doc.add(new StringField("version", "1", Field.Store.YES));
+ doc.add(new NumericDocValuesField("some_docvalue", 2));
+ writer.addDocument(doc);
+ doc = new Document();
+ doc.add(new StringField("id", "2", Field.Store.YES));
+ doc.add(new StringField("version", "1", Field.Store.YES));
+ writer.addDocument(doc);
+ writer.commit();
+ DirectoryReader reader = DirectoryReader.open(dir);
+ assertEquals(2, reader.numDocs());
+ assertEquals(2, reader.maxDoc());
+ assertEquals(0, reader.numDeletedDocs());
+
+ doc = new Document();
+ doc.add(new StringField("id", "1", Field.Store.YES));
+ doc.add(new StringField("version", "2", Field.Store.YES));
+ writer.updateDocValues(new Term("id", "1"), new NumericDocValuesField("some_docvalue", 1));
+ writer.commit();
+ DirectoryReader newReader = DirectoryReader.openIfChanged(reader);
+ assertNotSame(newReader, reader);
+ reader.close();
+ reader = newReader;
+ assertEquals(2, reader.numDocs());
+ assertEquals(2, reader.maxDoc());
+ assertEquals(0, reader.numDeletedDocs());
+
+ doc = new Document();
+ doc.add(new StringField("id", "3", Field.Store.YES));
+ doc.add(new StringField("version", "3", Field.Store.YES));
+ writer.updateDocument(new Term("id", "3"), doc);
+ writer.commit();
+
+ newReader = DirectoryReader.openIfChanged(reader);
+ assertNotSame(newReader, reader);
+ assertEquals(2, newReader.getSequentialSubReaders().size());
+ assertEquals(1, reader.getSequentialSubReaders().size());
+ assertSame(reader.getSequentialSubReaders().get(0), newReader.getSequentialSubReaders().get(0));
+ reader.close();
+ reader = newReader;
+ assertEquals(3, reader.numDocs());
+ assertEquals(3, reader.maxDoc());
+ assertEquals(0, reader.numDeletedDocs());
+ IOUtils.close(reader, writer, dir);
+ }
}
diff --git a/lucene/core/src/test/org/apache/lucene/index/TestSoftDeletesDirectoryReaderWrapper.java b/lucene/core/src/test/org/apache/lucene/index/TestSoftDeletesDirectoryReaderWrapper.java
new file mode 100644
index 00000000000..dea7bc977be
--- /dev/null
+++ b/lucene/core/src/test/org/apache/lucene/index/TestSoftDeletesDirectoryReaderWrapper.java
@@ -0,0 +1,199 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.lucene.index;
+
+import java.io.IOException;
+import java.util.HashSet;
+import java.util.Set;
+import java.util.concurrent.atomic.AtomicInteger;
+
+import org.apache.lucene.document.Document;
+import org.apache.lucene.document.Field;
+import org.apache.lucene.document.NumericDocValuesField;
+import org.apache.lucene.document.StringField;
+import org.apache.lucene.search.IndexSearcher;
+import org.apache.lucene.search.TermQuery;
+import org.apache.lucene.store.Directory;
+import org.apache.lucene.util.IOUtils;
+import org.apache.lucene.util.LuceneTestCase;
+
+public class TestSoftDeletesDirectoryReaderWrapper extends LuceneTestCase {
+
+ public void testReuseUnchangedLeafReader() throws IOException {
+ Directory dir = newDirectory();
+ IndexWriterConfig indexWriterConfig = newIndexWriterConfig();
+ String softDeletesField = "soft_delete";
+ indexWriterConfig.setSoftDeletesField(softDeletesField);
+ indexWriterConfig.setMergePolicy(NoMergePolicy.INSTANCE);
+ IndexWriter writer = new IndexWriter(dir, indexWriterConfig);
+
+ Document doc = new Document();
+ doc.add(new StringField("id", "1", Field.Store.YES));
+ doc.add(new StringField("version", "1", Field.Store.YES));
+ writer.addDocument(doc);
+ doc = new Document();
+ doc.add(new StringField("id", "2", Field.Store.YES));
+ doc.add(new StringField("version", "1", Field.Store.YES));
+ writer.addDocument(doc);
+ writer.commit();
+ DirectoryReader reader = new SoftDeletesDirectoryReaderWrapper(DirectoryReader.open(dir), softDeletesField);
+ assertEquals(2, reader.numDocs());
+ assertEquals(2, reader.maxDoc());
+ assertEquals(0, reader.numDeletedDocs());
+
+ doc = new Document();
+ doc.add(new StringField("id", "1", Field.Store.YES));
+ doc.add(new StringField("version", "2", Field.Store.YES));
+ writer.softUpdateDocument(new Term("id", "1"), doc, new NumericDocValuesField("soft_delete", 1));
+
+ doc = new Document();
+ doc.add(new StringField("id", "3", Field.Store.YES));
+ doc.add(new StringField("version", "1", Field.Store.YES));
+ writer.addDocument(doc);
+ writer.commit();
+
+ DirectoryReader newReader = DirectoryReader.openIfChanged(reader);
+ assertNotSame(newReader, reader);
+ reader.close();
+ reader = newReader;
+ assertEquals(3, reader.numDocs());
+ assertEquals(4, reader.maxDoc());
+ assertEquals(1, reader.numDeletedDocs());
+
+ doc = new Document();
+ doc.add(new StringField("id", "1", Field.Store.YES));
+ doc.add(new StringField("version", "3", Field.Store.YES));
+ writer.softUpdateDocument(new Term("id", "1"), doc, new NumericDocValuesField("soft_delete", 1));
+ writer.commit();
+
+ newReader = DirectoryReader.openIfChanged(reader);
+ assertNotSame(newReader, reader);
+ assertEquals(3, newReader.getSequentialSubReaders().size());
+ assertEquals(2, reader.getSequentialSubReaders().size());
+ assertSame(reader.getSequentialSubReaders().get(0), newReader.getSequentialSubReaders().get(0));
+ assertNotSame(reader.getSequentialSubReaders().get(1), newReader.getSequentialSubReaders().get(1));
+ assertTrue(isWrapped(reader.getSequentialSubReaders().get(0)));
+ // last one has no soft deletes
+ assertFalse(isWrapped(reader.getSequentialSubReaders().get(1)));
+
+ assertTrue(isWrapped(newReader.getSequentialSubReaders().get(0)));
+ assertTrue(isWrapped(newReader.getSequentialSubReaders().get(1)));
+ // last one has no soft deletes
+ assertFalse(isWrapped(newReader.getSequentialSubReaders().get(2)));
+ reader.close();
+ reader = newReader;
+ assertEquals(3, reader.numDocs());
+ assertEquals(5, reader.maxDoc());
+ assertEquals(2, reader.numDeletedDocs());
+ IOUtils.close(reader, writer, dir);
+ }
+
+ private boolean isWrapped(LeafReader reader) {
+ return reader instanceof SoftDeletesDirectoryReaderWrapper.SoftDeletesFilterLeafReader;
+ }
+
+ public void testMixSoftAndHardDeletes() throws IOException {
+ Directory dir = newDirectory();
+ IndexWriterConfig indexWriterConfig = newIndexWriterConfig();
+ String softDeletesField = "soft_delete";
+ indexWriterConfig.setSoftDeletesField(softDeletesField);
+ IndexWriter writer = new IndexWriter(dir, indexWriterConfig);
+ Set<Integer> uniqueDocs = new HashSet<>();
+ for (int i = 0; i < 100; i++) {
+ int docId = random().nextInt(5);
+ uniqueDocs.add(docId);
+ Document doc = new Document();
+ doc.add(new StringField("id", String.valueOf(docId), Field.Store.YES));
+ if (docId % 2 == 0) {
+ writer.updateDocument(new Term("id", String.valueOf(docId)), doc);
+ } else {
+ writer.softUpdateDocument(new Term("id", String.valueOf(docId)), doc,
+ new NumericDocValuesField(softDeletesField, 0));
+ }
+ }
+
+ writer.commit();
+ writer.close();
+ DirectoryReader reader = new SoftDeletesDirectoryReaderWrapper(DirectoryReader.open(dir), softDeletesField);
+ assertEquals(uniqueDocs.size(), reader.numDocs());
+ IndexSearcher searcher = new IndexSearcher(reader);
+ for (Integer docId : uniqueDocs) {
+ assertEquals(1, searcher.search(new TermQuery(new Term("id", docId.toString())), 1).totalHits);
+ }
+
+ IOUtils.close(reader, dir);
+ }
+
+ public void testReaderCacheKey() throws IOException {
+ Directory dir = newDirectory();
+ IndexWriterConfig indexWriterConfig = newIndexWriterConfig();
+ String softDeletesField = "soft_delete";
+ indexWriterConfig.setSoftDeletesField(softDeletesField);
+ indexWriterConfig.setMergePolicy(NoMergePolicy.INSTANCE);
+ IndexWriter writer = new IndexWriter(dir, indexWriterConfig);
+
+ Document doc = new Document();
+ doc.add(new StringField("id", "1", Field.Store.YES));
+ doc.add(new StringField("version", "1", Field.Store.YES));
+ writer.addDocument(doc);
+ doc = new Document();
+ doc.add(new StringField("id", "2", Field.Store.YES));
+ doc.add(new StringField("version", "1", Field.Store.YES));
+ writer.addDocument(doc);
+ writer.commit();
+ DirectoryReader reader = new SoftDeletesDirectoryReaderWrapper(DirectoryReader.open(dir), softDeletesField);
+ IndexReader.CacheHelper readerCacheHelper = reader.leaves().get(0).reader().getReaderCacheHelper();
+ AtomicInteger leafCalled = new AtomicInteger(0);
+ AtomicInteger dirCalled = new AtomicInteger(0);
+ readerCacheHelper.addClosedListener(key -> {
+ leafCalled.incrementAndGet();
+ assertSame(key, readerCacheHelper.getKey());
+ });
+ IndexReader.CacheHelper dirReaderCacheHelper = reader.getReaderCacheHelper();
+ dirReaderCacheHelper.addClosedListener(key -> {
+ dirCalled.incrementAndGet();
+ assertSame(key, dirReaderCacheHelper.getKey());
+ });
+ assertEquals(2, reader.numDocs());
+ assertEquals(2, reader.maxDoc());
+ assertEquals(0, reader.numDeletedDocs());
+
+ doc = new Document();
+ doc.add(new StringField("id", "1", Field.Store.YES));
+ doc.add(new StringField("version", "2", Field.Store.YES));
+ writer.softUpdateDocument(new Term("id", "1"), doc, new NumericDocValuesField("soft_delete", 1));
+
+ doc = new Document();
+ doc.add(new StringField("id", "3", Field.Store.YES));
+ doc.add(new StringField("version", "1", Field.Store.YES));
+ writer.addDocument(doc);
+ writer.commit();
+ assertEquals(0, leafCalled.get());
+ assertEquals(0, dirCalled.get());
+ DirectoryReader newReader = DirectoryReader.openIfChanged(reader);
+ assertEquals(0, leafCalled.get());
+ assertEquals(0, dirCalled.get());
+ assertNotSame(newReader.getReaderCacheHelper().getKey(), reader.getReaderCacheHelper().getKey());
+ assertNotSame(newReader, reader);
+ reader.close();
+ reader = newReader;
+ assertEquals(1, dirCalled.get());
+ assertEquals(1, leafCalled.get());
+ IOUtils.close(reader, writer, dir);
+ }
+}
diff --git a/lucene/core/src/test/org/apache/lucene/index/TestStressNRT.java b/lucene/core/src/test/org/apache/lucene/index/TestStressNRT.java
index e6c91b87c51..06aa27756a6 100644
--- a/lucene/core/src/test/org/apache/lucene/index/TestStressNRT.java
+++ b/lucene/core/src/test/org/apache/lucene/index/TestStressNRT.java
@@ -110,7 +110,11 @@ public class TestStressNRT extends LuceneTestCase {
final RandomIndexWriter writer = new RandomIndexWriter(random(), dir, newIndexWriterConfig(new MockAnalyzer(random())), useSoftDeletes);
writer.setDoRandomForceMergeAssert(false);
writer.commit();
- reader = useSoftDeletes ? writer.getReader() : DirectoryReader.open(dir);
+ if (useSoftDeletes) {
+ reader = new SoftDeletesDirectoryReaderWrapper(DirectoryReader.open(dir), writer.w.getConfig().getSoftDeletesField());
+ } else {
+ reader = DirectoryReader.open(dir);
+ }
for (int i=0; i scoringTerms = new HashSet<>();
- searcher.createNormalizedWeight(bq, ScoreMode.COMPLETE).extractTerms(scoringTerms);
+ searcher.createWeight(searcher.rewrite(bq), ScoreMode.COMPLETE, 1).extractTerms(scoringTerms);
assertEquals(new HashSet<>(Arrays.asList(a, b)), scoringTerms);
Set<Term> matchingTerms = new HashSet<>();
- searcher.createNormalizedWeight(bq, ScoreMode.COMPLETE_NO_SCORES).extractTerms(matchingTerms);
+ searcher.createWeight(searcher.rewrite(bq), ScoreMode.COMPLETE_NO_SCORES, 1).extractTerms(matchingTerms);
assertEquals(new HashSet<>(Arrays.asList(a, b, c)), matchingTerms);
}
}
diff --git a/lucene/core/src/test/org/apache/lucene/search/TestBooleanQueryVisitSubscorers.java b/lucene/core/src/test/org/apache/lucene/search/TestBooleanQueryVisitSubscorers.java
index de061a2f8d3..065a6de0019 100644
--- a/lucene/core/src/test/org/apache/lucene/search/TestBooleanQueryVisitSubscorers.java
+++ b/lucene/core/src/test/org/apache/lucene/search/TestBooleanQueryVisitSubscorers.java
@@ -196,7 +196,7 @@ public class TestBooleanQueryVisitSubscorers extends LuceneTestCase {
bq1.add(new TermQuery(new Term(F1, "lucene")), Occur.SHOULD);
bq1.add(new PhraseQuery(F2, "search", "engine"), Occur.SHOULD);
- Weight w1 = scorerSearcher.createNormalizedWeight(bq1.build(), ScoreMode.COMPLETE);
+ Weight w1 = scorerSearcher.createWeight(scorerSearcher.rewrite(bq1.build()), ScoreMode.COMPLETE, 1);
Scorer s1 = w1.scorer(reader.leaves().get(0));
assertEquals(0, s1.iterator().nextDoc());
assertEquals(2, s1.getChildren().size());
@@ -205,7 +205,7 @@ public class TestBooleanQueryVisitSubscorers extends LuceneTestCase {
bq2.add(new TermQuery(new Term(F1, "lucene")), Occur.SHOULD);
bq2.add(new PhraseQuery(F2, "search", "library"), Occur.SHOULD);
- Weight w2 = scorerSearcher.createNormalizedWeight(bq2.build(), ScoreMode.COMPLETE);
+ Weight w2 = scorerSearcher.createWeight(scorerSearcher.rewrite(bq2.build()), ScoreMode.COMPLETE, 1);
Scorer s2 = w2.scorer(reader.leaves().get(0));
assertEquals(0, s2.iterator().nextDoc());
assertEquals(1, s2.getChildren().size());
@@ -218,7 +218,7 @@ public class TestBooleanQueryVisitSubscorers extends LuceneTestCase {
bq.add(new PhraseQuery(F2, "search", "library"), Occur.SHOULD);
bq.setMinimumNumberShouldMatch(2);
- Weight w = scorerSearcher.createNormalizedWeight(bq.build(), ScoreMode.COMPLETE);
+ Weight w = scorerSearcher.createWeight(scorerSearcher.rewrite(bq.build()), ScoreMode.COMPLETE, 1);
Scorer s = w.scorer(reader.leaves().get(0));
assertEquals(0, s.iterator().nextDoc());
assertEquals(2, s.getChildren().size());
diff --git a/lucene/core/src/test/org/apache/lucene/search/TestBooleanRewrites.java b/lucene/core/src/test/org/apache/lucene/search/TestBooleanRewrites.java
index 292dfa9dabf..a8b6399bc1a 100644
--- a/lucene/core/src/test/org/apache/lucene/search/TestBooleanRewrites.java
+++ b/lucene/core/src/test/org/apache/lucene/search/TestBooleanRewrites.java
@@ -93,7 +93,7 @@ public class TestBooleanRewrites extends LuceneTestCase {
BooleanQuery.Builder query2 = new BooleanQuery.Builder();
query2.add(new TermQuery(new Term("field", "a")), Occur.FILTER);
query2.add(new TermQuery(new Term("field", "b")), Occur.SHOULD);
- final Weight weight = searcher.createNormalizedWeight(query2.build(), ScoreMode.COMPLETE);
+ final Weight weight = searcher.createWeight(searcher.rewrite(query2.build()), ScoreMode.COMPLETE, 1);
final Scorer scorer = weight.scorer(reader.leaves().get(0));
assertEquals(0, scorer.iterator().nextDoc());
assertTrue(scorer.getClass().getName(), scorer instanceof FilterScorer);
diff --git a/lucene/core/src/test/org/apache/lucene/search/TestBooleanScorer.java b/lucene/core/src/test/org/apache/lucene/search/TestBooleanScorer.java
index 8a8379be343..d847de64333 100644
--- a/lucene/core/src/test/org/apache/lucene/search/TestBooleanScorer.java
+++ b/lucene/core/src/test/org/apache/lucene/search/TestBooleanScorer.java
@@ -172,7 +172,7 @@ public class TestBooleanScorer extends LuceneTestCase {
.build();
// no scores -> term scorer
- Weight weight = searcher.createNormalizedWeight(query, ScoreMode.COMPLETE_NO_SCORES);
+ Weight weight = searcher.createWeight(searcher.rewrite(query), ScoreMode.COMPLETE_NO_SCORES, 1);
BulkScorer scorer = ((BooleanWeight) weight).booleanScorer(ctx);
assertTrue(scorer instanceof DefaultBulkScorer); // term scorer
@@ -181,7 +181,7 @@ public class TestBooleanScorer extends LuceneTestCase {
.add(new TermQuery(new Term("foo", "bar")), Occur.SHOULD) // existing term
.add(new TermQuery(new Term("foo", "baz")), Occur.SHOULD) // missing term
.build();
- weight = searcher.createNormalizedWeight(query, ScoreMode.COMPLETE);
+ weight = searcher.createWeight(searcher.rewrite(query), ScoreMode.COMPLETE, 1);
scorer = ((BooleanWeight) weight).booleanScorer(ctx);
assertTrue(scorer instanceof DefaultBulkScorer); // term scorer
@@ -210,7 +210,7 @@ public class TestBooleanScorer extends LuceneTestCase {
.add(new TermQuery(new Term("foo", "baz")), Occur.SHOULD)
.add(new TermQuery(new Term("foo", "bar")), Occur.MUST_NOT)
.build();
- Weight weight = searcher.createNormalizedWeight(query, ScoreMode.COMPLETE);
+ Weight weight = searcher.createWeight(searcher.rewrite(query), ScoreMode.COMPLETE, 1);
BulkScorer scorer = ((BooleanWeight) weight).booleanScorer(ctx);
assertTrue(scorer instanceof ReqExclBulkScorer);
@@ -219,7 +219,7 @@ public class TestBooleanScorer extends LuceneTestCase {
.add(new MatchAllDocsQuery(), Occur.SHOULD)
.add(new TermQuery(new Term("foo", "bar")), Occur.MUST_NOT)
.build();
- weight = searcher.createNormalizedWeight(query, ScoreMode.COMPLETE);
+ weight = searcher.createWeight(searcher.rewrite(query), ScoreMode.COMPLETE, 1);
scorer = ((BooleanWeight) weight).booleanScorer(ctx);
assertTrue(scorer instanceof ReqExclBulkScorer);
@@ -227,7 +227,7 @@ public class TestBooleanScorer extends LuceneTestCase {
.add(new TermQuery(new Term("foo", "baz")), Occur.MUST)
.add(new TermQuery(new Term("foo", "bar")), Occur.MUST_NOT)
.build();
- weight = searcher.createNormalizedWeight(query, ScoreMode.COMPLETE);
+ weight = searcher.createWeight(searcher.rewrite(query), ScoreMode.COMPLETE, 1);
scorer = ((BooleanWeight) weight).booleanScorer(ctx);
assertTrue(scorer instanceof ReqExclBulkScorer);
@@ -235,7 +235,7 @@ public class TestBooleanScorer extends LuceneTestCase {
.add(new TermQuery(new Term("foo", "baz")), Occur.FILTER)
.add(new TermQuery(new Term("foo", "bar")), Occur.MUST_NOT)
.build();
- weight = searcher.createNormalizedWeight(query, ScoreMode.COMPLETE);
+ weight = searcher.createWeight(searcher.rewrite(query), ScoreMode.COMPLETE, 1);
scorer = ((BooleanWeight) weight).booleanScorer(ctx);
assertTrue(scorer instanceof ReqExclBulkScorer);
diff --git a/lucene/core/src/test/org/apache/lucene/search/TestConstantScoreQuery.java b/lucene/core/src/test/org/apache/lucene/search/TestConstantScoreQuery.java
index f3382a5bf6c..dbc50af4fbf 100644
--- a/lucene/core/src/test/org/apache/lucene/search/TestConstantScoreQuery.java
+++ b/lucene/core/src/test/org/apache/lucene/search/TestConstantScoreQuery.java
@@ -201,9 +201,9 @@ public class TestConstantScoreQuery extends LuceneTestCase {
PhraseQuery pq = new PhraseQuery("field", "a", "b");
- ConstantScoreQuery q = new ConstantScoreQuery(pq);
+ Query q = searcher.rewrite(new ConstantScoreQuery(pq));
- final Weight weight = searcher.createNormalizedWeight(q, ScoreMode.COMPLETE);
+ final Weight weight = searcher.createWeight(q, ScoreMode.COMPLETE, 1);
final Scorer scorer = weight.scorer(searcher.getIndexReader().leaves().get(0));
assertNotNull(scorer.twoPhaseIterator());
@@ -215,14 +215,14 @@ public class TestConstantScoreQuery extends LuceneTestCase {
public void testExtractTerms() throws Exception {
final IndexSearcher searcher = newSearcher(new MultiReader());
final TermQuery termQuery = new TermQuery(new Term("foo", "bar"));
- final ConstantScoreQuery csq = new ConstantScoreQuery(termQuery);
+ final Query csq = searcher.rewrite(new ConstantScoreQuery(termQuery));
+ final Set<Term> scoringTerms = new HashSet<>();
- searcher.createNormalizedWeight(csq, ScoreMode.COMPLETE).extractTerms(scoringTerms);
+ searcher.createWeight(csq, ScoreMode.COMPLETE, 1).extractTerms(scoringTerms);
assertEquals(Collections.emptySet(), scoringTerms);
+ final Set<Term> matchingTerms = new HashSet<>();
- searcher.createNormalizedWeight(csq, ScoreMode.COMPLETE_NO_SCORES).extractTerms(matchingTerms);
+ searcher.createWeight(csq, ScoreMode.COMPLETE_NO_SCORES, 1).extractTerms(matchingTerms);
assertEquals(Collections.singleton(new Term("foo", "bar")), matchingTerms);
}
}
diff --git a/lucene/core/src/test/org/apache/lucene/search/TestDisjunctionMaxQuery.java b/lucene/core/src/test/org/apache/lucene/search/TestDisjunctionMaxQuery.java
index 70bb9965eba..98f94268018 100644
--- a/lucene/core/src/test/org/apache/lucene/search/TestDisjunctionMaxQuery.java
+++ b/lucene/core/src/test/org/apache/lucene/search/TestDisjunctionMaxQuery.java
@@ -175,7 +175,7 @@ public class TestDisjunctionMaxQuery extends LuceneTestCase {
QueryUtils.check(random(), dq, s);
assertTrue(s.getTopReaderContext() instanceof LeafReaderContext);
- final Weight dw = s.createNormalizedWeight(dq, ScoreMode.COMPLETE);
+ final Weight dw = s.createWeight(s.rewrite(dq), ScoreMode.COMPLETE, 1);
LeafReaderContext context = (LeafReaderContext)s.getTopReaderContext();
final Scorer ds = dw.scorer(context);
final boolean skipOk = ds.iterator().advance(3) != DocIdSetIterator.NO_MORE_DOCS;
@@ -191,7 +191,7 @@ public class TestDisjunctionMaxQuery extends LuceneTestCase {
assertTrue(s.getTopReaderContext() instanceof LeafReaderContext);
QueryUtils.check(random(), dq, s);
- final Weight dw = s.createNormalizedWeight(dq, ScoreMode.COMPLETE);
+ final Weight dw = s.createWeight(s.rewrite(dq), ScoreMode.COMPLETE, 1);
LeafReaderContext context = (LeafReaderContext)s.getTopReaderContext();
final Scorer ds = dw.scorer(context);
assertTrue("firsttime skipTo found no match",
diff --git a/lucene/core/src/test/org/apache/lucene/search/TestDocValuesQueries.java b/lucene/core/src/test/org/apache/lucene/search/TestDocValuesQueries.java
index 3e9cabc48a2..d70c09d5372 100644
--- a/lucene/core/src/test/org/apache/lucene/search/TestDocValuesQueries.java
+++ b/lucene/core/src/test/org/apache/lucene/search/TestDocValuesQueries.java
@@ -230,7 +230,7 @@ public class TestDocValuesQueries extends LuceneTestCase {
SortedNumericDocValuesField.newSlowRangeQuery("foo", 2, 4),
SortedDocValuesField.newSlowRangeQuery("foo", new BytesRef("abc"), new BytesRef("bcd"), random().nextBoolean(), random().nextBoolean()),
SortedSetDocValuesField.newSlowRangeQuery("foo", new BytesRef("abc"), new BytesRef("bcd"), random().nextBoolean(), random().nextBoolean()))) {
- Weight w = searcher.createNormalizedWeight(query, ScoreMode.COMPLETE);
+ Weight w = searcher.createWeight(searcher.rewrite(query), ScoreMode.COMPLETE, 1);
assertNull(w.scorer(searcher.getIndexReader().leaves().get(0)));
}
reader.close();
diff --git a/lucene/core/src/test/org/apache/lucene/search/TestIndexOrDocValuesQuery.java b/lucene/core/src/test/org/apache/lucene/search/TestIndexOrDocValuesQuery.java
index cea9443d286..d784b120e9a 100644
--- a/lucene/core/src/test/org/apache/lucene/search/TestIndexOrDocValuesQuery.java
+++ b/lucene/core/src/test/org/apache/lucene/search/TestIndexOrDocValuesQuery.java
@@ -67,7 +67,7 @@ public class TestIndexOrDocValuesQuery extends LuceneTestCase {
.add(new IndexOrDocValuesQuery(LongPoint.newExactQuery("f2", 2), NumericDocValuesField.newSlowRangeQuery("f2", 2L, 2L)), Occur.MUST)
.build();
- final Weight w1 = searcher.createNormalizedWeight(q1, ScoreMode.COMPLETE);
+ final Weight w1 = searcher.createWeight(searcher.rewrite(q1), ScoreMode.COMPLETE, 1);
final Scorer s1 = w1.scorer(searcher.getIndexReader().leaves().get(0));
assertNotNull(s1.twoPhaseIterator()); // means we use doc values
@@ -77,7 +77,7 @@ public class TestIndexOrDocValuesQuery extends LuceneTestCase {
.add(new IndexOrDocValuesQuery(LongPoint.newExactQuery("f2", 42), NumericDocValuesField.newSlowRangeQuery("f2", 42L, 42L)), Occur.MUST)
.build();
- final Weight w2 = searcher.createNormalizedWeight(q2, ScoreMode.COMPLETE);
+ final Weight w2 = searcher.createWeight(searcher.rewrite(q2), ScoreMode.COMPLETE, 1);
final Scorer s2 = w2.scorer(searcher.getIndexReader().leaves().get(0));
assertNull(s2.twoPhaseIterator()); // means we use points
diff --git a/lucene/core/src/test/org/apache/lucene/search/TestLRUQueryCache.java b/lucene/core/src/test/org/apache/lucene/search/TestLRUQueryCache.java
index 9cbf29ce2cf..b2645ab325e 100644
--- a/lucene/core/src/test/org/apache/lucene/search/TestLRUQueryCache.java
+++ b/lucene/core/src/test/org/apache/lucene/search/TestLRUQueryCache.java
@@ -1141,7 +1141,7 @@ public class TestLRUQueryCache extends LuceneTestCase {
LRUQueryCache cache = new LRUQueryCache(1, Long.MAX_VALUE, context -> true);
// test that the bulk scorer is propagated when a scorer should not be cached
- Weight weight = searcher.createNormalizedWeight(new MatchAllDocsQuery(), ScoreMode.COMPLETE_NO_SCORES);
+ Weight weight = searcher.createWeight(new MatchAllDocsQuery(), ScoreMode.COMPLETE_NO_SCORES, 1);
weight = new WeightWrapper(weight, scorerCalled, bulkScorerCalled);
weight = cache.doCache(weight, NEVER_CACHE);
weight.bulkScorer(leaf);
@@ -1151,7 +1151,7 @@ public class TestLRUQueryCache extends LuceneTestCase {
// test that the doc id set is computed using the bulk scorer
bulkScorerCalled.set(false);
- weight = searcher.createNormalizedWeight(new MatchAllDocsQuery(), ScoreMode.COMPLETE_NO_SCORES);
+ weight = searcher.createWeight(new MatchAllDocsQuery(), ScoreMode.COMPLETE_NO_SCORES, 1);
weight = new WeightWrapper(weight, scorerCalled, bulkScorerCalled);
weight = cache.doCache(weight, QueryCachingPolicy.ALWAYS_CACHE);
weight.scorer(leaf);
@@ -1424,7 +1424,7 @@ public class TestLRUQueryCache extends LuceneTestCase {
AtomicBoolean scorerCreated = new AtomicBoolean(false);
Query query = new DummyQuery2(scorerCreated);
- Weight weight = searcher.createNormalizedWeight(query, ScoreMode.COMPLETE_NO_SCORES);
+ Weight weight = searcher.createWeight(searcher.rewrite(query), ScoreMode.COMPLETE_NO_SCORES, 1);
ScorerSupplier supplier = weight.scorerSupplier(searcher.getIndexReader().leaves().get(0));
assertFalse(scorerCreated.get());
supplier.get(random().nextLong() & 0x7FFFFFFFFFFFFFFFL);
diff --git a/lucene/core/src/test/org/apache/lucene/search/TestMinShouldMatch2.java b/lucene/core/src/test/org/apache/lucene/search/TestMinShouldMatch2.java
index f60435c57a3..c98c164f65c 100644
--- a/lucene/core/src/test/org/apache/lucene/search/TestMinShouldMatch2.java
+++ b/lucene/core/src/test/org/apache/lucene/search/TestMinShouldMatch2.java
@@ -118,7 +118,7 @@ public class TestMinShouldMatch2 extends LuceneTestCase {
}
bq.setMinimumNumberShouldMatch(minShouldMatch);
- BooleanWeight weight = (BooleanWeight) searcher.createNormalizedWeight(bq.build(), ScoreMode.COMPLETE);
+ BooleanWeight weight = (BooleanWeight) searcher.createWeight(searcher.rewrite(bq.build()), ScoreMode.COMPLETE, 1);
switch (mode) {
case DOC_VALUES:
diff --git a/lucene/core/src/test/org/apache/lucene/search/TestPointQueries.java b/lucene/core/src/test/org/apache/lucene/search/TestPointQueries.java
index 08c37352507..7ef25652f23 100644
--- a/lucene/core/src/test/org/apache/lucene/search/TestPointQueries.java
+++ b/lucene/core/src/test/org/apache/lucene/search/TestPointQueries.java
@@ -1903,7 +1903,7 @@ public class TestPointQueries extends LuceneTestCase {
upperBound[i] = value[i] + random().nextInt(1);
}
Query query = IntPoint.newRangeQuery("point", lowerBound, upperBound);
- Weight weight = searcher.createNormalizedWeight(query, ScoreMode.COMPLETE_NO_SCORES);
+ Weight weight = searcher.createWeight(query, ScoreMode.COMPLETE_NO_SCORES, 1);
Scorer scorer = weight.scorer(searcher.getIndexReader().leaves().get(0));
assertEquals(DocIdSetIterator.all(1).getClass(), scorer.iterator().getClass());
@@ -1914,7 +1914,7 @@ public class TestPointQueries extends LuceneTestCase {
reader = w.getReader();
searcher = new IndexSearcher(reader);
searcher.setQueryCache(null);
- weight = searcher.createNormalizedWeight(query, ScoreMode.COMPLETE_NO_SCORES);
+ weight = searcher.createWeight(query, ScoreMode.COMPLETE_NO_SCORES, 1);
scorer = weight.scorer(searcher.getIndexReader().leaves().get(0));
assertFalse(DocIdSetIterator.all(1).getClass().equals(scorer.iterator().getClass()));
diff --git a/lucene/core/src/test/org/apache/lucene/search/TestReqOptSumScorer.java b/lucene/core/src/test/org/apache/lucene/search/TestReqOptSumScorer.java
index d241e727a97..b831c0271f1 100644
--- a/lucene/core/src/test/org/apache/lucene/search/TestReqOptSumScorer.java
+++ b/lucene/core/src/test/org/apache/lucene/search/TestReqOptSumScorer.java
@@ -62,7 +62,7 @@ public class TestReqOptSumScorer extends LuceneTestCase {
.add(new ConstantScoreQuery(new TermQuery(new Term("f", "foo"))), Occur.MUST)
.add(new ConstantScoreQuery(new TermQuery(new Term("f", "bar"))), Occur.SHOULD)
.build();
- Weight weight = searcher.createNormalizedWeight(query, ScoreMode.TOP_SCORES);
+ Weight weight = searcher.createWeight(searcher.rewrite(query), ScoreMode.TOP_SCORES, 1);
LeafReaderContext context = searcher.getIndexReader().leaves().get(0);
Scorer scorer = weight.scorer(context);
diff --git a/lucene/core/src/test/org/apache/lucene/search/TestSearcherManager.java b/lucene/core/src/test/org/apache/lucene/search/TestSearcherManager.java
index cc9a919bebb..c9d7e25549a 100644
--- a/lucene/core/src/test/org/apache/lucene/search/TestSearcherManager.java
+++ b/lucene/core/src/test/org/apache/lucene/search/TestSearcherManager.java
@@ -487,7 +487,7 @@ public class TestSearcherManager extends ThreadedIndexingAndSearchingTestCase {
FilterDirectoryReader reader = new MyFilterDirectoryReader(nrtReader);
assertEquals(nrtReader, reader.getDelegate());
- assertEquals(nrtReader, FilterDirectoryReader.unwrap(reader));
+ assertEquals(FilterDirectoryReader.unwrap(nrtReader), FilterDirectoryReader.unwrap(reader));
SearcherManager mgr = new SearcherManager(reader, null);
for(int i=0;i<10;i++) {
diff --git a/lucene/core/src/test/org/apache/lucene/search/TestTermQuery.java b/lucene/core/src/test/org/apache/lucene/search/TestTermQuery.java
index dd85c62f663..3b47174e288 100644
--- a/lucene/core/src/test/org/apache/lucene/search/TestTermQuery.java
+++ b/lucene/core/src/test/org/apache/lucene/search/TestTermQuery.java
@@ -73,10 +73,10 @@ public class TestTermQuery extends LuceneTestCase {
IndexSearcher noSeekSearcher = new IndexSearcher(noSeekReader);
Query query = new TermQuery(new Term("foo", "bar"));
AssertionError e = expectThrows(AssertionError.class,
- () -> noSeekSearcher.createNormalizedWeight(query, ScoreMode.COMPLETE));
+ () -> noSeekSearcher.createWeight(noSeekSearcher.rewrite(query), ScoreMode.COMPLETE, 1));
assertEquals("no seek", e.getMessage());
- noSeekSearcher.createNormalizedWeight(query, ScoreMode.COMPLETE_NO_SCORES); // no exception
+ noSeekSearcher.createWeight(noSeekSearcher.rewrite(query), ScoreMode.COMPLETE_NO_SCORES, 1); // no exception
IndexSearcher searcher = new IndexSearcher(reader);
// use a collector rather than searcher.count() which would just read the
// doc freq instead of creating a scorer
diff --git a/lucene/core/src/test/org/apache/lucene/search/TestTermScorer.java b/lucene/core/src/test/org/apache/lucene/search/TestTermScorer.java
index 73d3e6a12dd..89fe2a84941 100644
--- a/lucene/core/src/test/org/apache/lucene/search/TestTermScorer.java
+++ b/lucene/core/src/test/org/apache/lucene/search/TestTermScorer.java
@@ -82,7 +82,7 @@ public class TestTermScorer extends LuceneTestCase {
Term allTerm = new Term(FIELD, "all");
TermQuery termQuery = new TermQuery(allTerm);
- Weight weight = indexSearcher.createNormalizedWeight(termQuery, ScoreMode.COMPLETE);
+ Weight weight = indexSearcher.createWeight(termQuery, ScoreMode.COMPLETE, 1);
assertTrue(indexSearcher.getTopReaderContext() instanceof LeafReaderContext);
LeafReaderContext context = (LeafReaderContext)indexSearcher.getTopReaderContext();
BulkScorer ts = weight.bulkScorer(context);
@@ -133,7 +133,7 @@ public class TestTermScorer extends LuceneTestCase {
Term allTerm = new Term(FIELD, "all");
TermQuery termQuery = new TermQuery(allTerm);
- Weight weight = indexSearcher.createNormalizedWeight(termQuery, ScoreMode.COMPLETE);
+ Weight weight = indexSearcher.createWeight(termQuery, ScoreMode.COMPLETE, 1);
assertTrue(indexSearcher.getTopReaderContext() instanceof LeafReaderContext);
LeafReaderContext context = (LeafReaderContext) indexSearcher.getTopReaderContext();
Scorer ts = weight.scorer(context);
@@ -150,7 +150,7 @@ public class TestTermScorer extends LuceneTestCase {
Term allTerm = new Term(FIELD, "all");
TermQuery termQuery = new TermQuery(allTerm);
- Weight weight = indexSearcher.createNormalizedWeight(termQuery, ScoreMode.COMPLETE);
+ Weight weight = indexSearcher.createWeight(termQuery, ScoreMode.COMPLETE, 1);
assertTrue(indexSearcher.getTopReaderContext() instanceof LeafReaderContext);
LeafReaderContext context = (LeafReaderContext) indexSearcher.getTopReaderContext();
Scorer ts = weight.scorer(context);
@@ -199,12 +199,12 @@ public class TestTermScorer extends LuceneTestCase {
// We don't use newSearcher because it sometimes runs checkIndex which loads norms
IndexSearcher indexSearcher = new IndexSearcher(forbiddenNorms);
- Weight weight = indexSearcher.createNormalizedWeight(termQuery, ScoreMode.COMPLETE);
+ Weight weight = indexSearcher.createWeight(termQuery, ScoreMode.COMPLETE, 1);
expectThrows(AssertionError.class, () -> {
weight.scorer(forbiddenNorms.getContext()).iterator().nextDoc();
});
- Weight weight2 = indexSearcher.createNormalizedWeight(termQuery, ScoreMode.COMPLETE_NO_SCORES);
+ Weight weight2 = indexSearcher.createWeight(termQuery, ScoreMode.COMPLETE_NO_SCORES, 1);
// should not fail this time since norms are not necessary
weight2.scorer(forbiddenNorms.getContext()).iterator().nextDoc();
}
diff --git a/lucene/core/src/test/org/apache/lucene/search/TestTopDocsMerge.java b/lucene/core/src/test/org/apache/lucene/search/TestTopDocsMerge.java
index ccc8eb67a2c..0173cf9e368 100644
--- a/lucene/core/src/test/org/apache/lucene/search/TestTopDocsMerge.java
+++ b/lucene/core/src/test/org/apache/lucene/search/TestTopDocsMerge.java
@@ -316,7 +316,7 @@ public class TestTopDocsMerge extends LuceneTestCase {
}
// ... then all shards:
- final Weight w = searcher.createNormalizedWeight(query, ScoreMode.COMPLETE);
+ final Weight w = searcher.createWeight(searcher.rewrite(query), ScoreMode.COMPLETE, 1);
final TopDocs[] shardHits;
if (sort == null) {
diff --git a/lucene/core/src/test/org/apache/lucene/search/TestWANDScorer.java b/lucene/core/src/test/org/apache/lucene/search/TestWANDScorer.java
index 5367dbcd3f0..03285bd6ae0 100644
--- a/lucene/core/src/test/org/apache/lucene/search/TestWANDScorer.java
+++ b/lucene/core/src/test/org/apache/lucene/search/TestWANDScorer.java
@@ -92,7 +92,7 @@ public class TestWANDScorer extends LuceneTestCase {
.build();
Scorer scorer = searcher
- .createNormalizedWeight(query, ScoreMode.TOP_SCORES)
+ .createWeight(searcher.rewrite(query), ScoreMode.TOP_SCORES, 1)
.scorer(searcher.getIndexReader().leaves().get(0));
assertEquals(0, scorer.iterator().nextDoc());
@@ -113,7 +113,7 @@ public class TestWANDScorer extends LuceneTestCase {
assertEquals(DocIdSetIterator.NO_MORE_DOCS, scorer.iterator().nextDoc());
scorer = searcher
- .createNormalizedWeight(query, ScoreMode.TOP_SCORES)
+ .createWeight(searcher.rewrite(query), ScoreMode.TOP_SCORES, 1)
.scorer(searcher.getIndexReader().leaves().get(0));
scorer.setMinCompetitiveScore(4);
@@ -126,7 +126,7 @@ public class TestWANDScorer extends LuceneTestCase {
assertEquals(DocIdSetIterator.NO_MORE_DOCS, scorer.iterator().nextDoc());
scorer = searcher
- .createNormalizedWeight(query, ScoreMode.TOP_SCORES)
+ .createWeight(searcher.rewrite(query), ScoreMode.TOP_SCORES, 1)
.scorer(searcher.getIndexReader().leaves().get(0));
assertEquals(0, scorer.iterator().nextDoc());
@@ -147,7 +147,7 @@ public class TestWANDScorer extends LuceneTestCase {
.build();
scorer = searcher
- .createNormalizedWeight(query, ScoreMode.TOP_SCORES)
+ .createWeight(searcher.rewrite(query), ScoreMode.TOP_SCORES, 1)
.scorer(searcher.getIndexReader().leaves().get(0));
assertEquals(3, scorer.iterator().nextDoc());
@@ -159,7 +159,7 @@ public class TestWANDScorer extends LuceneTestCase {
assertEquals(DocIdSetIterator.NO_MORE_DOCS, scorer.iterator().nextDoc());
scorer = searcher
- .createNormalizedWeight(query, ScoreMode.TOP_SCORES)
+ .createWeight(searcher.rewrite(query), ScoreMode.TOP_SCORES, 1)
.scorer(searcher.getIndexReader().leaves().get(0));
scorer.setMinCompetitiveScore(2);
@@ -177,7 +177,7 @@ public class TestWANDScorer extends LuceneTestCase {
.build();
scorer = searcher
- .createNormalizedWeight(query, ScoreMode.TOP_SCORES)
+ .createWeight(searcher.rewrite(query), ScoreMode.TOP_SCORES, 1)
.scorer(searcher.getIndexReader().leaves().get(0));
assertEquals(0, scorer.iterator().nextDoc());
@@ -192,7 +192,7 @@ public class TestWANDScorer extends LuceneTestCase {
assertEquals(DocIdSetIterator.NO_MORE_DOCS, scorer.iterator().nextDoc());
scorer = searcher
- .createNormalizedWeight(query, ScoreMode.TOP_SCORES)
+ .createWeight(searcher.rewrite(query), ScoreMode.TOP_SCORES, 1)
.scorer(searcher.getIndexReader().leaves().get(0));
scorer.setMinCompetitiveScore(3);
diff --git a/lucene/core/src/test/org/apache/lucene/search/spans/TestNearSpansOrdered.java b/lucene/core/src/test/org/apache/lucene/search/spans/TestNearSpansOrdered.java
index 072d3818490..d536f69d615 100644
--- a/lucene/core/src/test/org/apache/lucene/search/spans/TestNearSpansOrdered.java
+++ b/lucene/core/src/test/org/apache/lucene/search/spans/TestNearSpansOrdered.java
@@ -194,7 +194,7 @@ public class TestNearSpansOrdered extends LuceneTestCase {
*/
public void testSpanNearScorerSkipTo1() throws Exception {
SpanNearQuery q = makeQuery();
- Weight w = searcher.createNormalizedWeight(q, ScoreMode.COMPLETE);
+ Weight w = searcher.createWeight(searcher.rewrite(q), ScoreMode.COMPLETE, 1);
IndexReaderContext topReaderContext = searcher.getTopReaderContext();
LeafReaderContext leave = topReaderContext.leaves().get(0);
Scorer s = w.scorer(leave);
diff --git a/lucene/facet/src/java/org/apache/lucene/facet/DrillSidewaysQuery.java b/lucene/facet/src/java/org/apache/lucene/facet/DrillSidewaysQuery.java
index 130e1d43cea..8630b7eb6c3 100644
--- a/lucene/facet/src/java/org/apache/lucene/facet/DrillSidewaysQuery.java
+++ b/lucene/facet/src/java/org/apache/lucene/facet/DrillSidewaysQuery.java
@@ -84,7 +84,7 @@ class DrillSidewaysQuery extends Query {
final Weight baseWeight = baseQuery.createWeight(searcher, scoreMode, boost);
final Weight[] drillDowns = new Weight[drillDownQueries.length];
for(int dim=0;dim<drillDownQueries.length;dim++) {
- drillDowns[dim] = searcher.createNormalizedWeight(drillDownQueries[dim], ScoreMode.COMPLETE_NO_SCORES);
+ drillDowns[dim] = searcher.createWeight(searcher.rewrite(drillDownQueries[dim]), ScoreMode.COMPLETE_NO_SCORES, 1);
}
diff --git a/lucene/grouping/src/java/org/apache/lucene/search/grouping/GroupingSearch.java b/lucene/grouping/src/java/org/apache/lucene/search/grouping/GroupingSearch.java
--- a/lucene/grouping/src/java/org/apache/lucene/search/grouping/GroupingSearch.java
+++ b/lucene/grouping/src/java/org/apache/lucene/search/grouping/GroupingSearch.java
@@ @@
protected TopGroups<?> groupByDocBlock(IndexSearcher searcher, Query query, int groupOffset, int groupLimit) throws IOException {
int topN = groupOffset + groupLimit;
- final Weight groupEndDocs = searcher.createNormalizedWeight(this.groupEndDocs, ScoreMode.COMPLETE_NO_SCORES);
+ final Query endDocsQuery = searcher.rewrite(this.groupEndDocs);
+ final Weight groupEndDocs = searcher.createWeight(endDocsQuery, ScoreMode.COMPLETE_NO_SCORES, 1);
BlockGroupingCollector c = new BlockGroupingCollector(groupSort, topN, includeScores, groupEndDocs);
searcher.search(query, c);
int topNInsideGroup = groupDocsOffset + groupDocsLimit;
diff --git a/lucene/grouping/src/test/org/apache/lucene/search/grouping/TestGrouping.java b/lucene/grouping/src/test/org/apache/lucene/search/grouping/TestGrouping.java
index f7db6762d51..75cf04f8aa5 100644
--- a/lucene/grouping/src/test/org/apache/lucene/search/grouping/TestGrouping.java
+++ b/lucene/grouping/src/test/org/apache/lucene/search/grouping/TestGrouping.java
@@ -1042,7 +1042,8 @@ public class TestGrouping extends LuceneTestCase {
}
final boolean needsScores = getScores || getMaxScores || docSort == null;
- final BlockGroupingCollector c3 = new BlockGroupingCollector(groupSort, groupOffset+topNGroups, needsScores, sBlocks.createNormalizedWeight(lastDocInBlock, ScoreMode.COMPLETE_NO_SCORES));
+ final BlockGroupingCollector c3 = new BlockGroupingCollector(groupSort, groupOffset+topNGroups, needsScores,
+ sBlocks.createWeight(sBlocks.rewrite(lastDocInBlock), ScoreMode.COMPLETE_NO_SCORES, 1));
final AllGroupsCollector allGroupsCollector2;
final Collector c4;
if (doAllGroups) {
@@ -1163,7 +1164,7 @@ public class TestGrouping extends LuceneTestCase {
System.out.println("TEST: " + subSearchers.length + " shards: " + Arrays.toString(subSearchers) + " canUseIDV=" + canUseIDV);
}
// Run 1st pass collector to get top groups per shard
- final Weight w = topSearcher.createNormalizedWeight(query, getScores || getMaxScores ? ScoreMode.COMPLETE : ScoreMode.COMPLETE_NO_SCORES);
+ final Weight w = topSearcher.createWeight(topSearcher.rewrite(query), getScores || getMaxScores ? ScoreMode.COMPLETE : ScoreMode.COMPLETE_NO_SCORES, 1);
final List<Collection<SearchGroup<BytesRef>>> shardGroups = new ArrayList<>();
List<FirstPassGroupingCollector<?>> firstPassGroupingCollectors = new ArrayList<>();
FirstPassGroupingCollector<?> firstPassCollector = null;
diff --git a/lucene/highlighter/src/java/org/apache/lucene/search/highlight/QueryTermExtractor.java b/lucene/highlighter/src/java/org/apache/lucene/search/highlight/QueryTermExtractor.java
index e0a5c2ceb73..9d3784ae118 100644
--- a/lucene/highlighter/src/java/org/apache/lucene/search/highlight/QueryTermExtractor.java
+++ b/lucene/highlighter/src/java/org/apache/lucene/search/highlight/QueryTermExtractor.java
@@ -129,7 +129,7 @@ public final class QueryTermExtractor
else {
HashSet<Term> nonWeightedTerms = new HashSet<>();
try {
- EMPTY_INDEXSEARCHER.createNormalizedWeight(query, ScoreMode.COMPLETE_NO_SCORES).extractTerms(nonWeightedTerms);
+ EMPTY_INDEXSEARCHER.createWeight(EMPTY_INDEXSEARCHER.rewrite(query), ScoreMode.COMPLETE_NO_SCORES, 1).extractTerms(nonWeightedTerms);
} catch (IOException bogus) {
throw new RuntimeException("Should not happen on an empty index", bogus);
}
diff --git a/lucene/highlighter/src/java/org/apache/lucene/search/highlight/WeightedSpanTermExtractor.java b/lucene/highlighter/src/java/org/apache/lucene/search/highlight/WeightedSpanTermExtractor.java
index fbb59e3d9bf..e0655c5ee58 100644
--- a/lucene/highlighter/src/java/org/apache/lucene/search/highlight/WeightedSpanTermExtractor.java
+++ b/lucene/highlighter/src/java/org/apache/lucene/search/highlight/WeightedSpanTermExtractor.java
@@ -306,7 +306,7 @@ public class WeightedSpanTermExtractor {
q = spanQuery;
}
LeafReaderContext context = getLeafContext();
- SpanWeight w = (SpanWeight) searcher.createNormalizedWeight(q, ScoreMode.COMPLETE_NO_SCORES);
+ SpanWeight w = (SpanWeight) searcher.createWeight(searcher.rewrite(q), ScoreMode.COMPLETE_NO_SCORES, 1);
Bits acceptDocs = context.reader().getLiveDocs();
final Spans spans = w.getSpans(context, SpanWeight.Postings.POSITIONS);
if (spans == null) {
@@ -360,7 +360,7 @@ public class WeightedSpanTermExtractor {
protected void extractWeightedTerms(Map<String,WeightedSpanTerm> terms, Query query, float boost) throws IOException {
Set<Term> nonWeightedTerms = new HashSet<>();
final IndexSearcher searcher = new IndexSearcher(getLeafContext());
- searcher.createNormalizedWeight(query, ScoreMode.COMPLETE_NO_SCORES).extractTerms(nonWeightedTerms);
+ searcher.createWeight(searcher.rewrite(query), ScoreMode.COMPLETE_NO_SCORES, 1).extractTerms(nonWeightedTerms);
for (final Term queryTerm : nonWeightedTerms) {
diff --git a/lucene/highlighter/src/java/org/apache/lucene/search/uhighlight/PhraseHelper.java b/lucene/highlighter/src/java/org/apache/lucene/search/uhighlight/PhraseHelper.java
index 2edb19244c6..ff620b058b7 100644
--- a/lucene/highlighter/src/java/org/apache/lucene/search/uhighlight/PhraseHelper.java
+++ b/lucene/highlighter/src/java/org/apache/lucene/search/uhighlight/PhraseHelper.java
@@ -227,7 +227,7 @@ public class PhraseHelper {
}
};
for (Query query : spanQueries) {
- Weight weight = searcher.createNormalizedWeight(query, ScoreMode.COMPLETE_NO_SCORES);
+ Weight weight = searcher.createWeight(searcher.rewrite(query), ScoreMode.COMPLETE_NO_SCORES, 1);
Scorer scorer = weight.scorer(leafReader.getContext());
if (scorer == null) {
continue;
diff --git a/lucene/highlighter/src/java/org/apache/lucene/search/uhighlight/UnifiedHighlighter.java b/lucene/highlighter/src/java/org/apache/lucene/search/uhighlight/UnifiedHighlighter.java
index 065ad5ce956..cfc918f878c 100644
--- a/lucene/highlighter/src/java/org/apache/lucene/search/uhighlight/UnifiedHighlighter.java
+++ b/lucene/highlighter/src/java/org/apache/lucene/search/uhighlight/UnifiedHighlighter.java
@@ -144,7 +144,7 @@ public class UnifiedHighlighter {
*/
protected static Set<Term> extractTerms(Query query) throws IOException {
Set<Term> queryTerms = new HashSet<>();
- EMPTY_INDEXSEARCHER.createNormalizedWeight(query, ScoreMode.COMPLETE_NO_SCORES).extractTerms(queryTerms);
+ EMPTY_INDEXSEARCHER.createWeight(EMPTY_INDEXSEARCHER.rewrite(query), ScoreMode.COMPLETE_NO_SCORES, 1).extractTerms(queryTerms);
return queryTerms;
}
diff --git a/lucene/join/src/java/org/apache/lucene/search/join/QueryBitSetProducer.java b/lucene/join/src/java/org/apache/lucene/search/join/QueryBitSetProducer.java
index c055ea6e235..e3a3a46a84f 100644
--- a/lucene/join/src/java/org/apache/lucene/search/join/QueryBitSetProducer.java
+++ b/lucene/join/src/java/org/apache/lucene/search/join/QueryBitSetProducer.java
@@ -70,7 +70,8 @@ public class QueryBitSetProducer implements BitSetProducer {
final IndexReaderContext topLevelContext = ReaderUtil.getTopLevelContext(context);
final IndexSearcher searcher = new IndexSearcher(topLevelContext);
searcher.setQueryCache(null);
- final Weight weight = searcher.createNormalizedWeight(query, org.apache.lucene.search.ScoreMode.COMPLETE_NO_SCORES);
+ final Query rewritten = searcher.rewrite(query);
+ final Weight weight = searcher.createWeight(rewritten, org.apache.lucene.search.ScoreMode.COMPLETE_NO_SCORES, 1);
final Scorer s = weight.scorer(context);
if (s == null) {
diff --git a/lucene/join/src/test/org/apache/lucene/search/join/TestBlockJoin.java b/lucene/join/src/test/org/apache/lucene/search/join/TestBlockJoin.java
index c90bfdc08f3..cad709b9a7b 100644
--- a/lucene/join/src/test/org/apache/lucene/search/join/TestBlockJoin.java
+++ b/lucene/join/src/test/org/apache/lucene/search/join/TestBlockJoin.java
@@ -1113,7 +1113,7 @@ public class TestBlockJoin extends LuceneTestCase {
CheckJoinIndex.check(s.getIndexReader(), parentFilter);
ToParentBlockJoinQuery q = new ToParentBlockJoinQuery(tq, parentFilter, ScoreMode.Avg);
- Weight weight = s.createNormalizedWeight(q, org.apache.lucene.search.ScoreMode.COMPLETE);
+ Weight weight = s.createWeight(s.rewrite(q), org.apache.lucene.search.ScoreMode.COMPLETE, 1);
Scorer sc = weight.scorer(s.getIndexReader().leaves().get(0));
assertEquals(1, sc.iterator().advance(1));
r.close();
@@ -1147,7 +1147,7 @@ public class TestBlockJoin extends LuceneTestCase {
CheckJoinIndex.check(s.getIndexReader(), parentFilter);
ToParentBlockJoinQuery q = new ToParentBlockJoinQuery(tq, parentFilter, ScoreMode.Avg);
- Weight weight = s.createNormalizedWeight(q, org.apache.lucene.search.ScoreMode.COMPLETE);
+ Weight weight = s.createWeight(s.rewrite(q), org.apache.lucene.search.ScoreMode.COMPLETE, 1);
Scorer sc = weight.scorer(s.getIndexReader().leaves().get(0));
assertEquals(2, sc.iterator().advance(0));
r.close();
@@ -1199,7 +1199,7 @@ public class TestBlockJoin extends LuceneTestCase {
CheckJoinIndex.check(r, parentsFilter);
ToParentBlockJoinQuery childJoinQuery = new ToParentBlockJoinQuery(childQuery, parentsFilter, ScoreMode.Avg);
- Weight weight = searcher.createNormalizedWeight(childJoinQuery, RandomPicks.randomFrom(random(), org.apache.lucene.search.ScoreMode.values()));
+ Weight weight = searcher.createWeight(searcher.rewrite(childJoinQuery), RandomPicks.randomFrom(random(), org.apache.lucene.search.ScoreMode.values()), 1);
Scorer scorer = weight.scorer(searcher.getIndexReader().leaves().get(0));
assertNull(scorer);
@@ -1207,7 +1207,7 @@ public class TestBlockJoin extends LuceneTestCase {
childQuery = new TermQuery(new Term("bogus", "bogus"));
childJoinQuery = new ToParentBlockJoinQuery(childQuery, parentsFilter, ScoreMode.Avg);
- weight = searcher.createNormalizedWeight(childJoinQuery, RandomPicks.randomFrom(random(), org.apache.lucene.search.ScoreMode.values()));
+ weight = searcher.createWeight(searcher.rewrite(childJoinQuery), RandomPicks.randomFrom(random(), org.apache.lucene.search.ScoreMode.values()), 1);
scorer = weight.scorer(searcher.getIndexReader().leaves().get(0));
assertNull(scorer);
@@ -1401,7 +1401,7 @@ public class TestBlockJoin extends LuceneTestCase {
ToChildBlockJoinQuery parentJoinQuery = new ToChildBlockJoinQuery(parentQuery, parentFilter);
- Weight weight = s.createNormalizedWeight(parentJoinQuery, RandomPicks.randomFrom(random(), org.apache.lucene.search.ScoreMode.values()));
+ Weight weight = s.createWeight(s.rewrite(parentJoinQuery), RandomPicks.randomFrom(random(), org.apache.lucene.search.ScoreMode.values()), 1);
Scorer advancingScorer = weight.scorer(s.getIndexReader().leaves().get(0));
Scorer nextDocScorer = weight.scorer(s.getIndexReader().leaves().get(0));
diff --git a/lucene/join/src/test/org/apache/lucene/search/join/TestBlockJoinValidation.java b/lucene/join/src/test/org/apache/lucene/search/join/TestBlockJoinValidation.java
index ccdb35ac7ee..99e447cfd68 100644
--- a/lucene/join/src/test/org/apache/lucene/search/join/TestBlockJoinValidation.java
+++ b/lucene/join/src/test/org/apache/lucene/search/join/TestBlockJoinValidation.java
@@ -103,7 +103,7 @@ public class TestBlockJoinValidation extends LuceneTestCase {
ToChildBlockJoinQuery blockJoinQuery = new ToChildBlockJoinQuery(parentQuery, parentsFilter);
final LeafReaderContext context = indexSearcher.getIndexReader().leaves().get(0);
- Weight weight = indexSearcher.createNormalizedWeight(blockJoinQuery, org.apache.lucene.search.ScoreMode.COMPLETE);
+ Weight weight = indexSearcher.createWeight(indexSearcher.rewrite(blockJoinQuery), org.apache.lucene.search.ScoreMode.COMPLETE, 1);
Scorer scorer = weight.scorer(context);
final Bits parentDocs = parentsFilter.getBitSet(context);
diff --git a/lucene/misc/src/java/org/apache/lucene/index/PKIndexSplitter.java b/lucene/misc/src/java/org/apache/lucene/index/PKIndexSplitter.java
index a1b86115c46..0854268ba18 100644
--- a/lucene/misc/src/java/org/apache/lucene/index/PKIndexSplitter.java
+++ b/lucene/misc/src/java/org/apache/lucene/index/PKIndexSplitter.java
@@ -106,7 +106,8 @@ public class PKIndexSplitter {
try {
final IndexSearcher searcher = new IndexSearcher(reader);
searcher.setQueryCache(null);
- final Weight preserveWeight = searcher.createNormalizedWeight(preserveFilter, ScoreMode.COMPLETE_NO_SCORES);
+ preserveFilter = searcher.rewrite(preserveFilter);
+ final Weight preserveWeight = searcher.createWeight(preserveFilter, ScoreMode.COMPLETE_NO_SCORES, 1);
final List<LeafReaderContext> leaves = reader.leaves();
final CodecReader[] subReaders = new CodecReader[leaves.size()];
int i = 0;
diff --git a/lucene/queries/src/java/org/apache/lucene/queries/function/valuesource/QueryValueSource.java b/lucene/queries/src/java/org/apache/lucene/queries/function/valuesource/QueryValueSource.java
index 597a149c471..e535b55018f 100644
--- a/lucene/queries/src/java/org/apache/lucene/queries/function/valuesource/QueryValueSource.java
+++ b/lucene/queries/src/java/org/apache/lucene/queries/function/valuesource/QueryValueSource.java
@@ -72,7 +72,8 @@ public class QueryValueSource extends ValueSource {
@Override
public void createWeight(Map context, IndexSearcher searcher) throws IOException {
- Weight w = searcher.createNormalizedWeight(q, ScoreMode.COMPLETE);
+ Query rewritten = searcher.rewrite(q);
+ Weight w = searcher.createWeight(rewritten, ScoreMode.COMPLETE, 1);
context.put(this, w);
}
}
diff --git a/lucene/queries/src/test/org/apache/lucene/queries/function/TestIndexReaderFunctions.java b/lucene/queries/src/test/org/apache/lucene/queries/function/TestIndexReaderFunctions.java
index 49a3ad3ec3e..ea210743c69 100644
--- a/lucene/queries/src/test/org/apache/lucene/queries/function/TestIndexReaderFunctions.java
+++ b/lucene/queries/src/test/org/apache/lucene/queries/function/TestIndexReaderFunctions.java
@@ -159,14 +159,14 @@ public class TestIndexReaderFunctions extends LuceneTestCase {
void assertCacheable(DoubleValuesSource vs, boolean expected) throws Exception {
Query q = new FunctionScoreQuery(new MatchAllDocsQuery(), vs);
- Weight w = searcher.createNormalizedWeight(q, ScoreMode.COMPLETE);
+ Weight w = searcher.createWeight(q, ScoreMode.COMPLETE, 1);
LeafReaderContext ctx = reader.leaves().get(0);
assertEquals(expected, w.isCacheable(ctx));
}
void assertCacheable(LongValuesSource vs, boolean expected) throws Exception {
Query q = new FunctionScoreQuery(new MatchAllDocsQuery(), vs.toDoubleValuesSource());
- Weight w = searcher.createNormalizedWeight(q, ScoreMode.COMPLETE);
+ Weight w = searcher.createWeight(q, ScoreMode.COMPLETE, 1);
LeafReaderContext ctx = reader.leaves().get(0);
assertEquals(expected, w.isCacheable(ctx));
}
diff --git a/lucene/sandbox/src/java/org/apache/lucene/payloads/PayloadSpanUtil.java b/lucene/sandbox/src/java/org/apache/lucene/payloads/PayloadSpanUtil.java
index 3a8a4f43144..3cc7701fb46 100644
--- a/lucene/sandbox/src/java/org/apache/lucene/payloads/PayloadSpanUtil.java
+++ b/lucene/sandbox/src/java/org/apache/lucene/payloads/PayloadSpanUtil.java
@@ -172,7 +172,8 @@ public class PayloadSpanUtil {
final IndexSearcher searcher = new IndexSearcher(context);
searcher.setQueryCache(null);
- SpanWeight w = (SpanWeight) searcher.createNormalizedWeight(query, ScoreMode.COMPLETE_NO_SCORES);
+ query = (SpanQuery) searcher.rewrite(query);
+ SpanWeight w = (SpanWeight) searcher.createWeight(query, ScoreMode.COMPLETE_NO_SCORES, 1);
PayloadSpanCollector collector = new PayloadSpanCollector();
for (LeafReaderContext leafReaderContext : context.leaves()) {
diff --git a/lucene/test-framework/src/java/org/apache/lucene/index/RandomIndexWriter.java b/lucene/test-framework/src/java/org/apache/lucene/index/RandomIndexWriter.java
index b82df680297..15ca469c565 100644
--- a/lucene/test-framework/src/java/org/apache/lucene/index/RandomIndexWriter.java
+++ b/lucene/test-framework/src/java/org/apache/lucene/index/RandomIndexWriter.java
@@ -381,7 +381,7 @@ public class RandomIndexWriter implements Closeable {
if (r.nextInt(20) == 2) {
doRandomForceMerge();
}
- if (!applyDeletions || r.nextBoolean() || w.getConfig().getSoftDeletesField() != null) {
+ if (!applyDeletions || r.nextBoolean()) {
// if we have soft deletes we can't open from a directory
if (LuceneTestCase.VERBOSE) {
System.out.println("RIW.getReader: use NRT reader");
@@ -396,7 +396,12 @@ public class RandomIndexWriter implements Closeable {
}
w.commit();
if (r.nextBoolean()) {
- return DirectoryReader.open(w.getDirectory());
+ DirectoryReader reader = DirectoryReader.open(w.getDirectory());
+ if (w.getConfig().getSoftDeletesField() != null) {
+ return new SoftDeletesDirectoryReaderWrapper(reader, w.getConfig().getSoftDeletesField());
+ } else {
+ return reader;
+ }
} else {
return w.getReader(applyDeletions, writeAllDeletes);
}
diff --git a/lucene/test-framework/src/java/org/apache/lucene/search/CheckHits.java b/lucene/test-framework/src/java/org/apache/lucene/search/CheckHits.java
index 8d8b60b8827..4452c1965e2 100644
--- a/lucene/test-framework/src/java/org/apache/lucene/search/CheckHits.java
+++ b/lucene/test-framework/src/java/org/apache/lucene/search/CheckHits.java
@@ -16,22 +16,21 @@
*/
package org.apache.lucene.search;
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertTrue;
-
import java.io.IOException;
import java.util.Locale;
+import java.util.Random;
import java.util.Set;
import java.util.TreeSet;
import java.util.regex.Pattern;
-import java.util.Random;
import junit.framework.Assert;
-
-import org.apache.lucene.index.LeafReaderContext;
import org.apache.lucene.index.IndexReader;
+import org.apache.lucene.index.LeafReaderContext;
import org.apache.lucene.util.LuceneTestCase;
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertTrue;
+
/**
* Utility class for asserting expected hits in tests.
*/
@@ -541,8 +540,9 @@ public class CheckHits {
}
private static void doCheckMaxScores(Random random, Query query, IndexSearcher searcher) throws IOException {
- Weight w1 = searcher.createNormalizedWeight(query, ScoreMode.COMPLETE);
- Weight w2 = searcher.createNormalizedWeight(query, ScoreMode.TOP_SCORES);
+ query = searcher.rewrite(query);
+ Weight w1 = searcher.createWeight(query, ScoreMode.COMPLETE, 1);
+ Weight w2 = searcher.createWeight(query, ScoreMode.TOP_SCORES, 1);
// Check boundaries and max scores when iterating all matches
for (LeafReaderContext ctx : searcher.getIndexReader().leaves()) {
diff --git a/lucene/test-framework/src/java/org/apache/lucene/search/QueryUtils.java b/lucene/test-framework/src/java/org/apache/lucene/search/QueryUtils.java
index fa113113f81..139cb9d16ed 100644
--- a/lucene/test-framework/src/java/org/apache/lucene/search/QueryUtils.java
+++ b/lucene/test-framework/src/java/org/apache/lucene/search/QueryUtils.java
@@ -309,7 +309,8 @@ public class QueryUtils {
lastDoc[0] = doc;
try {
if (scorer == null) {
- Weight w = s.createNormalizedWeight(q, ScoreMode.COMPLETE);
+ Query rewritten = s.rewrite(q);
+ Weight w = s.createWeight(rewritten, ScoreMode.COMPLETE, 1);
LeafReaderContext context = readerContextArray.get(leafPtr);
scorer = w.scorer(context);
iterator = scorer.iterator();
@@ -374,7 +375,8 @@ public class QueryUtils {
final LeafReader previousReader = lastReader[0];
IndexSearcher indexSearcher = LuceneTestCase.newSearcher(previousReader, false);
indexSearcher.setSimilarity(s.getSimilarity());
- Weight w = indexSearcher.createNormalizedWeight(q, ScoreMode.COMPLETE);
+ Query rewritten = indexSearcher.rewrite(q);
+ Weight w = indexSearcher.createWeight(rewritten, ScoreMode.COMPLETE, 1);
LeafReaderContext ctx = (LeafReaderContext)indexSearcher.getTopReaderContext();
Scorer scorer = w.scorer(ctx);
if (scorer != null) {
@@ -404,7 +406,8 @@ public class QueryUtils {
final LeafReader previousReader = lastReader[0];
IndexSearcher indexSearcher = LuceneTestCase.newSearcher(previousReader, false);
indexSearcher.setSimilarity(s.getSimilarity());
- Weight w = indexSearcher.createNormalizedWeight(q, ScoreMode.COMPLETE);
+ Query rewritten = indexSearcher.rewrite(q);
+ Weight w = indexSearcher.createWeight(rewritten, ScoreMode.COMPLETE, 1);
LeafReaderContext ctx = previousReader.getContext();
Scorer scorer = w.scorer(ctx);
if (scorer != null) {
@@ -430,6 +433,7 @@ public class QueryUtils {
final int lastDoc[] = {-1};
final LeafReader lastReader[] = {null};
final List context = s.getTopReaderContext().leaves();
+ Query rewritten = s.rewrite(q);
s.search(q,new SimpleCollector() {
private Scorer scorer;
private int leafPtr;
@@ -443,7 +447,7 @@ public class QueryUtils {
try {
long startMS = System.currentTimeMillis();
for (int i=lastDoc[0]+1; i<=doc; i++) {
- Weight w = s.createNormalizedWeight(q, ScoreMode.COMPLETE);
+ Weight w = s.createWeight(rewritten, ScoreMode.COMPLETE, 1);
Scorer scorer = w.scorer(context.get(leafPtr));
Assert.assertTrue("query collected "+doc+" but advance("+i+") says no more docs!",scorer.iterator().advance(i) != DocIdSetIterator.NO_MORE_DOCS);
Assert.assertEquals("query collected "+doc+" but advance("+i+") got to "+scorer.docID(),doc,scorer.docID());
@@ -476,7 +480,7 @@ public class QueryUtils {
final LeafReader previousReader = lastReader[0];
IndexSearcher indexSearcher = LuceneTestCase.newSearcher(previousReader, false);
indexSearcher.setSimilarity(s.getSimilarity());
- Weight w = indexSearcher.createNormalizedWeight(q, ScoreMode.COMPLETE);
+ Weight w = indexSearcher.createWeight(rewritten, ScoreMode.COMPLETE, 1);
Scorer scorer = w.scorer((LeafReaderContext)indexSearcher.getTopReaderContext());
if (scorer != null) {
DocIdSetIterator iterator = scorer.iterator();
@@ -504,7 +508,7 @@ public class QueryUtils {
final LeafReader previousReader = lastReader[0];
IndexSearcher indexSearcher = LuceneTestCase.newSearcher(previousReader, false);
indexSearcher.setSimilarity(s.getSimilarity());
- Weight w = indexSearcher.createNormalizedWeight(q, ScoreMode.COMPLETE);
+ Weight w = indexSearcher.createWeight(rewritten, ScoreMode.COMPLETE, 1);
Scorer scorer = w.scorer((LeafReaderContext)indexSearcher.getTopReaderContext());
if (scorer != null) {
DocIdSetIterator iterator = scorer.iterator();
@@ -523,7 +527,8 @@ public class QueryUtils {
/** Check that the scorer and bulk scorer advance consistently. */
public static void checkBulkScorerSkipTo(Random r, Query query, IndexSearcher searcher) throws IOException {
- Weight weight = searcher.createNormalizedWeight(query, ScoreMode.COMPLETE);
+ query = searcher.rewrite(query);
+ Weight weight = searcher.createWeight(query, ScoreMode.COMPLETE, 1);
for (LeafReaderContext context : searcher.getIndexReader().leaves()) {
final Scorer scorer = weight.scorer(context);
final BulkScorer bulkScorer = weight.bulkScorer(context);
diff --git a/lucene/test-framework/src/java/org/apache/lucene/search/ShardSearchingTestBase.java b/lucene/test-framework/src/java/org/apache/lucene/search/ShardSearchingTestBase.java
index b92ed75dfb3..1b81b77614e 100644
--- a/lucene/test-framework/src/java/org/apache/lucene/search/ShardSearchingTestBase.java
+++ b/lucene/test-framework/src/java/org/apache/lucene/search/ShardSearchingTestBase.java
@@ -230,7 +230,8 @@ public abstract class ShardSearchingTestBase extends LuceneTestCase {
@Override
public Query rewrite(Query original) throws IOException {
final IndexSearcher localSearcher = new IndexSearcher(getIndexReader());
- final Weight weight = localSearcher.createNormalizedWeight(original, ScoreMode.COMPLETE);
+ original = localSearcher.rewrite(original);
+ final Weight weight = localSearcher.createWeight(original, ScoreMode.COMPLETE, 1);
final Set terms = new HashSet<>();
weight.extractTerms(terms);
diff --git a/solr/CHANGES.txt b/solr/CHANGES.txt
index c7270da5c64..72ea677605a 100644
--- a/solr/CHANGES.txt
+++ b/solr/CHANGES.txt
@@ -78,6 +78,9 @@ New Features
* SOLR-7887: Upgrade Solr to use Log4J 2.11
(Tim Potter, Keith Laban, Shawn Heisey, Ralph Goers, Erick Erickson, Varun Thacker)
+* SOLR-12139: The "eq" (equals) function query now works with string fields, string literals, and other non-numeric value types.
+ (Andrey Kudryavtsev, David Smiley)
+
Bug Fixes
----------------------
@@ -114,6 +117,9 @@ Bug Fixes
* SOLR-12199: TestReplicationHandler.doTestRepeater(): TEST_PORT interpolation failure:
Server refused connection at: http://127.0.0.1:TEST_PORT/solr (Mikhail Khludnev, Dawid Weiss, Steve Rowe)
+* SOLR-12096: Fixed inconsistent results format of subquery transformer for distributed search (multi-shard).
+ (Munendra S N, Mikhail Khludnev via Ishan Chattopadhyaya)
+
Optimizations
----------------------
@@ -386,7 +392,7 @@ Bug Fixes
* SOLR-11988: Fix exists() method in EphemeralDirectoryFactory/MockDirectoryFactory to prevent false positives (hossman)
-* SOLR-11971: Don't allow referal to external resources in DataImportHandler's dataConfig request parameter.
+* SOLR-11971: Don't allow referral to external resources in DataImportHandler's dataConfig request parameter (CVE-2018-1308).
(麦 香浓郁, Uwe Schindler)
* SOLR-12021: Fixed a bug in ApiSpec and other JSON resource loading that was causing unclosed file handles (hossman)
diff --git a/solr/contrib/ltr/src/java/org/apache/solr/ltr/LTRRescorer.java b/solr/contrib/ltr/src/java/org/apache/solr/ltr/LTRRescorer.java
index 2e7049eddeb..b6e46cfe9e9 100644
--- a/solr/contrib/ltr/src/java/org/apache/solr/ltr/LTRRescorer.java
+++ b/solr/contrib/ltr/src/java/org/apache/solr/ltr/LTRRescorer.java
@@ -115,7 +115,7 @@ public class LTRRescorer extends Rescorer {
final ScoreDoc[] reranked = new ScoreDoc[topN];
final List leaves = searcher.getIndexReader().leaves();
final LTRScoringQuery.ModelWeight modelWeight = (LTRScoringQuery.ModelWeight) searcher
- .createNormalizedWeight(scoringQuery, ScoreMode.COMPLETE);
+ .createWeight(searcher.rewrite(scoringQuery), ScoreMode.COMPLETE, 1);
scoreFeatures(searcher, firstPassTopDocs,topN, modelWeight, hits, leaves, reranked);
// Must sort all documents that we reranked, and then select the top
@@ -219,8 +219,8 @@ public class LTRRescorer extends Rescorer {
final int n = ReaderUtil.subIndex(docID, leafContexts);
final LeafReaderContext context = leafContexts.get(n);
final int deBasedDoc = docID - context.docBase;
- final Weight modelWeight = searcher.createNormalizedWeight(scoringQuery,
- ScoreMode.COMPLETE);
+ final Weight modelWeight = searcher.createWeight(searcher.rewrite(scoringQuery),
+ ScoreMode.COMPLETE, 1);
return modelWeight.explain(context, deBasedDoc);
}
diff --git a/solr/contrib/ltr/src/java/org/apache/solr/ltr/feature/OriginalScoreFeature.java b/solr/contrib/ltr/src/java/org/apache/solr/ltr/feature/OriginalScoreFeature.java
index b538b8663d9..7a7770ec993 100644
--- a/solr/contrib/ltr/src/java/org/apache/solr/ltr/feature/OriginalScoreFeature.java
+++ b/solr/contrib/ltr/src/java/org/apache/solr/ltr/feature/OriginalScoreFeature.java
@@ -70,7 +70,7 @@ public class OriginalScoreFeature extends Feature {
public OriginalScoreWeight(IndexSearcher searcher,
SolrQueryRequest request, Query originalQuery, Map efi) throws IOException {
super(OriginalScoreFeature.this, searcher, request, originalQuery, efi);
- w = searcher.createNormalizedWeight(originalQuery, ScoreMode.COMPLETE);
+ w = searcher.createWeight(searcher.rewrite(originalQuery), ScoreMode.COMPLETE, 1);
};
diff --git a/solr/contrib/ltr/src/java/org/apache/solr/ltr/feature/SolrFeature.java b/solr/contrib/ltr/src/java/org/apache/solr/ltr/feature/SolrFeature.java
index 612085dd979..7866ecc3ae9 100644
--- a/solr/contrib/ltr/src/java/org/apache/solr/ltr/feature/SolrFeature.java
+++ b/solr/contrib/ltr/src/java/org/apache/solr/ltr/feature/SolrFeature.java
@@ -179,7 +179,7 @@ public class SolrFeature extends Feature {
// leaving nothing for the phrase query to parse.
if (query != null) {
queryAndFilters.add(query);
- solrQueryWeight = searcher.createNormalizedWeight(query, ScoreMode.COMPLETE);
+ solrQueryWeight = searcher.createWeight(searcher.rewrite(query), ScoreMode.COMPLETE, 1);
} else {
solrQueryWeight = null;
}
diff --git a/solr/contrib/ltr/src/test/org/apache/solr/ltr/TestLTRScoringQuery.java b/solr/contrib/ltr/src/test/org/apache/solr/ltr/TestLTRScoringQuery.java
index b38dacce21f..27978946b28 100644
--- a/solr/contrib/ltr/src/test/org/apache/solr/ltr/TestLTRScoringQuery.java
+++ b/solr/contrib/ltr/src/test/org/apache/solr/ltr/TestLTRScoringQuery.java
@@ -99,7 +99,7 @@ public class TestLTRScoringQuery extends LuceneTestCase {
final LeafReaderContext context = leafContexts.get(n);
final int deBasedDoc = hits.scoreDocs[0].doc - context.docBase;
- final Weight weight = searcher.createNormalizedWeight(model, ScoreMode.COMPLETE);
+ final Weight weight = searcher.createWeight(searcher.rewrite(model), ScoreMode.COMPLETE, 1);
final Scorer scorer = weight.scorer(context);
// rerank using the field final-score
diff --git a/solr/contrib/ltr/src/test/org/apache/solr/ltr/TestSelectiveWeightCreation.java b/solr/contrib/ltr/src/test/org/apache/solr/ltr/TestSelectiveWeightCreation.java
index 4e9e949ab3b..3591ce23569 100644
--- a/solr/contrib/ltr/src/test/org/apache/solr/ltr/TestSelectiveWeightCreation.java
+++ b/solr/contrib/ltr/src/test/org/apache/solr/ltr/TestSelectiveWeightCreation.java
@@ -80,7 +80,7 @@ public class TestSelectiveWeightCreation extends TestRerankBase {
final LeafReaderContext context = leafContexts.get(n);
final int deBasedDoc = hits.scoreDocs[0].doc - context.docBase;
- final Weight weight = searcher.createNormalizedWeight(model, ScoreMode.COMPLETE);
+ final Weight weight = searcher.createWeight(searcher.rewrite(model), ScoreMode.COMPLETE, 1);
final Scorer scorer = weight.scorer(context);
// rerank using the field final-score
diff --git a/solr/core/src/java/org/apache/solr/response/GeoJSONResponseWriter.java b/solr/core/src/java/org/apache/solr/response/GeoJSONResponseWriter.java
index 43fd7b48ed6..012290eb5df 100644
--- a/solr/core/src/java/org/apache/solr/response/GeoJSONResponseWriter.java
+++ b/solr/core/src/java/org/apache/solr/response/GeoJSONResponseWriter.java
@@ -166,7 +166,8 @@ class GeoJSONWriter extends JSONWriter {
// SolrDocument will now have multiValued fields represented as a Collection,
// even if only a single value is returned for this document.
- if (val instanceof List) {
+ // For SolrDocumentList, use writeVal instead of writeArray
+ if (!(val instanceof SolrDocumentList) && val instanceof List) {
// shortcut this common case instead of going through writeVal again
writeArray(name,((Iterable)val).iterator());
} else {
diff --git a/solr/core/src/java/org/apache/solr/response/JSONResponseWriter.java b/solr/core/src/java/org/apache/solr/response/JSONResponseWriter.java
index 513df4eed53..5f6e2f27bc4 100644
--- a/solr/core/src/java/org/apache/solr/response/JSONResponseWriter.java
+++ b/solr/core/src/java/org/apache/solr/response/JSONResponseWriter.java
@@ -25,10 +25,11 @@ import java.util.Map;
import java.util.Set;
import org.apache.solr.common.IteratorWriter;
+import org.apache.solr.common.MapWriter;
import org.apache.solr.common.MapWriter.EntryWriter;
import org.apache.solr.common.PushWriter;
import org.apache.solr.common.SolrDocument;
-import org.apache.solr.common.MapWriter;
+import org.apache.solr.common.SolrDocumentList;
import org.apache.solr.common.params.SolrParams;
import org.apache.solr.common.util.NamedList;
import org.apache.solr.common.util.SimpleOrderedMap;
@@ -367,7 +368,8 @@ class JSONWriter extends TextResponseWriter {
// SolrDocument will now have multiValued fields represented as a Collection,
// even if only a single value is returned for this document.
- if (val instanceof List) {
+ // For SolrDocumentList, use writeVal instead of writeArray
+ if (!(val instanceof SolrDocumentList) && val instanceof List) {
// shortcut this common case instead of going through writeVal again
writeArray(name,((Iterable)val).iterator());
} else {
diff --git a/solr/core/src/java/org/apache/solr/search/QueryWrapperFilter.java b/solr/core/src/java/org/apache/solr/search/QueryWrapperFilter.java
index cce8b1a2435..1d9de70b405 100644
--- a/solr/core/src/java/org/apache/solr/search/QueryWrapperFilter.java
+++ b/solr/core/src/java/org/apache/solr/search/QueryWrapperFilter.java
@@ -66,7 +66,8 @@ public class QueryWrapperFilter extends Filter {
public DocIdSet getDocIdSet(final LeafReaderContext context, final Bits acceptDocs) throws IOException {
// get a private context that is used to rewrite, createWeight and score eventually
final LeafReaderContext privateContext = context.reader().getContext();
- final Weight weight = new IndexSearcher(privateContext).createNormalizedWeight(query, ScoreMode.COMPLETE_NO_SCORES);
+ final IndexSearcher searcher = new IndexSearcher(privateContext);
+ final Weight weight = searcher.createWeight(searcher.rewrite(query), ScoreMode.COMPLETE_NO_SCORES, 1);
DocIdSet set = new DocIdSet() {
@Override
diff --git a/solr/core/src/java/org/apache/solr/search/SolrIndexSearcher.java b/solr/core/src/java/org/apache/solr/search/SolrIndexSearcher.java
index d77690fd281..2dbc2db9b08 100644
--- a/solr/core/src/java/org/apache/solr/search/SolrIndexSearcher.java
+++ b/solr/core/src/java/org/apache/solr/search/SolrIndexSearcher.java
@@ -1061,7 +1061,7 @@ public class SolrIndexSearcher extends IndexSearcher implements Closeable, SolrI
List weights = new ArrayList<>(notCached.size());
for (Query q : notCached) {
Query qq = QueryUtils.makeQueryable(q);
- weights.add(createNormalizedWeight(qq, ScoreMode.COMPLETE));
+ weights.add(createWeight(rewrite(qq), ScoreMode.COMPLETE, 1));
}
pf.filter = new FilterImpl(answer, weights);
pf.hasDeletedDocs = (answer == null); // if all clauses were uncached, the resulting filter may match deleted docs
diff --git a/solr/core/src/java/org/apache/solr/search/ValueSourceParser.java b/solr/core/src/java/org/apache/solr/search/ValueSourceParser.java
index 450d95a7aff..0e26bf83928 100644
--- a/solr/core/src/java/org/apache/solr/search/ValueSourceParser.java
+++ b/solr/core/src/java/org/apache/solr/search/ValueSourceParser.java
@@ -67,6 +67,7 @@ import org.apache.solr.search.facet.UniqueAgg;
import org.apache.solr.search.facet.VarianceAgg;
import org.apache.solr.search.function.CollapseScoreFunction;
import org.apache.solr.search.function.ConcatStringFunction;
+import org.apache.solr.search.function.EqualFunction;
import org.apache.solr.search.function.OrdFieldSource;
import org.apache.solr.search.function.ReverseOrdFieldSource;
import org.apache.solr.search.function.SolrComparisonBoolFunction;
@@ -922,7 +923,7 @@ public abstract class ValueSourceParser implements NamedListInitializedPlugin {
ValueSource lhsValSource = fp.parseValueSource();
ValueSource rhsValSource = fp.parseValueSource();
- return new SolrComparisonBoolFunction(lhsValSource, rhsValSource, "eq", (cmp) -> cmp == 0);
+ return new EqualFunction(lhsValSource, rhsValSource, "eq");
}
});
diff --git a/solr/core/src/java/org/apache/solr/search/function/EqualFunction.java b/solr/core/src/java/org/apache/solr/search/function/EqualFunction.java
new file mode 100644
index 00000000000..411511edad0
--- /dev/null
+++ b/solr/core/src/java/org/apache/solr/search/function/EqualFunction.java
@@ -0,0 +1,61 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.solr.search.function;
+
+import java.io.IOException;
+import java.util.Objects;
+
+import org.apache.lucene.queries.function.FunctionValues;
+import org.apache.lucene.queries.function.ValueSource;
+import org.apache.lucene.queries.function.valuesource.ComparisonBoolFunction;
+
+/**
+ * Compares two values for equality.
+ * It works not only on numbers but also on strings and other object values.
+ *
+ * @since 7.4
+ */
+public class EqualFunction extends ComparisonBoolFunction {
+
+ public EqualFunction(ValueSource lhs, ValueSource rhs, String name) {
+ super(lhs, rhs, name);
+ }
+
+ @Override
+ public boolean compare(int doc, FunctionValues lhs, FunctionValues rhs) throws IOException {
+ Object objL = lhs.objectVal(doc);
+ Object objR = rhs.objectVal(doc);
+ if (isNumeric(objL) && isNumeric(objR)) {
+ if (isInteger(objL) && isInteger(objR)) {
+ return Long.compare(((Number)objL).longValue(), ((Number)objR).longValue()) == 0;
+ } else {
+ return Double.compare(((Number)objL).doubleValue(), ((Number)objR).doubleValue()) == 0;
+ }
+ } else {
+ return Objects.equals(objL, objR);
+ }
+ }
+
+ private static boolean isInteger(Object obj) {
+ return obj instanceof Integer || obj instanceof Long;
+ }
+
+ private static boolean isNumeric(Object obj) {
+ return obj instanceof Number;
+ }
+}
diff --git a/solr/core/src/java/org/apache/solr/search/stats/ExactStatsCache.java b/solr/core/src/java/org/apache/solr/search/stats/ExactStatsCache.java
index f09ddf760a0..819d371ec84 100644
--- a/solr/core/src/java/org/apache/solr/search/stats/ExactStatsCache.java
+++ b/solr/core/src/java/org/apache/solr/search/stats/ExactStatsCache.java
@@ -16,6 +16,16 @@
*/
package org.apache.solr.search.stats;
+import java.io.IOException;
+import java.lang.invoke.MethodHandles;
+import java.util.Collections;
+import java.util.HashMap;
+import java.util.HashSet;
+import java.util.List;
+import java.util.Map;
+import java.util.Map.Entry;
+import java.util.Set;
+
import com.google.common.collect.Lists;
import org.apache.lucene.index.IndexReaderContext;
import org.apache.lucene.index.Term;
@@ -39,16 +49,6 @@ import org.apache.solr.search.SolrIndexSearcher;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
-import java.io.IOException;
-import java.lang.invoke.MethodHandles;
-import java.util.Collections;
-import java.util.HashMap;
-import java.util.HashSet;
-import java.util.List;
-import java.util.Map;
-import java.util.Map.Entry;
-import java.util.Set;
-
/**
* This class implements exact caching of statistics. It requires an additional
* round-trip to parse query at shard servers, and return term statistics for
@@ -157,7 +157,7 @@ public class ExactStatsCache extends StatsCache {
Query q = rb.getQuery();
try {
HashSet terms = new HashSet<>();
- searcher.createNormalizedWeight(q, ScoreMode.COMPLETE).extractTerms(terms);
+ searcher.createWeight(searcher.rewrite(q), ScoreMode.COMPLETE, 1).extractTerms(terms);
IndexReaderContext context = searcher.getTopReaderContext();
HashMap statsMap = new HashMap<>();
HashMap colMap = new HashMap<>();
diff --git a/solr/core/src/test/org/apache/solr/response/JSONWriterTest.java b/solr/core/src/test/org/apache/solr/response/JSONWriterTest.java
index 1b531505d0f..68cebd24416 100644
--- a/solr/core/src/test/org/apache/solr/response/JSONWriterTest.java
+++ b/solr/core/src/test/org/apache/solr/response/JSONWriterTest.java
@@ -22,7 +22,10 @@ import java.lang.reflect.Method;
import java.lang.reflect.Modifier;
import java.nio.charset.StandardCharsets;
import java.util.ArrayList;
+import java.util.Arrays;
import java.util.List;
+
+import org.apache.solr.JSONTestUtil;
import org.apache.solr.SolrTestCaseJ4;
import org.apache.solr.common.SolrDocument;
import org.apache.solr.common.SolrDocumentList;
@@ -130,9 +133,9 @@ public class JSONWriterTest extends SolrTestCaseJ4 {
}
@Test
- public void testJSONSolrDocument() throws IOException {
+ public void testJSONSolrDocument() throws Exception {
SolrQueryRequest req = req(CommonParams.WT,"json",
- CommonParams.FL,"id,score");
+ CommonParams.FL,"id,score,_children_,path");
SolrQueryResponse rsp = new SolrQueryResponse();
JSONResponseWriter w = new JSONResponseWriter();
@@ -141,11 +144,22 @@ public class JSONWriterTest extends SolrTestCaseJ4 {
StringWriter buf = new StringWriter();
+ SolrDocument childDoc = new SolrDocument();
+ childDoc.addField("id", "2");
+ childDoc.addField("score", "0.4");
+ childDoc.addField("path", Arrays.asList("a>b", "a>b>c"));
+
+ SolrDocumentList childList = new SolrDocumentList();
+ childList.setNumFound(1);
+ childList.setStart(0);
+ childList.add(childDoc);
+
SolrDocument solrDoc = new SolrDocument();
solrDoc.addField("id", "1");
solrDoc.addField("subject", "hello2");
solrDoc.addField("title", "hello3");
solrDoc.addField("score", "0.7");
+ solrDoc.setField("_children_", childList);
SolrDocumentList list = new SolrDocumentList();
list.setNumFound(1);
@@ -163,8 +177,12 @@ public class JSONWriterTest extends SolrTestCaseJ4 {
result.contains("\"title\""));
assertTrue("response doesn't contain expected fields: " + result,
result.contains("\"id\"") &&
- result.contains("\"score\""));
+ result.contains("\"score\"") && result.contains("_children_"));
+ String expectedResult = "{'response':{'numFound':1,'start':0,'maxScore':0.7,'docs':[{'id':'1', 'score':'0.7'," +
+ " '_children_':{'numFound':1,'start':0,'docs':[{'id':'2', 'score':'0.4', 'path':['a>b', 'a>b>c']}] }}] }}";
+ String error = JSONTestUtil.match(result, "=="+expectedResult);
+ assertNull("response validation failed with error: " + error, error);
req.close();
}
diff --git a/solr/core/src/test/org/apache/solr/response/transform/TestSubQueryTransformerDistrib.java b/solr/core/src/test/org/apache/solr/response/transform/TestSubQueryTransformerDistrib.java
index 620cac0a942..f6d0a383fc6 100644
--- a/solr/core/src/test/org/apache/solr/response/transform/TestSubQueryTransformerDistrib.java
+++ b/solr/core/src/test/org/apache/solr/response/transform/TestSubQueryTransformerDistrib.java
@@ -16,7 +16,11 @@
*/
package org.apache.solr.response.transform;
+import java.io.ByteArrayOutputStream;
import java.io.IOException;
+import java.io.InputStream;
+import java.net.URL;
+import java.nio.charset.Charset;
import java.nio.file.Path;
import java.nio.file.Paths;
import java.util.ArrayList;
@@ -26,6 +30,8 @@ import java.util.List;
import java.util.Map;
import java.util.Random;
+import org.apache.commons.io.IOUtils;
+import org.apache.solr.JSONTestUtil;
import org.apache.solr.client.solrj.SolrServerException;
import org.apache.solr.client.solrj.impl.CloudSolrClient;
import org.apache.solr.client.solrj.request.CollectionAdminRequest;
@@ -37,10 +43,12 @@ import org.apache.solr.cloud.SolrCloudTestCase;
import org.apache.solr.common.SolrDocument;
import org.apache.solr.common.SolrDocumentList;
import org.apache.solr.common.cloud.ZkStateReader;
+import org.apache.solr.common.params.ModifiableSolrParams;
import org.apache.solr.common.util.ContentStreamBase;
import org.junit.BeforeClass;
import org.junit.Test;
+@org.apache.solr.SolrTestCaseJ4.SuppressSSL()
public class TestSubQueryTransformerDistrib extends SolrCloudTestCase {
private static final String support = "These guys help customers";
@@ -92,7 +100,7 @@ public class TestSubQueryTransformerDistrib extends SolrCloudTestCase {
@SuppressWarnings("serial")
@Test
- public void test() throws SolrServerException, IOException {
+ public void test() throws Exception {
int peopleMultiplier = atLeast(1);
int deptMultiplier = atLeast(1);
@@ -100,24 +108,26 @@ public class TestSubQueryTransformerDistrib extends SolrCloudTestCase {
Random random1 = random();
+ final ModifiableSolrParams params = params(
+ new String[]{"q","name_s:dave", "indent","true",
+ "fl","*,depts:[subquery "+((random1.nextBoolean() ? "" : "separator=,"))+"]",
+ "rows","" + peopleMultiplier,
+ "depts.q","{!terms f=dept_id_s v=$row.dept_ss_dv "+((random1.nextBoolean() ? "" : "separator=,"))+"}",
+ "depts.fl","text_t"+(differentUniqueId?",id:notid":""),
+ "depts.indent","true",
+ "depts.collection","departments",
+ differentUniqueId ? "depts.distrib.singlePass":"notnecessary","true",
+ "depts.rows",""+(deptMultiplier*2),
+ "depts.logParamsList","q,fl,rows,row.dept_ss_dv",
+ random().nextBoolean()?"depts.wt":"whatever",anyWt(),
+ random().nextBoolean()?"wt":"whatever",anyWt()});
+
+ final SolrDocumentList hits;
{
-
- final QueryRequest qr = new QueryRequest(params(
- new String[]{"q","name_s:dave", "indent","true",
- "fl","*,depts:[subquery "+((random1.nextBoolean() ? "" : "separator=,"))+"]",
- "rows","" + peopleMultiplier,
- "depts.q","{!terms f=dept_id_s v=$row.dept_ss_dv "+((random1.nextBoolean() ? "" : "separator=,"))+"}",
- "depts.fl","text_t"+(differentUniqueId?",id:notid":""),
- "depts.indent","true",
- "depts.collection","departments",
- differentUniqueId ? "depts.distrib.singlePass":"notnecessary","true",
- "depts.rows",""+(deptMultiplier*2),
- "depts.logParamsList","q,fl,rows,row.dept_ss_dv",
- random().nextBoolean()?"depts.wt":"whatever",anyWt(),
- random().nextBoolean()?"wt":"whatever",anyWt()}));
+ final QueryRequest qr = new QueryRequest(params);
final QueryResponse rsp = new QueryResponse();
- rsp.setResponse(cluster.getSolrClient().request(qr, people));
- final SolrDocumentList hits = rsp.getResults();
+ rsp.setResponse(cluster.getSolrClient().request(qr, people+","+depts));
+ hits = rsp.getResults();
assertEquals(peopleMultiplier, hits.getNumFound());
@@ -140,6 +150,21 @@ public class TestSubQueryTransformerDistrib extends SolrCloudTestCase {
}
assertEquals(hits.toString(), engineerCount, supportCount);
}
+
+ params.set("wt", "json");
+ final URL node = new URL(cluster.getRandomJetty(random()).getBaseUrl().toString()
+ +"/"+people+"/select"+params.toQueryString());
+
+ try(final InputStream jsonResponse = node.openStream()){
+ final ByteArrayOutputStream outBuffer = new ByteArrayOutputStream();
+ IOUtils.copy(jsonResponse, outBuffer);
+
+ final Object expected = ((SolrDocumentList) hits.get(0).getFieldValue("depts")).get(0).get("text_t");
+ final String err = JSONTestUtil.match("/response/docs/[0]/depts/docs/[0]/text_t"
+ ,outBuffer.toString(Charset.forName("UTF-8").toString()),
+ "\""+expected+"\"");
+ assertNull(err,err);
+ }
}
diff --git a/solr/core/src/test/org/apache/solr/search/TestQueryWrapperFilter.java b/solr/core/src/test/org/apache/solr/search/TestQueryWrapperFilter.java
index bddb269675f..72a7606a411 100644
--- a/solr/core/src/test/org/apache/solr/search/TestQueryWrapperFilter.java
+++ b/solr/core/src/test/org/apache/solr/search/TestQueryWrapperFilter.java
@@ -228,7 +228,7 @@ public class TestQueryWrapperFilter extends LuceneTestCase {
final IndexSearcher searcher = new IndexSearcher(reader);
searcher.setQueryCache(null); // to still have approximations
final Query query = new QueryWrapperFilter(new RandomApproximationQuery(new TermQuery(new Term("foo", "bar")), random()));
- final Weight weight = searcher.createNormalizedWeight(query, RandomPicks.randomFrom(random(), ScoreMode.values()));
+ final Weight weight = searcher.createWeight(searcher.rewrite(query), RandomPicks.randomFrom(random(), ScoreMode.values()), 1);
final Scorer scorer = weight.scorer(reader.leaves().get(0));
assertNotNull(scorer.twoPhaseIterator());
reader.close();
diff --git a/solr/core/src/test/org/apache/solr/search/function/TestFunctionQuery.java b/solr/core/src/test/org/apache/solr/search/function/TestFunctionQuery.java
index 32f603a2982..cc448b3c4e3 100644
--- a/solr/core/src/test/org/apache/solr/search/function/TestFunctionQuery.java
+++ b/solr/core/src/test/org/apache/solr/search/function/TestFunctionQuery.java
@@ -988,4 +988,82 @@ public class TestFunctionQuery extends SolrTestCaseJ4 {
/*id*/2, /*score*/5,
/*id*/1, /*score*/2);
}
+
+ @Test
+ public void testEqualFunction() {
+ clearIndex();
+ assertU(adoc("id", "1", "field1_s", "value1", "field2_s", "value1",
+ "field1_s_dv", "value1", "field2_s_dv", "value2", "field_b", "true"));
+ assertU(adoc("id", "2", "field1_s", "value1", "field2_s", "value2",
+ "field1_s_dv", "value1", "field2_s_dv", "value1", "field_b", "false"));
+ assertU(commit());
+
+ singleTest("field1_s", "if(eq(field1_s,field2_s),5,2)",
+ /*id*/1, /*score*/5,
+ /*id*/2, /*score*/2);
+ singleTest("field1_s_dv", "if(eq(field1_s_dv,field2_s_dv),5,2)",
+ /*id*/2, /*score*/5,
+ /*id*/1, /*score*/2);
+ singleTest("field1_s", "if(eq(field1_s,field1_s_dv),5,2)",
+ /*id*/1, /*score*/5,
+ /*id*/2, /*score*/5);
+ singleTest("field2_s", "if(eq(field2_s,field2_s_dv),5,2)",
+ /*id*/1, /*score*/2,
+ /*id*/2, /*score*/2);
+ singleTest("field2_s", "if(eq(field2_s,'value1'),5,2)",
+ /*id*/1, /*score*/5,
+ /*id*/2, /*score*/2);
+ singleTest("field1_s", "if(eq('value1','value1'),5,2)",
+ /*id*/1, /*score*/5,
+ /*id*/2, /*score*/5);
+ singleTest("field_b", "if(eq(if(field_b,'value1','value2'),'value1'),5,2)",
+ /*id*/1, /*score*/5,
+ /*id*/2, /*score*/2);
+ }
+
+ @Test
+ public void testEqualNumericComparisons() {
+ clearIndex();
+ assertU(adoc("id", "1", "field_d", "5.0", "field_i", "5"));
+ assertU(adoc("id", "2", "field_d", "3.0", "field_i", "3"));
+ assertU(commit());
+ singleTest("field_d", "if(eq(field_d,5),5,2)",
+ /*id*/1, /*score*/5,
+ /*id*/2, /*score*/2);
+ singleTest("field_d", "if(eq(field_d,5.0),5,2)",
+ /*id*/1, /*score*/5,
+ /*id*/2, /*score*/2);
+ singleTest("field_d", "if(eq(5,def(field_d,5)),5,2)",
+ /*id*/1, /*score*/5,
+ /*id*/2, /*score*/2);
+ singleTest("field_i", "if(eq(5.0,def(field_i,5)),5,2)",
+ /*id*/1, /*score*/5,
+ /*id*/2, /*score*/2);
+ singleTest("field_not_existed_i", "if(def(field_not_existed_i,5.0),5,2)",
+ /*id*/1, /*score*/5,
+ /*id*/2, /*score*/5);
+ singleTest("field_not_existed_i", "if(def(field_not_existed_i,5),5,2)",
+ /*id*/1, /*score*/5,
+ /*id*/2, /*score*/5);
+ }
+
+ @Test
+ public void testDifferentTypesComparisons() {
+ clearIndex();
+ assertU(adoc("id", "1", "field_s", "value"));
+ assertU(adoc("id", "2"));
+ assertU(commit());
+ singleTest("field_s", "if(eq(field_s,'value'),5,2)",
+ /*id*/1, /*score*/5,
+ /*id*/2, /*score*/2);
+ singleTest("field_s", "if(eq(def(field_s,5),5),5,2)",
+ /*id*/2, /*score*/5,
+ /*id*/1, /*score*/2);
+ singleTest("field_s", "if(eq(def(field_s,5),5.0),5,2)",
+ /*id*/2, /*score*/5,
+ /*id*/1, /*score*/2);
+ singleTest("field_s", "if(eq(def(field_s,'5'),5),5,2)",
+ /*id*/1, /*score*/2,
+ /*id*/2, /*score*/2);
+ }
}
diff --git a/solr/solr-ref-guide/src/function-queries.adoc b/solr/solr-ref-guide/src/function-queries.adoc
index 32991b399b4..e9e63de2e72 100644
--- a/solr/solr-ref-guide/src/function-queries.adoc
+++ b/solr/solr-ref-guide/src/function-queries.adoc
@@ -496,7 +496,8 @@ Returns `true` if any member of the field exists.
=== Comparison Functions
`gt`, `gte`, `lt`, `lte`, `eq`
-5 comparison functions: Greater Than, Greater Than or Equal, Less Than, Less Than or Equal, Equal
+5 comparison functions: Greater Than, Greater Than or Equal, Less Than, Less Than or Equal, Equal.
+`eq` works not only on numbers but on essentially any value type, such as a string field.
*Syntax Example*