From 87274d00ac17cbee79da2b162f8f169abdd61cd7 Mon Sep 17 00:00:00 2001
From: Michael McCandless
Date: Wed, 5 Jan 2011 11:16:40 +0000
Subject: [PATCH] LUCENE-2837: collapse Searcher/Searchable into IndexSearcher;
remove contrib/remote, MultiSearcher; absorb ParallelMultiSearcher into
IndexSearcher as optional ExecutorService to ctor
git-svn-id: https://svn.apache.org/repos/asf/lucene/dev/trunk@1055416 13f79535-47bb-0310-9956-ffa450edef68
---
lucene/CHANGES.txt | 5 +
.../java/org/apache/lucene/ant/IndexTask.java | 3 +-
.../org/apache/lucene/ant/IndexTaskTest.java | 5 +-
.../org/apache/lucene/demo/SearchFiles.java | 7 +-
.../search/highlight/HighlighterTest.java | 63 --
.../lucli/src/java/lucli/LuceneMethods.java | 5 +-
.../lucene/index/memory/MemoryIndex.java | 7 +-
.../apache/lucene/search/BoostingQuery.java | 4 +-
.../lucene/search/FuzzyLikeThisQuery.java | 2 +-
.../lucene/search/ChainedFilterTest.java | 5 +-
.../search/regex/TestSpanRegexQuery.java | 29 -
.../surround/query/BooleanQueryTst.java | 3 +-
lucene/contrib/remote/build.xml | 34 -
lucene/contrib/remote/pom.xml.template | 36 --
.../lucene/search/RMIRemoteSearchable.java | 44 --
.../search/RemoteCachingWrapperFilter.java | 57 --
.../lucene/search/RemoteSearchable.java | 119 ----
lucene/contrib/remote/src/java/overview.html | 26 -
.../RemoteCachingWrapperFilterHelper.java | 59 --
.../apache/lucene/search/RemoteTestCase.java | 79 ---
.../TestRemoteCachingWrapperFilter.java | 129 ----
.../lucene/search/TestRemoteSearchable.java | 128 ----
.../apache/lucene/search/TestRemoteSort.java | 425 ------------
.../lucene/spatial/tier/TestCartesian.java | 8 +-
.../org/apache/lucene/wordnet/SynExpand.java | 3 +-
.../org/apache/lucene/wordnet/SynLookup.java | 3 +-
.../apache/lucene/wordnet/TestWordnet.java | 3 +-
.../org/apache/lucene/document/Document.java | 4 +-
.../apache/lucene/index/LogMergePolicy.java | 24 +-
.../apache/lucene/search/BooleanQuery.java | 6 +-
.../lucene/search/ConstantScoreQuery.java | 4 +-
.../lucene/search/DisjunctionMaxQuery.java | 4 +-
.../apache/lucene/search/FilteredQuery.java | 2 +-
.../apache/lucene/search/IndexSearcher.java | 603 ++++++++++++++++--
.../lucene/search/MatchAllDocsQuery.java | 4 +-
.../lucene/search/MultiPhraseQuery.java | 4 +-
.../apache/lucene/search/MultiSearcher.java | 461 -------------
.../lucene/search/ParallelMultiSearcher.java | 290 ---------
.../org/apache/lucene/search/PhraseQuery.java | 4 +-
.../java/org/apache/lucene/search/Query.java | 78 +--
.../org/apache/lucene/search/Searchable.java | 165 -----
.../org/apache/lucene/search/Searcher.java | 183 ------
.../org/apache/lucene/search/Similarity.java | 6 +-
.../org/apache/lucene/search/TermQuery.java | 4 +-
.../search/function/CustomScoreQuery.java | 6 +-
.../search/function/ValueSourceQuery.java | 4 +-
.../search/payloads/PayloadNearQuery.java | 6 +-
.../search/payloads/PayloadTermQuery.java | 6 +-
.../search/spans/FieldMaskingSpanQuery.java | 6 +-
.../apache/lucene/search/spans/SpanQuery.java | 4 +-
.../lucene/search/spans/SpanWeight.java | 2 +-
.../test/org/apache/lucene/TestSearch.java | 2 +-
.../lucene/TestSearchForDuplicates.java | 8 +-
.../apache/lucene/document/TestDocument.java | 5 +-
.../index/TestBackwardsCompatibility.java | 4 +-
.../lucene/index/TestLazyProxSkipping.java | 3 +-
.../apache/lucene/index/TestNRTThreads.java | 11 +-
.../org/apache/lucene/index/TestOmitTf.java | 4 +-
.../org/apache/lucene/search/CheckHits.java | 28 +-
.../lucene/search/JustCompileSearch.java | 114 ----
.../org/apache/lucene/search/QueryUtils.java | 59 +-
.../search/TestBooleanMinShouldMatch.java | 2 +-
.../lucene/search/TestConstantScoreQuery.java | 2 +-
.../lucene/search/TestCustomSearcherSort.java | 21 +-
.../search/TestDisjunctionMaxQuery.java | 2 +-
.../lucene/search/TestMultiSearcher.java | 454 -------------
.../search/TestMultiSearcherRanking.java | 173 -----
.../TestMultiValuedNumericRangeQuery.java | 2 +-
.../org/apache/lucene/search/TestNot.java | 2 +-
.../search/TestParallelMultiSearcher.java | 51 --
.../apache/lucene/search/TestPhraseQuery.java | 2 +-
.../lucene/search/TestRegexpRandom.java | 2 +-
.../apache/lucene/search/TestSimilarity.java | 4 +-
.../lucene/search/TestSimpleExplanations.java | 81 ---
.../org/apache/lucene/search/TestSort.java | 140 +---
.../lucene/search/TestTermRangeQuery.java | 2 +-
.../search/TestTimeLimitingCollector.java | 2 +-
.../lucene/search/TestWildcardRandom.java | 2 +-
.../search/function/TestCustomScoreQuery.java | 2 +-
.../search/payloads/TestPayloadNearQuery.java | 5 +-
.../spans/TestSpanMultiTermQueryWrapper.java | 3 +-
.../apache/lucene/search/spans/TestSpans.java | 7 +-
.../search/spans/TestSpansAdvanced.java | 2 +-
.../lucene/collation/CollationTestBase.java | 7 +-
.../benchmark/byTask/tasks/ReadTask.java | 11 +-
.../benchmark/quality/QualityBenchmark.java | 6 +-
.../benchmark/quality/trec/QueryDriver.java | 5 +-
.../quality/utils/DocNameExtractor.java | 4 +-
.../quality/utils/SubmissionReport.java | 4 +-
.../org/apache/solr/schema/LatLonType.java | 6 +-
.../solr/search/LuceneQueryOptimizer.java | 2 +-
.../solr/search/SolrConstantScoreQuery.java | 4 +-
.../org/apache/solr/search/SolrFilter.java | 4 +-
.../apache/solr/search/ValueSourceParser.java | 4 +-
.../solr/search/function/BoostedQuery.java | 10 +-
.../search/function/DocFreqValueSource.java | 6 +-
.../search/function/DualFloatFunction.java | 4 +-
.../solr/search/function/FunctionQuery.java | 6 +-
.../solr/search/function/IDFValueSource.java | 4 +-
.../search/function/LinearFloatFunction.java | 4 +-
.../search/function/MaxDocValueSource.java | 6 +-
.../search/function/MaxFloatFunction.java | 4 +-
.../search/function/MultiFloatFunction.java | 4 +-
.../solr/search/function/NormValueSource.java | 6 +-
.../search/function/QueryValueSource.java | 2 +-
.../function/RangeMapFloatFunction.java | 4 +-
.../function/ReciprocalFloatFunction.java | 4 +-
.../search/function/ScaleFloatFunction.java | 4 +-
.../solr/search/function/SingleFunction.java | 4 +-
.../solr/search/function/TFValueSource.java | 4 +-
.../solr/search/function/ValueSource.java | 4 +-
.../function/ValueSourceRangeFilter.java | 4 +-
.../search/function/VectorValueSource.java | 4 +-
.../distance/GeohashHaversineFunction.java | 4 +-
.../distance/HaversineConstFunction.java | 4 +-
.../function/distance/HaversineFunction.java | 4 +-
.../distance/VectorDistanceFunction.java | 4 +-
117 files changed, 785 insertions(+), 3748 deletions(-)
delete mode 100644 lucene/contrib/remote/build.xml
delete mode 100644 lucene/contrib/remote/pom.xml.template
delete mode 100644 lucene/contrib/remote/src/java/org/apache/lucene/search/RMIRemoteSearchable.java
delete mode 100644 lucene/contrib/remote/src/java/org/apache/lucene/search/RemoteCachingWrapperFilter.java
delete mode 100644 lucene/contrib/remote/src/java/org/apache/lucene/search/RemoteSearchable.java
delete mode 100644 lucene/contrib/remote/src/java/overview.html
delete mode 100644 lucene/contrib/remote/src/test/org/apache/lucene/search/RemoteCachingWrapperFilterHelper.java
delete mode 100644 lucene/contrib/remote/src/test/org/apache/lucene/search/RemoteTestCase.java
delete mode 100644 lucene/contrib/remote/src/test/org/apache/lucene/search/TestRemoteCachingWrapperFilter.java
delete mode 100644 lucene/contrib/remote/src/test/org/apache/lucene/search/TestRemoteSearchable.java
delete mode 100644 lucene/contrib/remote/src/test/org/apache/lucene/search/TestRemoteSort.java
delete mode 100644 lucene/src/java/org/apache/lucene/search/MultiSearcher.java
delete mode 100644 lucene/src/java/org/apache/lucene/search/ParallelMultiSearcher.java
delete mode 100644 lucene/src/java/org/apache/lucene/search/Searchable.java
delete mode 100644 lucene/src/java/org/apache/lucene/search/Searcher.java
delete mode 100644 lucene/src/test/org/apache/lucene/search/TestMultiSearcher.java
delete mode 100644 lucene/src/test/org/apache/lucene/search/TestMultiSearcherRanking.java
delete mode 100644 lucene/src/test/org/apache/lucene/search/TestParallelMultiSearcher.java
diff --git a/lucene/CHANGES.txt b/lucene/CHANGES.txt
index 64484d37008..78a75fd5db7 100644
--- a/lucene/CHANGES.txt
+++ b/lucene/CHANGES.txt
@@ -123,6 +123,11 @@ Changes in backwards compatibility policy
you really want a top-level norms, use MultiNorms or SlowMultiReaderWrapper.
(Uwe Schindler, Robert Muir)
+* LUCENE-2837: Collapsed Searcher, Searchable into IndexSearcher;
+ removed contrib/remote and MultiSearcher (Mike McCandless); absorbed
+ ParallelMultiSearcher into IndexSearcher as an optional
+ ExecutorService passed to its ctor. (Mike McCandless)
+
Changes in Runtime Behavior
* LUCENE-2650, LUCENE-2825: The behavior of FSDirectory.open has changed. On 64-bit
diff --git a/lucene/contrib/ant/src/java/org/apache/lucene/ant/IndexTask.java b/lucene/contrib/ant/src/java/org/apache/lucene/ant/IndexTask.java
index ef76424073e..b22638c713a 100644
--- a/lucene/contrib/ant/src/java/org/apache/lucene/ant/IndexTask.java
+++ b/lucene/contrib/ant/src/java/org/apache/lucene/ant/IndexTask.java
@@ -44,7 +44,6 @@ import org.apache.lucene.index.Term;
import org.apache.lucene.index.IndexWriterConfig.OpenMode;
import org.apache.lucene.search.IndexSearcher;
import org.apache.lucene.search.ScoreDoc;
-import org.apache.lucene.search.Searcher;
import org.apache.lucene.search.TermQuery;
import org.apache.lucene.store.FSDirectory;
import org.apache.lucene.util.Version;
@@ -268,7 +267,7 @@ public class IndexTask extends Task {
FSDirectory dir = FSDirectory.open(indexDir);
try {
- Searcher searcher = null;
+ IndexSearcher searcher = null;
boolean checkLastModified = false;
if (!create) {
try {
diff --git a/lucene/contrib/ant/src/test/org/apache/lucene/ant/IndexTaskTest.java b/lucene/contrib/ant/src/test/org/apache/lucene/ant/IndexTaskTest.java
index ee27d13b214..ffe205f50f1 100644
--- a/lucene/contrib/ant/src/test/org/apache/lucene/ant/IndexTaskTest.java
+++ b/lucene/contrib/ant/src/test/org/apache/lucene/ant/IndexTaskTest.java
@@ -18,14 +18,13 @@ package org.apache.lucene.ant;
*/
import java.io.File;
-import java.io.IOException;
+import java.io.IOException; // javadoc
import org.apache.lucene.analysis.Analyzer;
import org.apache.lucene.analysis.core.StopAnalyzer;
import org.apache.lucene.queryParser.QueryParser;
import org.apache.lucene.search.IndexSearcher;
import org.apache.lucene.search.Query;
-import org.apache.lucene.search.Searcher;
import org.apache.lucene.store.Directory;
import org.apache.tools.ant.Project;
import org.apache.tools.ant.types.FileSet;
@@ -39,7 +38,7 @@ public class IndexTaskTest extends LuceneTestCase {
private final static String docHandler =
"org.apache.lucene.ant.FileExtensionDocumentHandler";
- private Searcher searcher;
+ private IndexSearcher searcher;
private Analyzer analyzer;
private Directory dir;
diff --git a/lucene/contrib/demo/src/java/org/apache/lucene/demo/SearchFiles.java b/lucene/contrib/demo/src/java/org/apache/lucene/demo/SearchFiles.java
index f78ce137985..422e23497d7 100644
--- a/lucene/contrib/demo/src/java/org/apache/lucene/demo/SearchFiles.java
+++ b/lucene/contrib/demo/src/java/org/apache/lucene/demo/SearchFiles.java
@@ -34,7 +34,6 @@ import org.apache.lucene.search.IndexSearcher;
import org.apache.lucene.search.Query;
import org.apache.lucene.search.ScoreDoc;
import org.apache.lucene.search.Scorer;
-import org.apache.lucene.search.Searcher;
import org.apache.lucene.search.TopScoreDocCollector;
import org.apache.lucene.store.FSDirectory;
import org.apache.lucene.util.Version;
@@ -92,7 +91,7 @@ public class SearchFiles {
IndexReader reader = IndexReader.open(FSDirectory.open(new File(index)), true); // only searching, so read-only=true
- Searcher searcher = new IndexSearcher(reader);
+ IndexSearcher searcher = new IndexSearcher(reader);
Analyzer analyzer = new StandardAnalyzer(Version.LUCENE_CURRENT);
BufferedReader in = null;
@@ -144,7 +143,7 @@ public class SearchFiles {
* This simulates the streaming search use case, where all hits are supposed to
* be processed, regardless of their relevance.
*/
- public static void doStreamingSearch(final Searcher searcher, Query query) throws IOException {
+ public static void doStreamingSearch(final IndexSearcher searcher, Query query) throws IOException {
Collector streamingHitCollector = new Collector() {
private Scorer scorer;
private int docBase;
@@ -186,7 +185,7 @@ public class SearchFiles {
* is executed another time and all hits are collected.
*
*/
- public static void doPagingSearch(BufferedReader in, Searcher searcher, Query query,
+ public static void doPagingSearch(BufferedReader in, IndexSearcher searcher, Query query,
int hitsPerPage, boolean raw, boolean interactive) throws IOException {
// Collect enough docs to show 5 pages
diff --git a/lucene/contrib/highlighter/src/test/org/apache/lucene/search/highlight/HighlighterTest.java b/lucene/contrib/highlighter/src/test/org/apache/lucene/search/highlight/HighlighterTest.java
index b42fa173857..b9fa38174d4 100644
--- a/lucene/contrib/highlighter/src/test/org/apache/lucene/search/highlight/HighlighterTest.java
+++ b/lucene/contrib/highlighter/src/test/org/apache/lucene/search/highlight/HighlighterTest.java
@@ -58,7 +58,6 @@ import org.apache.lucene.search.BooleanQuery;
import org.apache.lucene.search.FilteredQuery;
import org.apache.lucene.search.IndexSearcher;
import org.apache.lucene.search.MultiPhraseQuery;
-import org.apache.lucene.search.MultiSearcher;
import org.apache.lucene.search.MultiTermQuery;
import org.apache.lucene.search.NumericRangeQuery;
import org.apache.lucene.search.PhraseQuery;
@@ -1301,68 +1300,6 @@ public class HighlighterTest extends BaseTokenStreamTestCase implements Formatte
assertEquals("XHTML Encoding should have worked:", rawDocContent, decodedSnippet);
}
- public void testMultiSearcher() throws Exception {
- // setup index 1
- Directory ramDir1 = newDirectory();
- IndexWriter writer1 = new IndexWriter(ramDir1, newIndexWriterConfig(
- TEST_VERSION_CURRENT, new MockAnalyzer(MockTokenizer.SIMPLE, true, MockTokenFilter.ENGLISH_STOPSET, true)));
- Document d = new Document();
- Field f = new Field(FIELD_NAME, "multiOne", Field.Store.YES, Field.Index.ANALYZED);
- d.add(f);
- writer1.addDocument(d);
- writer1.optimize();
- writer1.close();
- IndexReader reader1 = IndexReader.open(ramDir1, true);
-
- // setup index 2
- Directory ramDir2 = newDirectory();
- IndexWriter writer2 = new IndexWriter(ramDir2, newIndexWriterConfig(
- TEST_VERSION_CURRENT, new MockAnalyzer(MockTokenizer.SIMPLE, true, MockTokenFilter.ENGLISH_STOPSET, true)));
- d = new Document();
- f = new Field(FIELD_NAME, "multiTwo", Field.Store.YES, Field.Index.ANALYZED);
- d.add(f);
- writer2.addDocument(d);
- writer2.optimize();
- writer2.close();
- IndexReader reader2 = IndexReader.open(ramDir2, true);
-
- IndexSearcher searchers[] = new IndexSearcher[2];
- searchers[0] = new IndexSearcher(ramDir1, true);
- searchers[1] = new IndexSearcher(ramDir2, true);
- MultiSearcher multiSearcher = new MultiSearcher(searchers);
- QueryParser parser = new QueryParser(TEST_VERSION_CURRENT, FIELD_NAME, new MockAnalyzer(MockTokenizer.SIMPLE, true, MockTokenFilter.ENGLISH_STOPSET, true));
- parser.setMultiTermRewriteMethod(MultiTermQuery.SCORING_BOOLEAN_QUERY_REWRITE);
- query = parser.parse("multi*");
- if (VERBOSE) System.out.println("Searching for: " + query.toString(FIELD_NAME));
- // at this point the multisearcher calls combine(query[])
- hits = multiSearcher.search(query, null, 1000);
-
- // query = QueryParser.parse("multi*", FIELD_NAME, new StandardAnalyzer(TEST_VERSION));
- Query expandedQueries[] = new Query[2];
- expandedQueries[0] = query.rewrite(reader1);
- expandedQueries[1] = query.rewrite(reader2);
- query = query.combine(expandedQueries);
-
- // create an instance of the highlighter with the tags used to surround
- // highlighted text
- Highlighter highlighter = new Highlighter(this, new QueryTermScorer(query));
-
- for (int i = 0; i < hits.totalHits; i++) {
- String text = multiSearcher.doc(hits.scoreDocs[i].doc).get(FIELD_NAME);
- TokenStream tokenStream = analyzer.tokenStream(FIELD_NAME, new StringReader(text));
- String highlightedText = highlighter.getBestFragment(tokenStream, text);
- if (VERBOSE) System.out.println(highlightedText);
- }
- assertTrue("Failed to find correct number of highlights " + numHighlights + " found",
- numHighlights == 2);
- reader1.close();
- reader2.close();
- searchers[0].close();
- searchers[1].close();
- ramDir1.close();
- ramDir2.close();
- }
-
public void testFieldSpecificHighlighting() throws Exception {
TestHighlightRunner helper = new TestHighlightRunner() {
diff --git a/lucene/contrib/lucli/src/java/lucli/LuceneMethods.java b/lucene/contrib/lucli/src/java/lucli/LuceneMethods.java
index 3b351d8e517..9aca8ee3f27 100644
--- a/lucene/contrib/lucli/src/java/lucli/LuceneMethods.java
+++ b/lucene/contrib/lucli/src/java/lucli/LuceneMethods.java
@@ -53,11 +53,10 @@ import org.apache.lucene.queryParser.MultiFieldQueryParser;
import org.apache.lucene.queryParser.ParseException;
import org.apache.lucene.search.Collector;
import org.apache.lucene.search.Explanation;
-import org.apache.lucene.search.IndexSearcher;
import org.apache.lucene.search.Query;
import org.apache.lucene.search.ScoreDoc;
import org.apache.lucene.search.Scorer;
-import org.apache.lucene.search.Searcher;
+import org.apache.lucene.search.IndexSearcher;
import org.apache.lucene.store.FSDirectory;
import org.apache.lucene.util.Version;
import org.apache.lucene.util.BytesRef;
@@ -73,7 +72,7 @@ class LuceneMethods {
private List fields; //Fields as a vector
private List indexedFields; //Fields as a vector
private String fieldsArray[]; //Fields as an array
- private Searcher searcher;
+ private IndexSearcher searcher;
private Query query; //current query string
private String analyzerClassFQN = null; // Analyzer class, if NULL, use default Analyzer
diff --git a/lucene/contrib/memory/src/java/org/apache/lucene/index/memory/MemoryIndex.java b/lucene/contrib/memory/src/java/org/apache/lucene/index/memory/MemoryIndex.java
index 1d4a06cac93..b30adc7c7ad 100644
--- a/lucene/contrib/memory/src/java/org/apache/lucene/index/memory/MemoryIndex.java
+++ b/lucene/contrib/memory/src/java/org/apache/lucene/index/memory/MemoryIndex.java
@@ -51,7 +51,6 @@ import org.apache.lucene.index.FieldInvertState;
import org.apache.lucene.search.Collector;
import org.apache.lucene.search.IndexSearcher;
import org.apache.lucene.search.Query;
-import org.apache.lucene.search.Searcher;
import org.apache.lucene.search.Scorer;
import org.apache.lucene.search.Similarity;
import org.apache.lucene.store.RAMDirectory; // for javadocs
@@ -421,7 +420,7 @@ public class MemoryIndex implements Serializable {
if (query == null)
throw new IllegalArgumentException("query must not be null");
- Searcher searcher = createSearcher();
+ IndexSearcher searcher = createSearcher();
try {
final float[] scores = new float[1]; // inits to 0.0f (no match)
searcher.search(query, new Collector() {
@@ -738,7 +737,7 @@ public class MemoryIndex implements Serializable {
*/
private final class MemoryIndexReader extends IndexReader {
- private Searcher searcher; // needed to find searcher.getSimilarity()
+ private IndexSearcher searcher; // needed to find searcher.getSimilarity()
private MemoryIndexReader() {
super(); // avoid as much superclass baggage as possible
@@ -1135,7 +1134,7 @@ public class MemoryIndex implements Serializable {
return Similarity.getDefault();
}
- private void setSearcher(Searcher searcher) {
+ private void setSearcher(IndexSearcher searcher) {
this.searcher = searcher;
}
diff --git a/lucene/contrib/queries/src/java/org/apache/lucene/search/BoostingQuery.java b/lucene/contrib/queries/src/java/org/apache/lucene/search/BoostingQuery.java
index 104e2b29be0..5a5fa0b388d 100644
--- a/lucene/contrib/queries/src/java/org/apache/lucene/search/BoostingQuery.java
+++ b/lucene/contrib/queries/src/java/org/apache/lucene/search/BoostingQuery.java
@@ -23,7 +23,7 @@ import org.apache.lucene.index.IndexReader;
import org.apache.lucene.search.BooleanQuery;
import org.apache.lucene.search.DefaultSimilarity;
import org.apache.lucene.search.Query;
-import org.apache.lucene.search.Searcher;
+import org.apache.lucene.search.IndexSearcher;
import org.apache.lucene.search.Similarity;
/**
* The BoostingQuery class can be used to effectively demote results that match a given query.
@@ -58,7 +58,7 @@ public class BoostingQuery extends Query {
BooleanQuery result = new BooleanQuery() {
@Override
- public Similarity getSimilarity(Searcher searcher) {
+ public Similarity getSimilarity(IndexSearcher searcher) {
return new DefaultSimilarity() {
@Override
diff --git a/lucene/contrib/queries/src/java/org/apache/lucene/search/FuzzyLikeThisQuery.java b/lucene/contrib/queries/src/java/org/apache/lucene/search/FuzzyLikeThisQuery.java
index 7a4a18d72b4..8c15b89c4a4 100644
--- a/lucene/contrib/queries/src/java/org/apache/lucene/search/FuzzyLikeThisQuery.java
+++ b/lucene/contrib/queries/src/java/org/apache/lucene/search/FuzzyLikeThisQuery.java
@@ -360,7 +360,7 @@ public class FuzzyLikeThisQuery extends Query
this.ignoreTF=ignoreTF;
}
@Override
- public Similarity getSimilarity(Searcher searcher)
+ public Similarity getSimilarity(IndexSearcher searcher)
{
Similarity result = super.getSimilarity(searcher);
result = new SimilarityDelegator(result) {
diff --git a/lucene/contrib/queries/src/test/org/apache/lucene/search/ChainedFilterTest.java b/lucene/contrib/queries/src/test/org/apache/lucene/search/ChainedFilterTest.java
index f5f7eb4b052..b2b16f8db96 100644
--- a/lucene/contrib/queries/src/test/org/apache/lucene/search/ChainedFilterTest.java
+++ b/lucene/contrib/queries/src/test/org/apache/lucene/search/ChainedFilterTest.java
@@ -29,11 +29,10 @@ import org.apache.lucene.search.BooleanClause;
import org.apache.lucene.search.BooleanQuery;
import org.apache.lucene.search.CachingWrapperFilter;
import org.apache.lucene.search.Filter;
-import org.apache.lucene.search.IndexSearcher;
import org.apache.lucene.search.MatchAllDocsQuery;
import org.apache.lucene.search.Query;
import org.apache.lucene.search.QueryWrapperFilter;
-import org.apache.lucene.search.Searcher;
+import org.apache.lucene.search.IndexSearcher;
import org.apache.lucene.search.TermQuery;
import org.apache.lucene.search.TermRangeFilter;
import org.apache.lucene.search.TopDocs;
@@ -195,7 +194,7 @@ public class ChainedFilterTest extends LuceneTestCase {
IndexReader reader = writer.getReader();
writer.close();
- Searcher searcher = new IndexSearcher(reader);
+ IndexSearcher searcher = new IndexSearcher(reader);
Query query = new TermQuery(new Term("none", "none"));
diff --git a/lucene/contrib/queries/src/test/org/apache/lucene/search/regex/TestSpanRegexQuery.java b/lucene/contrib/queries/src/test/org/apache/lucene/search/regex/TestSpanRegexQuery.java
index 3ce218ca7dd..fd32f13abe6 100644
--- a/lucene/contrib/queries/src/test/org/apache/lucene/search/regex/TestSpanRegexQuery.java
+++ b/lucene/contrib/queries/src/test/org/apache/lucene/search/regex/TestSpanRegexQuery.java
@@ -27,10 +27,8 @@ import org.apache.lucene.index.IndexWriter;
import org.apache.lucene.index.Term;
import org.apache.lucene.index.IndexWriterConfig.OpenMode;
import org.apache.lucene.search.IndexSearcher;
-import org.apache.lucene.search.MultiSearcher;
import org.apache.lucene.search.spans.SpanFirstQuery;
import org.apache.lucene.search.spans.SpanMultiTermQueryWrapper;
-import org.apache.lucene.search.spans.SpanNearQuery;
import org.apache.lucene.search.spans.SpanQuery;
import org.apache.lucene.store.Directory;
import org.apache.lucene.store.LockObtainFailedException;
@@ -85,33 +83,6 @@ public class TestSpanRegexQuery extends LuceneTestCase {
directory.close();
}
- public void testSpanRegexBug() throws CorruptIndexException, IOException {
- createRAMDirectories();
-
- SpanQuery srq = new SpanMultiTermQueryWrapper(new RegexQuery(new Term("field", "a.*")));
- SpanQuery stq = new SpanMultiTermQueryWrapper(new RegexQuery(new Term("field", "b.*")));
- SpanNearQuery query = new SpanNearQuery(new SpanQuery[] { srq, stq }, 6,
- true);
-
- // 1. Search the same store which works
- IndexSearcher[] arrSearcher = new IndexSearcher[2];
- arrSearcher[0] = new IndexSearcher(indexStoreA, true);
- arrSearcher[1] = new IndexSearcher(indexStoreB, true);
- MultiSearcher searcher = new MultiSearcher(arrSearcher);
- int numHits = searcher.search(query, null, 1000).totalHits;
- arrSearcher[0].close();
- arrSearcher[1].close();
-
- // Will fail here
- // We expect 2 but only one matched
- // The rewriter function only write it once on the first IndexSearcher
- // So it's using term: a1 b1 to search on the second IndexSearcher
- // As a result, it won't match the document in the second IndexSearcher
- assertEquals(2, numHits);
- indexStoreA.close();
- indexStoreB.close();
- }
-
private void createRAMDirectories() throws CorruptIndexException,
LockObtainFailedException, IOException {
// creating a document to store
diff --git a/lucene/contrib/queryparser/src/test/org/apache/lucene/queryParser/surround/query/BooleanQueryTst.java b/lucene/contrib/queryparser/src/test/org/apache/lucene/queryParser/surround/query/BooleanQueryTst.java
index 631b7d88235..c89127cde9d 100644
--- a/lucene/contrib/queryparser/src/test/org/apache/lucene/queryParser/surround/query/BooleanQueryTst.java
+++ b/lucene/contrib/queryparser/src/test/org/apache/lucene/queryParser/surround/query/BooleanQueryTst.java
@@ -21,7 +21,6 @@ import java.io.IOException;
import org.apache.lucene.index.IndexReader;
import org.apache.lucene.search.IndexSearcher;
-import org.apache.lucene.search.Searcher;
import org.apache.lucene.search.Collector;
import org.apache.lucene.search.Scorer;
import org.apache.lucene.search.Query;
@@ -122,7 +121,7 @@ public class BooleanQueryTst {
/* if (verbose) System.out.println("Lucene: " + query.toString()); */
TestCollector tc = new TestCollector();
- Searcher searcher = new IndexSearcher(dBase.getDb(), true);
+ IndexSearcher searcher = new IndexSearcher(dBase.getDb(), true);
try {
searcher.search(query, tc);
} finally {
diff --git a/lucene/contrib/remote/build.xml b/lucene/contrib/remote/build.xml
deleted file mode 100644
index 3978b747a89..00000000000
--- a/lucene/contrib/remote/build.xml
+++ /dev/null
@@ -1,34 +0,0 @@
-
-
-
-
-
-
-
- Remote Searchable based on RMI
-
-
-
-
-
-
-
-
-
-
-
diff --git a/lucene/contrib/remote/pom.xml.template b/lucene/contrib/remote/pom.xml.template
deleted file mode 100644
index 7d4842f4dce..00000000000
--- a/lucene/contrib/remote/pom.xml.template
+++ /dev/null
@@ -1,36 +0,0 @@
-
-
-
-
- 4.0.0
-
- org.apache.lucene
- lucene-contrib
- @version@
-
- org.apache.lucene
- lucene-remote
- Lucene Remote
- @version@
- Remote Searchable based on RMI
- jar
-
diff --git a/lucene/contrib/remote/src/java/org/apache/lucene/search/RMIRemoteSearchable.java b/lucene/contrib/remote/src/java/org/apache/lucene/search/RMIRemoteSearchable.java
deleted file mode 100644
index 6cf8bb3ba08..00000000000
--- a/lucene/contrib/remote/src/java/org/apache/lucene/search/RMIRemoteSearchable.java
+++ /dev/null
@@ -1,44 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements. See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.lucene.search;
-
-import java.rmi.Remote;
-
-/**
- * Marker interface to enable subclasses of {@link org.apache.lucene.search.Searchable} to be used via
- * Java RMI. Classes implementing this interface can be used as a RMI -
- * "remote object".
- *
- * {@link RMIRemoteSearchable} extends {@link org.apache.lucene.search.Searchable} and can transparently
- * be used as a such.
- *
- * Example usage:
- *
- *
- * RMIRemoteSearchable remoteObject = ...;
- * String remoteObjectName = ...;
- * Naming.rebind (remoteObjectName, remoteObject);
- * Searchable luceneSearchable = (Searchable) Naming.lookup (remoteObjectName);
- *
- *
- *
- *
- */
-public interface RMIRemoteSearchable extends Searchable, Remote {
-
-}
diff --git a/lucene/contrib/remote/src/java/org/apache/lucene/search/RemoteCachingWrapperFilter.java b/lucene/contrib/remote/src/java/org/apache/lucene/search/RemoteCachingWrapperFilter.java
deleted file mode 100644
index fe049b99815..00000000000
--- a/lucene/contrib/remote/src/java/org/apache/lucene/search/RemoteCachingWrapperFilter.java
+++ /dev/null
@@ -1,57 +0,0 @@
-package org.apache.lucene.search;
-
-/**
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements. See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-import java.io.IOException;
-
-import org.apache.lucene.index.IndexReader;
-
-/**
- * Provides caching of {@link org.apache.lucene.search.Filter}s themselves on the remote end of an RMI connection.
- * The cache is keyed on Filter's hashCode(), so if it sees the same filter twice
- * it will reuse the original version.
- * ");
- return;
- }
-
- // create and install a security manager
- if (System.getSecurityManager() == null) {
- System.setSecurityManager(new RMISecurityManager());
- }
-
- Searchable local = new IndexSearcher(FSDirectory.open(new File(indexName)), true);
- RemoteSearchable impl = new RemoteSearchable(local);
-
- // bind the implementation to "Searchable"
- Naming.rebind("//localhost/Searchable", impl);
- }
-
-}
diff --git a/lucene/contrib/remote/src/java/overview.html b/lucene/contrib/remote/src/java/overview.html
deleted file mode 100644
index e68ef1cef75..00000000000
--- a/lucene/contrib/remote/src/java/overview.html
+++ /dev/null
@@ -1,26 +0,0 @@
-
-
-
-
- remote
-
-
-
- remote
-
-
\ No newline at end of file
diff --git a/lucene/contrib/remote/src/test/org/apache/lucene/search/RemoteCachingWrapperFilterHelper.java b/lucene/contrib/remote/src/test/org/apache/lucene/search/RemoteCachingWrapperFilterHelper.java
deleted file mode 100644
index c718b2d6fef..00000000000
--- a/lucene/contrib/remote/src/test/org/apache/lucene/search/RemoteCachingWrapperFilterHelper.java
+++ /dev/null
@@ -1,59 +0,0 @@
-package org.apache.lucene.search;
-
-/**
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements. See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-import java.io.IOException;
-
-import junit.framework.Assert;
-
-import org.apache.lucene.index.IndexReader;
-
-/**
- * A unit test helper class to help with RemoteCachingWrapperFilter testing and
- * assert that it is working correctly.
- */
-public class RemoteCachingWrapperFilterHelper extends RemoteCachingWrapperFilter {
-
- private boolean shouldHaveCache;
-
- public RemoteCachingWrapperFilterHelper(Filter filter, boolean shouldHaveCache) {
- super(filter);
- this.shouldHaveCache = shouldHaveCache;
- }
-
- public void shouldHaveCache(boolean shouldHaveCache) {
- this.shouldHaveCache = shouldHaveCache;
- }
-
- @Override
- public DocIdSet getDocIdSet(IndexReader reader) throws IOException {
- Filter cachedFilter = FilterManager.getInstance().getFilter(filter);
-
- Assert.assertNotNull("Filter should not be null", cachedFilter);
- if (!shouldHaveCache) {
- Assert.assertSame("First time filter should be the same ", filter, cachedFilter);
- } else {
- Assert.assertNotSame("We should have a cached version of the filter", filter, cachedFilter);
- }
-
- if (filter instanceof CachingWrapperFilterHelper) {
- ((CachingWrapperFilterHelper)cachedFilter).setShouldHaveCache(shouldHaveCache);
- }
- return cachedFilter.getDocIdSet(reader);
- }
-}
diff --git a/lucene/contrib/remote/src/test/org/apache/lucene/search/RemoteTestCase.java b/lucene/contrib/remote/src/test/org/apache/lucene/search/RemoteTestCase.java
deleted file mode 100644
index 351bf7c7a41..00000000000
--- a/lucene/contrib/remote/src/test/org/apache/lucene/search/RemoteTestCase.java
+++ /dev/null
@@ -1,79 +0,0 @@
-package org.apache.lucene.search;
-
-/**
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements. See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-import java.io.IOException;
-import java.net.MalformedURLException;
-import java.net.ServerSocket;
-import java.net.Socket;
-import java.rmi.Naming;
-import java.rmi.NotBoundException;
-import java.rmi.RemoteException;
-import java.rmi.registry.LocateRegistry;
-import java.rmi.server.RMIClientSocketFactory;
-import java.rmi.server.RMIServerSocketFactory;
-
-import org.apache.lucene.util.LuceneTestCase;
-import org.junit.AfterClass;
-
-/**
- * Base class for remote tests.
- *
- * Call {@link #startServer(Searchable)} in a {@link #BeforeClass} annotated method
- * to start the server.
- * Call {@link #lookupRemote} to get a RemoteSearchable.
- */
-public abstract class RemoteTestCase extends LuceneTestCase {
- private static int port;
-
- public static void startServer(Searchable searchable) throws Exception {
- // publish it
- // use our own factories for testing, so we can bind to an ephemeral port.
- RMIClientSocketFactory clientFactory = new RMIClientSocketFactory() {
- public Socket createSocket(String host, int port) throws IOException {
- return new Socket(host, port);
- }};
-
- class TestRMIServerSocketFactory implements RMIServerSocketFactory {
- ServerSocket socket;
- public ServerSocket createServerSocket(int port) throws IOException {
- return (socket = new ServerSocket(port));
- }
- };
- TestRMIServerSocketFactory serverFactory = new TestRMIServerSocketFactory();
-
- LocateRegistry.createRegistry(0, clientFactory, serverFactory);
- RemoteSearchable impl = new RemoteSearchable(searchable);
- port = serverFactory.socket.getLocalPort();
- Naming.rebind("//localhost:" + port + "/Searchable", impl);
- }
-
- @AfterClass
- public static void stopServer() {
- try {
- Naming.unbind("//localhost:" + port + "/Searchable");
- } catch (RemoteException e) {
- } catch (MalformedURLException e) {
- } catch (NotBoundException e) {
- }
- }
-
- public static Searchable lookupRemote() throws Exception {
- return (Searchable)Naming.lookup("//localhost:" + port + "/Searchable");
- }
-}
diff --git a/lucene/contrib/remote/src/test/org/apache/lucene/search/TestRemoteCachingWrapperFilter.java b/lucene/contrib/remote/src/test/org/apache/lucene/search/TestRemoteCachingWrapperFilter.java
deleted file mode 100644
index 868a3fc1394..00000000000
--- a/lucene/contrib/remote/src/test/org/apache/lucene/search/TestRemoteCachingWrapperFilter.java
+++ /dev/null
@@ -1,129 +0,0 @@
-package org.apache.lucene.search;
-
-/**
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements. See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-import java.util.Map;
-
-import org.apache.lucene.analysis.MockAnalyzer;
-import org.apache.lucene.document.Document;
-import org.apache.lucene.document.Field;
-import org.apache.lucene.index.IndexWriter;
-import org.apache.lucene.index.Term;
-import org.apache.lucene.search.FilterManager.FilterItem;
-import org.apache.lucene.store.Directory;
-import org.junit.AfterClass;
-import org.junit.Before;
-import org.junit.BeforeClass;
-import org.junit.Test;
-
-/**
- * Tests that the index is cached on the searcher side of things.
- */
-public class TestRemoteCachingWrapperFilter extends RemoteTestCase {
- private static Directory indexStore;
- private static Searchable local;
-
- @BeforeClass
- public static void beforeClass() throws Exception {
- // construct an index
- indexStore = newDirectory();
- IndexWriter writer = new IndexWriter(indexStore, newIndexWriterConfig(
- TEST_VERSION_CURRENT, new MockAnalyzer()));
- Document doc = new Document();
- doc.add(newField("test", "test text", Field.Store.YES, Field.Index.ANALYZED));
- doc.add(newField("type", "A", Field.Store.YES, Field.Index.ANALYZED));
- doc.add(newField("other", "other test text", Field.Store.YES, Field.Index.ANALYZED));
- writer.addDocument(doc);
- //Need a second document to search for
- doc = new Document();
- doc.add(newField("test", "test text", Field.Store.YES, Field.Index.ANALYZED));
- doc.add(newField("type", "B", Field.Store.YES, Field.Index.ANALYZED));
- doc.add(newField("other", "other test text", Field.Store.YES, Field.Index.ANALYZED));
- writer.addDocument(doc);
- writer.optimize();
- writer.close();
- local = new IndexSearcher(indexStore, true);
- startServer(local);
- }
-
- @Before
- public void setUp () throws Exception {
- super.setUp();
- // to support test iteration > 1
- Map cache = FilterManager.getInstance().cache;
- synchronized(cache){
- cache.clear();
-
- }
- }
-
- @AfterClass
- public static void afterClass() throws Exception {
- local.close();
- indexStore.close();
- indexStore = null;
- }
-
- private static void search(Query query, Filter filter, int hitNumber, String typeValue) throws Exception {
- Searchable[] searchables = { lookupRemote() };
- Searcher searcher = new MultiSearcher(searchables);
- ScoreDoc[] result = searcher.search(query,filter, 1000).scoreDocs;
- assertEquals(1, result.length);
- Document document = searcher.doc(result[hitNumber].doc);
- assertTrue("document is null and it shouldn't be", document != null);
- assertEquals(typeValue, document.get("type"));
- assertTrue("document.getFields() Size: " + document.getFields().size() + " is not: " + 3, document.getFields().size() == 3);
- }
-
- @Test
- public void testTermRemoteFilter() throws Exception {
- CachingWrapperFilterHelper cwfh = new CachingWrapperFilterHelper(new QueryWrapperFilter(new TermQuery(new Term("type", "a"))));
-
- // This is what we are fixing - if one uses a CachingWrapperFilter(Helper) it will never
- // cache the filter on the remote site
- cwfh.setShouldHaveCache(false);
- search(new TermQuery(new Term("test", "test")), cwfh, 0, "A");
- cwfh.setShouldHaveCache(false);
- search(new TermQuery(new Term("test", "test")), cwfh, 0, "A");
-
- // This is how we fix caching - we wrap a Filter in the RemoteCachingWrapperFilter(Handler - for testing)
- // to cache the Filter on the searcher (remote) side
- RemoteCachingWrapperFilterHelper rcwfh = new RemoteCachingWrapperFilterHelper(cwfh, false);
- search(new TermQuery(new Term("test", "test")), rcwfh, 0, "A");
-
- // 2nd time we do the search, we should be using the cached Filter
- rcwfh.shouldHaveCache(true);
- search(new TermQuery(new Term("test", "test")), rcwfh, 0, "A");
-
- // assert that we get the same cached Filter, even if we create a new instance of RemoteCachingWrapperFilter(Helper)
- // this should pass because the Filter parameters are the same, and the cache uses Filter's hashCode() as cache keys,
- // and Filters' hashCode() builds on Filter parameters, not the Filter instance itself
- rcwfh = new RemoteCachingWrapperFilterHelper(new QueryWrapperFilter(new TermQuery(new Term("type", "a"))), false);
- rcwfh.shouldHaveCache(false);
- search(new TermQuery(new Term("test", "test")), rcwfh, 0, "A");
-
- rcwfh = new RemoteCachingWrapperFilterHelper(new QueryWrapperFilter(new TermQuery(new Term("type", "a"))), false);
- rcwfh.shouldHaveCache(true);
- search(new TermQuery(new Term("test", "test")), rcwfh, 0, "A");
-
- // assert that we get a non-cached version of the Filter because this is a new Query (type:b)
- rcwfh = new RemoteCachingWrapperFilterHelper(new QueryWrapperFilter(new TermQuery(new Term("type", "b"))), false);
- rcwfh.shouldHaveCache(false);
- search(new TermQuery(new Term("type", "b")), rcwfh, 0, "B");
- }
-}
diff --git a/lucene/contrib/remote/src/test/org/apache/lucene/search/TestRemoteSearchable.java b/lucene/contrib/remote/src/test/org/apache/lucene/search/TestRemoteSearchable.java
deleted file mode 100644
index a2c5f759fcd..00000000000
--- a/lucene/contrib/remote/src/test/org/apache/lucene/search/TestRemoteSearchable.java
+++ /dev/null
@@ -1,128 +0,0 @@
-package org.apache.lucene.search;
-
-/**
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements. See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-import org.apache.lucene.analysis.MockAnalyzer;
-import org.apache.lucene.document.*;
-import org.apache.lucene.index.IndexWriter;
-import org.apache.lucene.index.Term;
-import org.apache.lucene.store.Directory;
-import org.junit.AfterClass;
-import org.junit.BeforeClass;
-import org.junit.Test;
-
-import java.util.Collections;
-import java.util.Set;
-import java.util.HashSet;
-
-public class TestRemoteSearchable extends RemoteTestCase {
- private static Directory indexStore;
- private static Searchable local;
-
- @BeforeClass
- public static void beforeClass() throws Exception {
- // construct an index
- indexStore = newDirectory();
- IndexWriter writer = new IndexWriter(indexStore, newIndexWriterConfig(
- TEST_VERSION_CURRENT, new MockAnalyzer()));
- Document doc = new Document();
- doc.add(newField("test", "test text", Field.Store.YES, Field.Index.ANALYZED));
- doc.add(newField("other", "other test text", Field.Store.YES, Field.Index.ANALYZED));
- writer.addDocument(doc);
- writer.optimize();
- writer.close();
- local = new IndexSearcher(indexStore, true);
- startServer(local);
- }
-
- @AfterClass
- public static void afterClass() throws Exception {
- local.close();
- indexStore.close();
- indexStore = null;
- }
-
- private static void search(Query query) throws Exception {
- // try to search the published index
- Searchable[] searchables = { lookupRemote() };
- Searcher searcher = new MultiSearcher(searchables);
- ScoreDoc[] result = searcher.search(query, null, 1000).scoreDocs;
-
- assertEquals(1, result.length);
- Document document = searcher.doc(result[0].doc);
- assertTrue("document is null and it shouldn't be", document != null);
- assertEquals("test text", document.get("test"));
- assertTrue("document.getFields() Size: " + document.getFields().size() + " is not: " + 2, document.getFields().size() == 2);
- Set ftl = new HashSet();
- ftl.add("other");
- FieldSelector fs = new SetBasedFieldSelector(ftl, Collections.emptySet());
- document = searcher.doc(0, fs);
- assertTrue("document is null and it shouldn't be", document != null);
- assertTrue("document.getFields() Size: " + document.getFields().size() + " is not: " + 1, document.getFields().size() == 1);
- fs = new MapFieldSelector("other");
- document = searcher.doc(0, fs);
- assertTrue("document is null and it shouldn't be", document != null);
- assertTrue("document.getFields() Size: " + document.getFields().size() + " is not: " + 1, document.getFields().size() == 1);
- }
-
- @Test
- public void testTermQuery() throws Exception {
- search(new TermQuery(new Term("test", "test")));
- }
-
- @Test
- public void testBooleanQuery() throws Exception {
- BooleanQuery query = new BooleanQuery();
- query.add(new TermQuery(new Term("test", "test")), BooleanClause.Occur.MUST);
- search(query);
- }
-
- @Test
- public void testPhraseQuery() throws Exception {
- PhraseQuery query = new PhraseQuery();
- query.add(new Term("test", "test"));
- query.add(new Term("test", "text"));
- search(query);
- }
-
- // Tests bug fix at http://nagoya.apache.org/bugzilla/show_bug.cgi?id=20290
- @Test
- public void testQueryFilter() throws Exception {
- // try to search the published index
- Searchable[] searchables = { lookupRemote() };
- Searcher searcher = new MultiSearcher(searchables);
- ScoreDoc[] hits = searcher.search(
- new TermQuery(new Term("test", "text")),
- new QueryWrapperFilter(new TermQuery(new Term("test", "test"))), 1000).scoreDocs;
- assertEquals(1, hits.length);
- ScoreDoc[] nohits = searcher.search(
- new TermQuery(new Term("test", "text")),
- new QueryWrapperFilter(new TermQuery(new Term("test", "non-existent-term"))), 1000).scoreDocs;
- assertEquals(0, nohits.length);
- }
-
- @Test
- public void testConstantScoreQuery() throws Exception {
- // try to search the published index
- Searchable[] searchables = { lookupRemote() };
- Searcher searcher = new MultiSearcher(searchables);
- ScoreDoc[] hits = searcher.search(
- new ConstantScoreQuery(new TermQuery(new Term("test", "test"))), null, 1000).scoreDocs;
- assertEquals(1, hits.length);
- }
-}
diff --git a/lucene/contrib/remote/src/test/org/apache/lucene/search/TestRemoteSort.java b/lucene/contrib/remote/src/test/org/apache/lucene/search/TestRemoteSort.java
deleted file mode 100644
index 1f0ad581f9f..00000000000
--- a/lucene/contrib/remote/src/test/org/apache/lucene/search/TestRemoteSort.java
+++ /dev/null
@@ -1,425 +0,0 @@
-package org.apache.lucene.search;
-
-/**
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements. See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-import java.io.IOException;
-import java.util.HashMap;
-import java.util.Iterator;
-import java.util.Locale;
-import java.util.Random;
-
-import org.apache.lucene.analysis.MockAnalyzer;
-import org.apache.lucene.document.Document;
-import org.apache.lucene.document.Field;
-import org.apache.lucene.index.IndexReader;
-import org.apache.lucene.index.IndexWriter;
-import org.apache.lucene.index.Term;
-import org.apache.lucene.store.Directory;
-import org.apache.lucene.util.BytesRef;
-import org.junit.AfterClass;
-import org.junit.Before;
-import org.junit.BeforeClass;
-import org.junit.Test;
-
-/**
- * Unit tests for remote sorting code.
- * Note: This is a modified copy of {@link TestSort} without duplicated test
- * methods and therefore unused members and methodes.
- */
-
-public class TestRemoteSort extends RemoteTestCase {
-
- private static IndexSearcher full;
- private static Directory indexStore;
- private Query queryX;
- private Query queryY;
- private Query queryA;
- private Query queryF;
- private Sort sort;
-
- // document data:
- // the tracer field is used to determine which document was hit
- // the contents field is used to search and sort by relevance
- // the int field to sort by int
- // the float field to sort by float
- // the string field to sort by string
- // the i18n field includes accented characters for testing locale-specific sorting
- private static final String[][] data = new String[][] {
- // tracer contents int float string custom i18n long double, 'short', byte, 'custom parser encoding'
- { "A", "x a", "5", "4f", "c", "A-3", "p\u00EAche", "10", "-4.0", "3", "126", "J"},//A, x
- { "B", "y a", "5", "3.4028235E38", "i", "B-10", "HAT", "1000000000", "40.0", "24", "1", "I"},//B, y
- { "C", "x a b c", "2147483647", "1.0", "j", "A-2", "p\u00E9ch\u00E9", "99999999", "40.00002343", "125", "15", "H"},//C, x
- { "D", "y a b c", "-1", "0.0f", "a", "C-0", "HUT", String.valueOf(Long.MAX_VALUE), String.valueOf(Double.MIN_VALUE), String.valueOf(Short.MIN_VALUE), String.valueOf(Byte.MIN_VALUE), "G"},//D, y
- { "E", "x a b c d", "5", "2f", "h", "B-8", "peach", String.valueOf(Long.MIN_VALUE), String.valueOf(Double.MAX_VALUE), String.valueOf(Short.MAX_VALUE), String.valueOf(Byte.MAX_VALUE), "F"},//E,x
- { "F", "y a b c d", "2", "3.14159f", "g", "B-1", "H\u00C5T", "-44", "343.034435444", "-3", "0", "E"},//F,y
- { "G", "x a b c d", "3", "-1.0", "f", "C-100", "sin", "323254543543", "4.043544", "5", "100", "D"},//G,x
- { "H", "y a b c d", "0", "1.4E-45", "e", "C-88", "H\u00D8T", "1023423423005","4.043545", "10", "-50", "C"},//H,y
- { "I", "x a b c d e f", "-2147483648", "1.0e+0", "d", "A-10", "s\u00EDn", "332422459999", "4.043546", "-340", "51", "B"},//I,x
- { "J", "y a b c d e f", "4", ".5", "b", "C-7", "HOT", "34334543543", "4.0000220343", "300", "2", "A"},//J,y
- { "W", "g", "1", null, null, null, null, null, null, null, null, null},
- { "X", "g", "1", "0.1", null, null, null, null, null, null, null, null},
- { "Y", "g", "1", "0.2", null, null, null, null, null, null, null, null},
- { "Z", "f g", null, null, null, null, null, null, null, null, null, null}
- };
-
- // create an index of all the documents, or just the x, or just the y documents
- @BeforeClass
- public static void beforeClass() throws Exception {
- indexStore = newDirectory();
- IndexWriter writer = new IndexWriter(
- indexStore,
- newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer()).
- setMaxBufferedDocs(2).
- setMergePolicy(newLogMergePolicy(1000))
- );
- for (int i=0; i value(int slot) {
- return Integer.valueOf(slotValues[slot]);
- }
- }
-
- static class MyFieldComparatorSource extends FieldComparatorSource {
- @Override
- public FieldComparator newComparator(String fieldname, int numHits, int sortPos, boolean reversed) {
- return new MyFieldComparator(numHits);
- }
- }
-
- // test a variety of sorts using a remote searcher
- @Test
- public void testRemoteSort() throws Exception {
- Searchable searcher = lookupRemote();
- MultiSearcher multi = new MultiSearcher (searcher);
- runMultiSorts(multi, true); // this runs on the full index
- }
-
- // test custom search when remote
- /* rewrite with new API
- public void testRemoteCustomSort() throws Exception {
- Searchable searcher = getRemote();
- MultiSearcher multi = new MultiSearcher (new Searchable[] { searcher });
- sort.setSort (new SortField ("custom", SampleComparable.getComparatorSource()));
- assertMatches (multi, queryX, sort, "CAIEG");
- sort.setSort (new SortField ("custom", SampleComparable.getComparatorSource(), true));
- assertMatches (multi, queryY, sort, "HJDBF");
-
- assertSaneFieldCaches(getName() + " ComparatorSource");
- FieldCache.DEFAULT.purgeAllCaches();
-
- SortComparator custom = SampleComparable.getComparator();
- sort.setSort (new SortField ("custom", custom));
- assertMatches (multi, queryX, sort, "CAIEG");
- sort.setSort (new SortField ("custom", custom, true));
- assertMatches (multi, queryY, sort, "HJDBF");
-
- assertSaneFieldCaches(getName() + " Comparator");
- FieldCache.DEFAULT.purgeAllCaches();
- }*/
-
- // test that the relevancy scores are the same even if
- // hits are sorted
- @Test
- public void testNormalizedScores() throws Exception {
-
- // capture relevancy scores
- HashMap scoresX = getScores (full.search (queryX, null, 1000).scoreDocs, full);
- HashMap scoresY = getScores (full.search (queryY, null, 1000).scoreDocs, full);
- HashMap scoresA = getScores (full.search (queryA, null, 1000).scoreDocs, full);
-
- // we'll test searching locally, remote and multi
- MultiSearcher remote = new MultiSearcher (lookupRemote());
-
- // change sorting and make sure relevancy stays the same
-
- sort = new Sort();
- assertSameValues (scoresX, getScores (remote.search (queryX, null, 1000, sort).scoreDocs, remote));
- assertSameValues (scoresY, getScores (remote.search (queryY, null, 1000, sort).scoreDocs, remote));
- assertSameValues (scoresA, getScores (remote.search (queryA, null, 1000, sort).scoreDocs, remote));
-
- sort.setSort(SortField.FIELD_DOC);
- assertSameValues (scoresX, getScores (remote.search (queryX, null, 1000, sort).scoreDocs, remote));
- assertSameValues (scoresY, getScores (remote.search (queryY, null, 1000, sort).scoreDocs, remote));
- assertSameValues (scoresA, getScores (remote.search (queryA, null, 1000, sort).scoreDocs, remote));
-
- sort.setSort (new SortField("int", SortField.INT));
- assertSameValues (scoresX, getScores (remote.search (queryX, null, 1000, sort).scoreDocs, remote));
- assertSameValues (scoresY, getScores (remote.search (queryY, null, 1000, sort).scoreDocs, remote));
- assertSameValues (scoresA, getScores (remote.search (queryA, null, 1000, sort).scoreDocs, remote));
-
- sort.setSort (new SortField("float", SortField.FLOAT));
- assertSameValues (scoresX, getScores (remote.search (queryX, null, 1000, sort).scoreDocs, remote));
- assertSameValues (scoresY, getScores (remote.search (queryY, null, 1000, sort).scoreDocs, remote));
- assertSameValues (scoresA, getScores (remote.search (queryA, null, 1000, sort).scoreDocs, remote));
-
- sort.setSort (new SortField("string", SortField.STRING));
- assertSameValues (scoresX, getScores (remote.search (queryX, null, 1000, sort).scoreDocs, remote));
- assertSameValues (scoresY, getScores (remote.search (queryY, null, 1000, sort).scoreDocs, remote));
- assertSameValues (scoresA, getScores (remote.search (queryA, null, 1000, sort).scoreDocs, remote));
-
- sort.setSort (new SortField("int", SortField.INT), new SortField("float", SortField.FLOAT));
- assertSameValues (scoresX, getScores (remote.search (queryX, null, 1000, sort).scoreDocs, remote));
- assertSameValues (scoresY, getScores (remote.search (queryY, null, 1000, sort).scoreDocs, remote));
- assertSameValues (scoresA, getScores (remote.search (queryA, null, 1000, sort).scoreDocs, remote));
-
- sort.setSort (new SortField ("int", SortField.INT, true), new SortField (null, SortField.DOC, true) );
- assertSameValues (scoresX, getScores (remote.search (queryX, null, 1000, sort).scoreDocs, remote));
- assertSameValues (scoresY, getScores (remote.search (queryY, null, 1000, sort).scoreDocs, remote));
- assertSameValues (scoresA, getScores (remote.search (queryA, null, 1000, sort).scoreDocs, remote));
-
- sort.setSort (new SortField("float", SortField.FLOAT));
- assertSameValues (scoresX, getScores (remote.search (queryX, null, 1000, sort).scoreDocs, remote));
- assertSameValues (scoresY, getScores (remote.search (queryY, null, 1000, sort).scoreDocs, remote));
- assertSameValues (scoresA, getScores (remote.search (queryA, null, 1000, sort).scoreDocs, remote));
- }
-
- // runs a variety of sorts useful for multisearchers
- private void runMultiSorts(Searcher multi, boolean isFull) throws Exception {
- sort.setSort(SortField.FIELD_DOC);
- String expected = isFull ? "ABCDEFGHIJ" : "ACEGIBDFHJ";
- assertMatches(multi, queryA, sort, expected);
-
- sort.setSort(new SortField ("int", SortField.INT));
- expected = isFull ? "IDHFGJABEC" : "IDHFGJAEBC";
- assertMatches(multi, queryA, sort, expected);
-
- sort.setSort(new SortField ("int", SortField.INT), SortField.FIELD_DOC);
- expected = isFull ? "IDHFGJABEC" : "IDHFGJAEBC";
- assertMatches(multi, queryA, sort, expected);
-
- sort.setSort(new SortField ("int", SortField.INT));
- expected = isFull ? "IDHFGJABEC" : "IDHFGJAEBC";
- assertMatches(multi, queryA, sort, expected);
-
- sort.setSort(new SortField ("float", SortField.FLOAT), SortField.FIELD_DOC);
- assertMatches(multi, queryA, sort, "GDHJCIEFAB");
-
- sort.setSort(new SortField("float", SortField.FLOAT));
- assertMatches(multi, queryA, sort, "GDHJCIEFAB");
-
- sort.setSort(new SortField("string", SortField.STRING));
- assertMatches(multi, queryA, sort, "DJAIHGFEBC");
-
- sort.setSort(new SortField ("int", SortField.INT, true));
- expected = isFull ? "CABEJGFHDI" : "CAEBJGFHDI";
- assertMatches(multi, queryA, sort, expected);
-
- sort.setSort(new SortField ("float", SortField.FLOAT, true));
- assertMatches(multi, queryA, sort, "BAFECIJHDG");
-
- sort.setSort(new SortField ("string", SortField.STRING, true));
- assertMatches(multi, queryA, sort, "CBEFGHIAJD");
-
- sort.setSort(new SortField ("int", SortField.INT), new SortField ("float", SortField.FLOAT));
- assertMatches(multi, queryA, sort, "IDHFGJEABC");
-
- sort.setSort(new SortField ("float", SortField.FLOAT), new SortField ("string", SortField.STRING));
- assertMatches(multi, queryA, sort, "GDHJICEFAB");
-
- sort.setSort(new SortField ("int", SortField.INT));
- assertMatches(multi, queryF, sort, "IZJ");
-
- sort.setSort(new SortField ("int", SortField.INT, true));
- assertMatches(multi, queryF, sort, "JZI");
-
- sort.setSort(new SortField ("float", SortField.FLOAT));
- assertMatches(multi, queryF, sort, "ZJI");
-
- sort.setSort(new SortField ("string", SortField.STRING));
- assertMatches(multi, queryF, sort, "ZJI");
-
- sort.setSort(new SortField ("string", SortField.STRING, true));
- assertMatches(multi, queryF, sort, "IJZ");
-
- // up to this point, all of the searches should have "sane"
- // FieldCache behavior, and should have reused hte cache in several cases
- assertSaneFieldCaches(getName() + " Basics");
- // next we'll check an alternate Locale for string, so purge first
- FieldCache.DEFAULT.purgeAllCaches();
-
- sort.setSort(new SortField ("string", Locale.US) );
- assertMatches(multi, queryA, sort, "DJAIHGFEBC");
-
- sort.setSort(new SortField ("string", Locale.US, true));
- assertMatches(multi, queryA, sort, "CBEFGHIAJD");
-
- assertSaneFieldCaches(getName() + " Locale.US");
- FieldCache.DEFAULT.purgeAllCaches();
- }
-
- // make sure the documents returned by the search match the expected list
- private void assertMatches(Searcher searcher, Query query, Sort sort,
- String expectedResult) throws IOException {
- //ScoreDoc[] result = searcher.search (query, null, 1000, sort).scoreDocs;
- TopDocs hits = searcher.search (query, null, expectedResult.length(), sort);
- ScoreDoc[] result = hits.scoreDocs;
- assertEquals(hits.totalHits, expectedResult.length());
- StringBuilder buff = new StringBuilder(10);
- int n = result.length;
- for (int i=0; i getScores (ScoreDoc[] hits, Searcher searcher)
- throws IOException {
- HashMap scoreMap = new HashMap();
- int n = hits.length;
- for (int i=0; i m1, HashMap, ?> m2) {
- int n = m1.size();
- int m = m2.size();
- assertEquals (n, m);
- Iterator> iter = m1.keySet().iterator();
- while (iter.hasNext()) {
- Object key = iter.next();
- Object o1 = m1.get(key);
- Object o2 = m2.get(key);
- if (o1 instanceof Float) {
- assertEquals(((Float)o1).floatValue(), ((Float)o2).floatValue(), 1e-6);
- } else {
- assertEquals (m1.get(key), m2.get(key));
- }
- }
- }
-}
diff --git a/lucene/contrib/spatial/src/test/org/apache/lucene/spatial/tier/TestCartesian.java b/lucene/contrib/spatial/src/test/org/apache/lucene/spatial/tier/TestCartesian.java
index ff88c4f48e3..3f417f40844 100644
--- a/lucene/contrib/spatial/src/test/org/apache/lucene/spatial/tier/TestCartesian.java
+++ b/lucene/contrib/spatial/src/test/org/apache/lucene/spatial/tier/TestCartesian.java
@@ -252,7 +252,7 @@ public class TestCartesian extends LuceneTestCase {
// Perform the search, using the term query, the serial chain filter, and the
// distance sort
- TopDocs hits = searcher.search(customScore.createWeight(searcher),null, 1000, sort);
+ TopDocs hits = searcher.search(customScore,null, 1000, sort);
int results = hits.totalHits;
ScoreDoc[] scoreDocs = hits.scoreDocs;
@@ -348,7 +348,7 @@ public class TestCartesian extends LuceneTestCase {
// Perform the search, using the term query, the serial chain filter, and the
// distance sort
- TopDocs hits = searcher.search(customScore.createWeight(searcher),null, 1000, sort);
+ TopDocs hits = searcher.search(customScore,null, 1000, sort);
int results = hits.totalHits;
ScoreDoc[] scoreDocs = hits.scoreDocs;
@@ -444,7 +444,7 @@ public class TestCartesian extends LuceneTestCase {
// Perform the search, using the term query, the serial chain filter, and the
// distance sort
- TopDocs hits = searcher.search(customScore.createWeight(searcher),null, 1000, sort);
+ TopDocs hits = searcher.search(customScore,null, 1000, sort);
int results = hits.totalHits;
ScoreDoc[] scoreDocs = hits.scoreDocs;
@@ -539,7 +539,7 @@ public class TestCartesian extends LuceneTestCase {
// Perform the search, using the term query, the serial chain filter, and the
// distance sort
- TopDocs hits = searcher.search(customScore.createWeight(searcher),dq.getFilter(), 1000); //,sort);
+ TopDocs hits = searcher.search(customScore,dq.getFilter(), 1000); //,sort);
int results = hits.totalHits;
ScoreDoc[] scoreDocs = hits.scoreDocs;
diff --git a/lucene/contrib/wordnet/src/java/org/apache/lucene/wordnet/SynExpand.java b/lucene/contrib/wordnet/src/java/org/apache/lucene/wordnet/SynExpand.java
index 0e573e85555..908cfd66eff 100755
--- a/lucene/contrib/wordnet/src/java/org/apache/lucene/wordnet/SynExpand.java
+++ b/lucene/contrib/wordnet/src/java/org/apache/lucene/wordnet/SynExpand.java
@@ -39,7 +39,6 @@ import org.apache.lucene.search.Collector;
import org.apache.lucene.search.IndexSearcher;
import org.apache.lucene.search.Query;
import org.apache.lucene.search.Scorer;
-import org.apache.lucene.search.Searcher;
import org.apache.lucene.search.TermQuery;
import org.apache.lucene.store.FSDirectory;
import org.apache.lucene.util.Version;
@@ -104,7 +103,7 @@ public final class SynExpand {
* @return the expanded Query
*/
public static Query expand( String query,
- Searcher syns,
+ IndexSearcher syns,
Analyzer a,
String f,
final float boost)
diff --git a/lucene/contrib/wordnet/src/java/org/apache/lucene/wordnet/SynLookup.java b/lucene/contrib/wordnet/src/java/org/apache/lucene/wordnet/SynLookup.java
index 894e7494908..066df71ba02 100644
--- a/lucene/contrib/wordnet/src/java/org/apache/lucene/wordnet/SynLookup.java
+++ b/lucene/contrib/wordnet/src/java/org/apache/lucene/wordnet/SynLookup.java
@@ -39,7 +39,6 @@ import org.apache.lucene.search.IndexSearcher;
import org.apache.lucene.search.Query;
import org.apache.lucene.search.ScoreDoc;
import org.apache.lucene.search.Scorer;
-import org.apache.lucene.search.Searcher;
import org.apache.lucene.search.TermQuery;
import org.apache.lucene.store.FSDirectory;
@@ -114,7 +113,7 @@ public class SynLookup {
* @param boost
*/
public static Query expand( String query,
- Searcher syns,
+ IndexSearcher syns,
Analyzer a,
final String field,
final float boost)
diff --git a/lucene/contrib/wordnet/src/test/org/apache/lucene/wordnet/TestWordnet.java b/lucene/contrib/wordnet/src/test/org/apache/lucene/wordnet/TestWordnet.java
index 01faf42a80d..52171479992 100644
--- a/lucene/contrib/wordnet/src/test/org/apache/lucene/wordnet/TestWordnet.java
+++ b/lucene/contrib/wordnet/src/test/org/apache/lucene/wordnet/TestWordnet.java
@@ -26,13 +26,12 @@ import org.apache.lucene.search.BooleanClause;
import org.apache.lucene.search.BooleanQuery;
import org.apache.lucene.search.IndexSearcher;
import org.apache.lucene.search.Query;
-import org.apache.lucene.search.Searcher;
import org.apache.lucene.search.TermQuery;
import org.apache.lucene.store.Directory;
import org.apache.lucene.util.LuceneTestCase;
public class TestWordnet extends LuceneTestCase {
- private Searcher searcher;
+ private IndexSearcher searcher;
private Directory dir;
String storePathName = new File(TEMP_DIR,"testLuceneWordnet").getAbsolutePath();
diff --git a/lucene/src/java/org/apache/lucene/document/Document.java b/lucene/src/java/org/apache/lucene/document/Document.java
index 680bfd99c06..1dea49d465c 100644
--- a/lucene/src/java/org/apache/lucene/document/Document.java
+++ b/lucene/src/java/org/apache/lucene/document/Document.java
@@ -19,7 +19,6 @@ package org.apache.lucene.document;
import java.util.*; // for javadoc
import org.apache.lucene.search.ScoreDoc; // for javadoc
-import org.apache.lucene.search.Searcher; // for javadoc
import org.apache.lucene.index.IndexReader; // for javadoc
/** Documents are the unit of indexing and search.
@@ -32,8 +31,7 @@ import org.apache.lucene.index.IndexReader; // for javadoc
*
* Note that fields which are not {@link Fieldable#isStored() stored} are
* not available in documents retrieved from the index, e.g. with {@link
- * ScoreDoc#doc}, {@link Searcher#doc(int)} or {@link
- * IndexReader#document(int)}.
+ * ScoreDoc#doc} or {@link IndexReader#document(int)}.
*/
public final class Document implements java.io.Serializable {
diff --git a/lucene/src/java/org/apache/lucene/index/LogMergePolicy.java b/lucene/src/java/org/apache/lucene/index/LogMergePolicy.java
index 4a28195ecd7..357460c1d9d 100644
--- a/lucene/src/java/org/apache/lucene/index/LogMergePolicy.java
+++ b/lucene/src/java/org/apache/lucene/index/LogMergePolicy.java
@@ -338,10 +338,18 @@ public abstract class LogMergePolicy extends MergePolicy {
int maxNumSegments, Set segmentsToOptimize) throws IOException {
assert maxNumSegments > 0;
+ if (verbose()) {
+ message("findMergesForOptimize: maxNumSegs=" + maxNumSegments + " segsToOptimize= "+ segmentsToOptimize);
+ }
// If the segments are already optimized (e.g. there's only 1 segment), or
// there are {
// Implement coord disabling.
// Inherit javadoc.
@Override
- public Similarity getSimilarity(Searcher searcher) {
+ public Similarity getSimilarity(IndexSearcher searcher) {
Similarity result = super.getSimilarity(searcher);
if (disableCoord) { // disable coord as requested
result = new SimilarityDelegator(result) {
@@ -179,7 +179,7 @@ public class BooleanQuery extends Query implements Iterable {
protected ArrayList weights;
protected int maxCoord; // num optional + num required
- public BooleanWeight(Searcher searcher)
+ public BooleanWeight(IndexSearcher searcher)
throws IOException {
this.similarity = getSimilarity(searcher);
weights = new ArrayList(clauses.size());
@@ -362,7 +362,7 @@ public class BooleanQuery extends Query implements Iterable {
}
@Override
- public Weight createWeight(Searcher searcher) throws IOException {
+ public Weight createWeight(IndexSearcher searcher) throws IOException {
return new BooleanWeight(searcher);
}
diff --git a/lucene/src/java/org/apache/lucene/search/ConstantScoreQuery.java b/lucene/src/java/org/apache/lucene/search/ConstantScoreQuery.java
index bcb372e2eaf..fe76121d3c2 100644
--- a/lucene/src/java/org/apache/lucene/search/ConstantScoreQuery.java
+++ b/lucene/src/java/org/apache/lucene/search/ConstantScoreQuery.java
@@ -100,7 +100,7 @@ public class ConstantScoreQuery extends Query {
private float queryNorm;
private float queryWeight;
- public ConstantWeight(Searcher searcher) throws IOException {
+ public ConstantWeight(IndexSearcher searcher) throws IOException {
this.similarity = getSimilarity(searcher);
this.innerWeight = (query == null) ? null : query.createWeight(searcher);
}
@@ -256,7 +256,7 @@ public class ConstantScoreQuery extends Query {
}
@Override
- public Weight createWeight(Searcher searcher) throws IOException {
+ public Weight createWeight(IndexSearcher searcher) throws IOException {
return new ConstantScoreQuery.ConstantWeight(searcher);
}
diff --git a/lucene/src/java/org/apache/lucene/search/DisjunctionMaxQuery.java b/lucene/src/java/org/apache/lucene/search/DisjunctionMaxQuery.java
index 240903f36f6..b6cd0295247 100644
--- a/lucene/src/java/org/apache/lucene/search/DisjunctionMaxQuery.java
+++ b/lucene/src/java/org/apache/lucene/search/DisjunctionMaxQuery.java
@@ -101,7 +101,7 @@ public class DisjunctionMaxQuery extends Query implements Iterable {
protected ArrayList weights = new ArrayList(); // The Weight's for our subqueries, in 1-1 correspondence with disjuncts
/* Construct the Weight for this Query searched by searcher. Recursively construct subquery weights. */
- public DisjunctionMaxWeight(Searcher searcher) throws IOException {
+ public DisjunctionMaxWeight(IndexSearcher searcher) throws IOException {
this.similarity = searcher.getSimilarity();
for (Query disjunctQuery : disjuncts) {
weights.add(disjunctQuery.createWeight(searcher));
@@ -180,7 +180,7 @@ public class DisjunctionMaxQuery extends Query implements Iterable {
/* Create the Weight used to score us */
@Override
- public Weight createWeight(Searcher searcher) throws IOException {
+ public Weight createWeight(IndexSearcher searcher) throws IOException {
return new DisjunctionMaxWeight(searcher);
}
diff --git a/lucene/src/java/org/apache/lucene/search/FilteredQuery.java b/lucene/src/java/org/apache/lucene/search/FilteredQuery.java
index 70301d3ab25..6f27cfc6773 100644
--- a/lucene/src/java/org/apache/lucene/search/FilteredQuery.java
+++ b/lucene/src/java/org/apache/lucene/search/FilteredQuery.java
@@ -59,7 +59,7 @@ extends Query {
* This is accomplished by overriding the Scorer returned by the Weight.
*/
@Override
- public Weight createWeight(final Searcher searcher) throws IOException {
+ public Weight createWeight(final IndexSearcher searcher) throws IOException {
final Weight weight = query.createWeight (searcher);
final Similarity similarity = query.getSimilarity(searcher);
return new Weight() {
diff --git a/lucene/src/java/org/apache/lucene/search/IndexSearcher.java b/lucene/src/java/org/apache/lucene/search/IndexSearcher.java
index 8aabf3e3026..cc0dc763c08 100644
--- a/lucene/src/java/org/apache/lucene/search/IndexSearcher.java
+++ b/lucene/src/java/org/apache/lucene/search/IndexSearcher.java
@@ -19,7 +19,17 @@ package org.apache.lucene.search;
import java.io.IOException;
import java.util.ArrayList;
+import java.util.Iterator;
import java.util.List;
+import java.util.NoSuchElementException;
+import java.util.concurrent.Callable;
+import java.util.concurrent.CompletionService;
+import java.util.concurrent.ExecutionException;
+import java.util.concurrent.Executor;
+import java.util.concurrent.ExecutorCompletionService;
+import java.util.concurrent.ExecutorService;
+import java.util.concurrent.locks.Lock;
+import java.util.concurrent.locks.ReentrantLock;
import org.apache.lucene.document.Document;
import org.apache.lucene.document.FieldSelector;
@@ -28,6 +38,7 @@ import org.apache.lucene.index.IndexReader;
import org.apache.lucene.index.Term;
import org.apache.lucene.store.Directory;
import org.apache.lucene.util.ReaderUtil;
+import org.apache.lucene.util.ThreadInterruptedException;
/** Implements search over a single IndexReader.
*
@@ -44,14 +55,19 @@ import org.apache.lucene.util.ReaderUtil;
* synchronize on the IndexSearcher
instance;
* use your own (non-Lucene) objects instead.
*/
-public class IndexSearcher extends Searcher {
+public class IndexSearcher {
IndexReader reader;
private boolean closeReader;
// NOTE: these members might change in incompatible ways
// in the next release
- protected IndexReader[] subReaders;
- protected int[] docStarts;
+ protected final IndexReader[] subReaders;
+ protected final IndexSearcher[] subSearchers;
+ protected final int[] docStarts;
+ private final ExecutorService executor;
+
+ /** The Similarity implementation used by this searcher. */
+ private Similarity similarity = Similarity.getDefault();
/** Creates a searcher searching the index in the named
* directory, with readOnly=true
@@ -60,7 +76,7 @@ public class IndexSearcher extends Searcher {
* @throws IOException if there is a low-level IO error
*/
public IndexSearcher(Directory path) throws CorruptIndexException, IOException {
- this(IndexReader.open(path, true), true);
+ this(IndexReader.open(path, true), true, null);
}
/** Creates a searcher searching the index in the named
@@ -75,12 +91,27 @@ public class IndexSearcher extends Searcher {
* @throws IOException if there is a low-level IO error
*/
public IndexSearcher(Directory path, boolean readOnly) throws CorruptIndexException, IOException {
- this(IndexReader.open(path, readOnly), true);
+ this(IndexReader.open(path, readOnly), true, null);
}
/** Creates a searcher searching the provided index. */
public IndexSearcher(IndexReader r) {
- this(r, false);
+ this(r, false, null);
+ }
+
+ /** Runs searches for each segment separately, using the
+ * provided ExecutorService. IndexSearcher will not
+ * shutdown/awaitTermination this ExecutorService on
+ * close; you must do so, eventually, on your own. NOTE:
+ * if you are using {@link NIOFSDirectory}, do not use
+ * the shutdownNow method of ExecutorService as this uses
+ * Thread.interrupt under-the-hood which can silently
+ * close file descriptors (see LUCENE-2239).
+ *
+ * @lucene.experimental */
+ public IndexSearcher(IndexReader r, ExecutorService executor) {
+ this(r, false, executor);
}
/** Expert: directly specify the reader, subReaders and
@@ -91,21 +122,58 @@ public class IndexSearcher extends Searcher {
this.reader = reader;
this.subReaders = subReaders;
this.docStarts = docStarts;
+ subSearchers = new IndexSearcher[subReaders.length];
+ for(int i=0;iLUCENE-2239).
+ *
+ * @lucene.experimental */
+ public IndexSearcher(IndexReader reader, IndexReader[] subReaders, int[] docStarts, ExecutorService executor) {
+ this.reader = reader;
+ this.subReaders = subReaders;
+ this.docStarts = docStarts;
+ subSearchers = new IndexSearcher[subReaders.length];
+ for(int i=0;i subReadersList = new ArrayList();
gatherSubReaders(subReadersList, reader);
subReaders = subReadersList.toArray(new IndexReader[subReadersList.size()]);
docStarts = new int[subReaders.length];
+ subSearchers = new IndexSearcher[subReaders.length];
int maxDoc = 0;
for (int i = 0; i < subReaders.length; i++) {
docStarts[i] = maxDoc;
maxDoc += subReaders[i].maxDoc();
+ if (subReaders[i] == r) {
+ subSearchers[i] = this;
+ } else {
+ subSearchers[i] = new IndexSearcher(subReaders[i]);
+ }
}
}
@@ -118,59 +186,219 @@ public class IndexSearcher extends Searcher {
return reader;
}
+ /** Returns the atomic subReaders used by this searcher. */
+ public IndexReader[] getSubReaders() {
+ return subReaders;
+ }
+
+ /** Expert: Returns one greater than the largest possible document number.
+ *
+ * @see org.apache.lucene.index.IndexReader#maxDoc()
+ */
+ public int maxDoc() {
+ return reader.maxDoc();
+ }
+
+ /** Returns total docFreq for this term. */
+ public int docFreq(final Term term) throws IOException {
+ if (executor == null) {
+ return reader.docFreq(term);
+ } else {
+ final ExecutionHelper runner = new ExecutionHelper(executor);
+ for(int i = 0; i < subReaders.length; i++) {
+ final IndexSearcher searchable = subSearchers[i];
+ runner.submit(new Callable() {
+ public Integer call() throws IOException {
+ return Integer.valueOf(searchable.docFreq(term));
+ }
+ });
+ }
+ int docFreq = 0;
+ for (Integer num : runner) {
+ docFreq += num.intValue();
+ }
+ return docFreq;
+ }
+ }
+
+ /* Sugar for .getIndexReader().document(docID) */
+ public Document doc(int docID) throws CorruptIndexException, IOException {
+ return reader.document(docID);
+ }
+
+ /* Sugar for .getIndexReader().document(docID, fieldSelector) */
+ public Document doc(int docID, FieldSelector fieldSelector) throws CorruptIndexException, IOException {
+ return reader.document(docID, fieldSelector);
+ }
+
+ /** Expert: Set the Similarity implementation used by this Searcher.
+ *
+ * @see Similarity#setDefault(Similarity)
+ */
+ public void setSimilarity(Similarity similarity) {
+ this.similarity = similarity;
+ }
+
+ public Similarity getSimilarity() {
+ return similarity;
+ }
+
/**
* Note that the underlying IndexReader is not closed, if
* IndexSearcher was constructed with IndexSearcher(IndexReader r).
* If the IndexReader was supplied implicitly by specifying a directory, then
- * the IndexReader gets closed.
+ * the IndexReader is closed.
*/
- @Override
public void close() throws IOException {
- if(closeReader)
+ if (closeReader) {
reader.close();
- }
-
- // inherit javadoc
- @Override
- public int docFreq(Term term) throws IOException {
- return reader.docFreq(term);
- }
-
- // inherit javadoc
- @Override
- public Document doc(int i) throws CorruptIndexException, IOException {
- return reader.document(i);
- }
-
- // inherit javadoc
- @Override
- public Document doc(int i, FieldSelector fieldSelector) throws CorruptIndexException, IOException {
- return reader.document(i, fieldSelector);
- }
-
- // inherit javadoc
- @Override
- public int maxDoc() throws IOException {
- return reader.maxDoc();
- }
-
- // inherit javadoc
- @Override
- public TopDocs search(Weight weight, Filter filter, int nDocs) throws IOException {
-
- int limit = reader.maxDoc();
- if (limit == 0) {
- limit = 1;
}
- nDocs = Math.min(nDocs, limit);
-
- TopScoreDocCollector collector = TopScoreDocCollector.create(nDocs, !weight.scoresDocsOutOfOrder());
- search(weight, filter, collector);
- return collector.topDocs();
}
- @Override
- public TopFieldDocs search(Weight weight, Filter filter,
+ /** Finds the top n
+ * hits for query
.
+ *
+ * @throws BooleanQuery.TooManyClauses
+ */
+ public TopDocs search(Query query, int n)
+ throws IOException {
+ return search(query, null, n);
+ }
+
+
+ /** Finds the top n
+ * hits for query
, applying filter
if non-null.
+ *
+ * @throws BooleanQuery.TooManyClauses
+ */
+ public TopDocs search(Query query, Filter filter, int n)
+ throws IOException {
+ return search(createWeight(query), filter, n);
+ }
+
+ /** Lower-level search API.
+ *
+ * {@link Collector#collect(int)} is called for every matching
+ * document.
+ *
Collector-based access to remote indexes is discouraged.
+ *
+ *
Applications should only use this if they need all of the
+ * matching documents. The high-level search API ({@link
+ * Searcher#search(Query, Filter, int)}) is usually more efficient, as it skips
+ * non-high-scoring hits.
+ *
+ * @param query to match documents
+ * @param filter if non-null, used to permit documents to be collected.
+ * @param results to receive hits
+ * @throws BooleanQuery.TooManyClauses
+ */
+ public void search(Query query, Filter filter, Collector results)
+ throws IOException {
+ search(createWeight(query), filter, results);
+ }
+
+ /** Lower-level search API.
+ *
+ *
{@link Collector#collect(int)} is called for every matching document.
+ *
+ *
Applications should only use this if they need all of the
+ * matching documents. The high-level search API ({@link
+ * Searcher#search(Query, int)}) is usually more efficient, as it skips
+ * non-high-scoring hits.
+ *
Note: The score
passed to this method is a raw score.
+ * In other words, the score will not necessarily be a float whose value is
+ * between 0 and 1.
+ * @throws BooleanQuery.TooManyClauses
+ */
+ public void search(Query query, Collector results)
+ throws IOException {
+ search(createWeight(query), null, results);
+ }
+
+ /** Search implementation with arbitrary sorting. Finds
+ * the top n
hits for query
, applying
+ * filter
if non-null, and sorting the hits by the criteria in
+ * sort
.
+ *
+ *
NOTE: this does not compute scores by default; use
+ * {@link IndexSearcher#setDefaultFieldSortScoring} to
+ * enable scoring.
+ *
+ * @throws BooleanQuery.TooManyClauses
+ */
+ public TopFieldDocs search(Query query, Filter filter, int n,
+ Sort sort) throws IOException {
+ return search(createWeight(query), filter, n, sort);
+ }
+
+ /**
+ * Search implementation with arbitrary sorting and no filter.
+ * @param query The query to search for
+ * @param n Return only the top n results
+ * @param sort The {@link org.apache.lucene.search.Sort} object
+ * @return The top docs, sorted according to the supplied {@link org.apache.lucene.search.Sort} instance
+ * @throws IOException
+ */
+ public TopFieldDocs search(Query query, int n,
+ Sort sort) throws IOException {
+ return search(createWeight(query), null, n, sort);
+ }
+
+ /** Expert: Low-level search implementation. Finds the top n
+ * hits for query
, applying filter
if non-null.
+ *
+ *
Applications should usually call {@link Searcher#search(Query,int)} or
+ * {@link Searcher#search(Query,Filter,int)} instead.
+ * @throws BooleanQuery.TooManyClauses
+ */
+ protected TopDocs search(Weight weight, Filter filter, int nDocs) throws IOException {
+
+ if (executor == null) {
+ // single thread
+ int limit = reader.maxDoc();
+ if (limit == 0) {
+ limit = 1;
+ }
+ nDocs = Math.min(nDocs, limit);
+ TopScoreDocCollector collector = TopScoreDocCollector.create(nDocs, !weight.scoresDocsOutOfOrder());
+ search(weight, filter, collector);
+ return collector.topDocs();
+ } else {
+ final HitQueue hq = new HitQueue(nDocs, false);
+ final Lock lock = new ReentrantLock();
+ final ExecutionHelper runner = new ExecutionHelper(executor);
+
+ for (int i = 0; i < subReaders.length; i++) { // search each sub
+ runner.submit(
+ new MultiSearcherCallableNoSort(lock, subSearchers[i], weight, filter, nDocs, hq, i, docStarts));
+ }
+
+ int totalHits = 0;
+ float maxScore = Float.NEGATIVE_INFINITY;
+ for (final TopDocs topDocs : runner) {
+ totalHits += topDocs.totalHits;
+ maxScore = Math.max(maxScore, topDocs.getMaxScore());
+ }
+
+ final ScoreDoc[] scoreDocs = new ScoreDoc[hq.size()];
+ for (int i = hq.size() - 1; i >= 0; i--) // put docs in array
+ scoreDocs[i] = hq.pop();
+
+ return new TopDocs(totalHits, scoreDocs, maxScore);
+ }
+ }
+
+ /** Expert: Low-level search implementation with arbitrary sorting. Finds
+ * the top n
hits for query
, applying
+ * filter
if non-null, and sorting the hits by the criteria in
+ * sort
.
+ *
+ * Applications should usually call {@link
+ * Searcher#search(Query,Filter,int,Sort)} instead.
+ *
+ * @throws BooleanQuery.TooManyClauses
+ */
+ protected TopFieldDocs search(Weight weight, Filter filter,
final int nDocs, Sort sort) throws IOException {
return search(weight, filter, nDocs, sort, true);
}
@@ -186,26 +414,74 @@ public class IndexSearcher extends Searcher {
* then pass that to {@link #search(Weight, Filter,
* Collector)}.
*/
- public TopFieldDocs search(Weight weight, Filter filter, int nDocs,
+ protected TopFieldDocs search(Weight weight, Filter filter, int nDocs,
Sort sort, boolean fillFields)
throws IOException {
- int limit = reader.maxDoc();
- if (limit == 0) {
- limit = 1;
- }
- nDocs = Math.min(nDocs, limit);
+ if (sort == null) throw new NullPointerException();
- TopFieldCollector collector = TopFieldCollector.create(sort, nDocs,
- fillFields, fieldSortDoTrackScores, fieldSortDoMaxScore, !weight.scoresDocsOutOfOrder());
- search(weight, filter, collector);
- return (TopFieldDocs) collector.topDocs();
+ if (executor == null) {
+ // single thread
+ int limit = reader.maxDoc();
+ if (limit == 0) {
+ limit = 1;
+ }
+ nDocs = Math.min(nDocs, limit);
+
+ TopFieldCollector collector = TopFieldCollector.create(sort, nDocs,
+ fillFields, fieldSortDoTrackScores, fieldSortDoMaxScore, !weight.scoresDocsOutOfOrder());
+ search(weight, filter, collector);
+ return (TopFieldDocs) collector.topDocs();
+ } else {
+ // TODO: make this respect fillFields
+ final FieldDocSortedHitQueue hq = new FieldDocSortedHitQueue(nDocs);
+ final Lock lock = new ReentrantLock();
+ final ExecutionHelper runner = new ExecutionHelper(executor);
+ for (int i = 0; i < subReaders.length; i++) { // search each sub
+ runner.submit(
+ new MultiSearcherCallableWithSort(lock, subSearchers[i], weight, filter, nDocs, hq, sort, i, docStarts));
+ }
+ int totalHits = 0;
+ float maxScore = Float.NEGATIVE_INFINITY;
+ for (final TopFieldDocs topFieldDocs : runner) {
+ totalHits += topFieldDocs.totalHits;
+ maxScore = Math.max(maxScore, topFieldDocs.getMaxScore());
+ }
+ final ScoreDoc[] scoreDocs = new ScoreDoc[hq.size()];
+ for (int i = hq.size() - 1; i >= 0; i--) // put docs in array
+ scoreDocs[i] = hq.pop();
+
+ return new TopFieldDocs(totalHits, scoreDocs, hq.getFields(), maxScore);
+ }
}
- @Override
- public void search(Weight weight, Filter filter, Collector collector)
+ /**
+ * Lower-level search API.
+ *
+ *
+ * {@link Collector#collect(int)} is called for every document.
+ * Collector-based access to remote indexes is discouraged.
+ *
+ *
+ * Applications should only use this if they need all of the matching
+ * documents. The high-level search API ({@link Searcher#search(Query,int)}) is
+ * usually more efficient, as it skips non-high-scoring hits.
+ *
+ * @param weight
+ * to match documents
+ * @param filter
+ * if non-null, used to permit documents to be collected.
+ * @param collector
+ * to receive hits
+ * @throws BooleanQuery.TooManyClauses
+ */
+ protected void search(Weight weight, Filter filter, Collector collector)
throws IOException {
-
+
+ // TODO: should we make this
+ // threaded...? the Collector could be sync'd?
+
+ // always use single thread:
if (filter == null) {
for (int i = 0; i < subReaders.length; i++) { // search each subreader
collector.setNextReader(subReaders[i], docStarts[i]);
@@ -268,7 +544,9 @@ public class IndexSearcher extends Searcher {
}
}
- @Override
+ /** Expert: called to re-write queries into primitive queries.
+ * @throws BooleanQuery.TooManyClauses
+ */
public Query rewrite(Query original) throws IOException {
Query query = original;
for (Query rewrittenQuery = query.rewrite(reader); rewrittenQuery != query;
@@ -278,8 +556,30 @@ public class IndexSearcher extends Searcher {
return query;
}
- @Override
- public Explanation explain(Weight weight, int doc) throws IOException {
+ /** Returns an Explanation that describes how doc
scored against
+ * query
.
+ *
+ *
This is intended to be used in developing Similarity implementations,
+ * and, for good performance, should not be displayed with every hit.
+ * Computing an explanation is as expensive as executing the query over the
+ * entire index.
+ */
+ public Explanation explain(Query query, int doc) throws IOException {
+ return explain(createWeight(query), doc);
+ }
+
+ /** Expert: low-level implementation method
+ * Returns an Explanation that describes how doc
scored against
+ * weight
.
+ *
+ *
This is intended to be used in developing Similarity implementations,
+ * and, for good performance, should not be displayed with every hit.
+ * Computing an explanation is as expensive as executing the query over the
+ * entire index.
+ *
Applications should call {@link Searcher#explain(Query, int)}.
+ * @throws BooleanQuery.TooManyClauses
+ */
+ protected Explanation explain(Weight weight, int doc) throws IOException {
int n = ReaderUtil.subIndex(doc, docStarts);
int deBasedDoc = doc - docStarts[n];
@@ -305,4 +605,175 @@ public class IndexSearcher extends Searcher {
fieldSortDoTrackScores = doTrackScores;
fieldSortDoMaxScore = doMaxScore;
}
+
+ /**
+ * creates a weight for query
+ * @return new weight
+ */
+ protected Weight createWeight(Query query) throws IOException {
+ return query.weight(this);
+ }
+
+
+ /**
+ * A thread subclass for searching a single searchable
+ */
+ private static final class MultiSearcherCallableNoSort implements Callable {
+
+ private final Lock lock;
+ private final IndexSearcher searchable;
+ private final Weight weight;
+ private final Filter filter;
+ private final int nDocs;
+ private final int i;
+ private final HitQueue hq;
+ private final int[] starts;
+
+ public MultiSearcherCallableNoSort(Lock lock, IndexSearcher searchable, Weight weight,
+ Filter filter, int nDocs, HitQueue hq, int i, int[] starts) {
+ this.lock = lock;
+ this.searchable = searchable;
+ this.weight = weight;
+ this.filter = filter;
+ this.nDocs = nDocs;
+ this.hq = hq;
+ this.i = i;
+ this.starts = starts;
+ }
+
+ public TopDocs call() throws IOException {
+ final TopDocs docs = searchable.search (weight, filter, nDocs);
+ final ScoreDoc[] scoreDocs = docs.scoreDocs;
+ for (int j = 0; j < scoreDocs.length; j++) { // merge scoreDocs into hq
+ final ScoreDoc scoreDoc = scoreDocs[j];
+ scoreDoc.doc += starts[i]; // convert doc
+ //it would be so nice if we had a thread-safe insert
+ lock.lock();
+ try {
+ if (scoreDoc == hq.insertWithOverflow(scoreDoc))
+ break;
+ } finally {
+ lock.unlock();
+ }
+ }
+ return docs;
+ }
+ }
+
+
+ /**
+ * A thread subclass for searching a single searchable
+ */
+ private static final class MultiSearcherCallableWithSort implements Callable {
+
+ private final Lock lock;
+ private final IndexSearcher searchable;
+ private final Weight weight;
+ private final Filter filter;
+ private final int nDocs;
+ private final int i;
+ private final FieldDocSortedHitQueue hq;
+ private final int[] starts;
+ private final Sort sort;
+
+ public MultiSearcherCallableWithSort(Lock lock, IndexSearcher searchable, Weight weight,
+ Filter filter, int nDocs, FieldDocSortedHitQueue hq, Sort sort, int i, int[] starts) {
+ this.lock = lock;
+ this.searchable = searchable;
+ this.weight = weight;
+ this.filter = filter;
+ this.nDocs = nDocs;
+ this.hq = hq;
+ this.i = i;
+ this.starts = starts;
+ this.sort = sort;
+ }
+
+ public TopFieldDocs call() throws IOException {
+ final TopFieldDocs docs = searchable.search (weight, filter, nDocs, sort);
+ // If one of the Sort fields is FIELD_DOC, need to fix its values, so that
+ // it will break ties by doc Id properly. Otherwise, it will compare to
+ // 'relative' doc Ids, that belong to two different searchables.
+ for (int j = 0; j < docs.fields.length; j++) {
+ if (docs.fields[j].getType() == SortField.DOC) {
+ // iterate over the score docs and change their fields value
+ for (int j2 = 0; j2 < docs.scoreDocs.length; j2++) {
+ FieldDoc fd = (FieldDoc) docs.scoreDocs[j2];
+ fd.fields[j] = Integer.valueOf(((Integer) fd.fields[j]).intValue() + starts[i]);
+ }
+ break;
+ }
+ }
+
+ lock.lock();
+ try {
+ hq.setFields(docs.fields);
+ } finally {
+ lock.unlock();
+ }
+
+ final ScoreDoc[] scoreDocs = docs.scoreDocs;
+ for (int j = 0; j < scoreDocs.length; j++) { // merge scoreDocs into hq
+ final FieldDoc fieldDoc = (FieldDoc) scoreDocs[j];
+ fieldDoc.doc += starts[i]; // convert doc
+ //it would be so nice if we had a thread-safe insert
+ lock.lock();
+ try {
+ if (fieldDoc == hq.insertWithOverflow(fieldDoc))
+ break;
+ } finally {
+ lock.unlock();
+ }
+ }
+ return docs;
+ }
+ }
+
+ /**
+ * A helper class that wraps a {@link CompletionService} and provides an
+ * iterable interface to the completed {@link Callable} instances.
+ *
+ * @param
+ * the type of the {@link Callable} return value
+ */
+ private static final class ExecutionHelper implements Iterator, Iterable {
+ private final CompletionService service;
+ private int numTasks;
+
+ ExecutionHelper(final Executor executor) {
+ this.service = new ExecutorCompletionService(executor);
+ }
+
+ public boolean hasNext() {
+ return numTasks > 0;
+ }
+
+ public void submit(Callable task) {
+ this.service.submit(task);
+ ++numTasks;
+ }
+
+ public T next() {
+ if(!this.hasNext())
+ throw new NoSuchElementException();
+ try {
+ return service.take().get();
+ } catch (InterruptedException e) {
+ throw new ThreadInterruptedException(e);
+ } catch (ExecutionException e) {
+ throw new RuntimeException(e);
+ } finally {
+ --numTasks;
+ }
+ }
+
+ public void remove() {
+ throw new UnsupportedOperationException();
+ }
+
+ public Iterator iterator() {
+ // use the shortcut here - this is only used in a privat context
+ return this;
+ }
+ }
}
diff --git a/lucene/src/java/org/apache/lucene/search/MatchAllDocsQuery.java b/lucene/src/java/org/apache/lucene/search/MatchAllDocsQuery.java
index c96a8d06e03..f9ac9b0aac5 100644
--- a/lucene/src/java/org/apache/lucene/search/MatchAllDocsQuery.java
+++ b/lucene/src/java/org/apache/lucene/search/MatchAllDocsQuery.java
@@ -95,7 +95,7 @@ public class MatchAllDocsQuery extends Query {
private float queryWeight;
private float queryNorm;
- public MatchAllDocsWeight(Searcher searcher) {
+ public MatchAllDocsWeight(IndexSearcher searcher) {
this.similarity = searcher.getSimilarity();
}
@@ -147,7 +147,7 @@ public class MatchAllDocsQuery extends Query {
}
@Override
- public Weight createWeight(Searcher searcher) {
+ public Weight createWeight(IndexSearcher searcher) {
return new MatchAllDocsWeight(searcher);
}
diff --git a/lucene/src/java/org/apache/lucene/search/MultiPhraseQuery.java b/lucene/src/java/org/apache/lucene/search/MultiPhraseQuery.java
index 408354876c2..2eb23cd7bfd 100644
--- a/lucene/src/java/org/apache/lucene/search/MultiPhraseQuery.java
+++ b/lucene/src/java/org/apache/lucene/search/MultiPhraseQuery.java
@@ -134,7 +134,7 @@ public class MultiPhraseQuery extends Query {
private float queryNorm;
private float queryWeight;
- public MultiPhraseWeight(Searcher searcher)
+ public MultiPhraseWeight(IndexSearcher searcher)
throws IOException {
this.similarity = getSimilarity(searcher);
@@ -324,7 +324,7 @@ public class MultiPhraseQuery extends Query {
}
@Override
- public Weight createWeight(Searcher searcher) throws IOException {
+ public Weight createWeight(IndexSearcher searcher) throws IOException {
return new MultiPhraseWeight(searcher);
}
diff --git a/lucene/src/java/org/apache/lucene/search/MultiSearcher.java b/lucene/src/java/org/apache/lucene/search/MultiSearcher.java
deleted file mode 100644
index a7653e7f542..00000000000
--- a/lucene/src/java/org/apache/lucene/search/MultiSearcher.java
+++ /dev/null
@@ -1,461 +0,0 @@
-package org.apache.lucene.search;
-
-/**
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements. See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-import org.apache.lucene.document.Document;
-import org.apache.lucene.document.FieldSelector;
-import org.apache.lucene.index.CorruptIndexException;
-import org.apache.lucene.index.Term;
-import org.apache.lucene.index.IndexReader;
-import org.apache.lucene.util.ReaderUtil;
-import org.apache.lucene.util.DummyConcurrentLock;
-
-import java.io.IOException;
-import java.util.HashMap;
-import java.util.HashSet;
-import java.util.Map;
-import java.util.Set;
-import java.util.concurrent.Callable;
-import java.util.concurrent.locks.Lock;
-
-/** Implements search over a set of <code>Searchable</code>s.
- *
- * Applications usually need only call the inherited {@link #search(Query,int)}
- * or {@link #search(Query,Filter,int)} methods.
- */
-public class MultiSearcher extends Searcher {
-
- /**
- * Document Frequency cache acting as a Dummy-Searcher. This class is no
- * full-fledged Searcher, but only supports the methods necessary to
- * initialize Weights.
- */
- private static class CachedDfSource extends Searcher {
- private final Map dfMap; // Map from Terms to corresponding doc freqs
- private final int maxDoc; // document count
-
- public CachedDfSource(Map dfMap, int maxDoc, Similarity similarity) {
- this.dfMap = dfMap;
- this.maxDoc = maxDoc;
- setSimilarity(similarity);
- }
-
- @Override
- public int docFreq(Term term) {
- int df;
- try {
- df = dfMap.get(term).intValue();
- } catch (NullPointerException e) {
- throw new IllegalArgumentException("df for term " + term.text()
- + " not available");
- }
- return df;
- }
-
- @Override
- public int[] docFreqs(Term[] terms) {
- final int[] result = new int[terms.length];
- for (int i = 0; i < terms.length; i++) {
- result[i] = docFreq(terms[i]);
- }
- return result;
- }
-
- @Override
- public int maxDoc() {
- return maxDoc;
- }
-
- @Override
- public Query rewrite(Query query) {
- // this is a bit of a hack. We know that a query which
- // creates a Weight based on this Dummy-Searcher is
- // always already rewritten (see preparedWeight()).
- // Therefore we just return the unmodified query here
- return query;
- }
-
- @Override
- public void close() {
- throw new UnsupportedOperationException();
- }
-
- @Override
- public Document doc(int i) {
- throw new UnsupportedOperationException();
- }
-
- @Override
- public Document doc(int i, FieldSelector fieldSelector) {
- throw new UnsupportedOperationException();
- }
-
- @Override
- public Explanation explain(Weight weight,int doc) {
- throw new UnsupportedOperationException();
- }
-
- @Override
- public void search(Weight weight, Filter filter, Collector results) {
- throw new UnsupportedOperationException();
- }
-
- @Override
- public TopDocs search(Weight weight,Filter filter,int n) {
- throw new UnsupportedOperationException();
- }
-
- @Override
- public TopFieldDocs search(Weight weight,Filter filter,int n,Sort sort) {
- throw new UnsupportedOperationException();
- }
- }
-
- private Searchable[] searchables;
- private int[] starts;
- private int maxDoc = 0;
-
- /** Creates a searcher which searches searchers. */
- public MultiSearcher(Searchable... searchables) throws IOException {
- this.searchables = searchables;
-
- starts = new int[searchables.length + 1]; // build starts array
- for (int i = 0; i < searchables.length; i++) {
- starts[i] = maxDoc;
- maxDoc += searchables[i].maxDoc(); // compute maxDocs
- }
- starts[searchables.length] = maxDoc;
- }
-
- /** Return the array of {@link Searchable}s this searches. */
- public Searchable[] getSearchables() {
- return searchables;
- }
-
- protected int[] getStarts() {
- return starts;
- }
-
- // inherit javadoc
- @Override
- public void close() throws IOException {
- for (int i = 0; i < searchables.length; i++)
- searchables[i].close();
- }
-
- @Override
- public int docFreq(Term term) throws IOException {
- int docFreq = 0;
- for (int i = 0; i < searchables.length; i++)
- docFreq += searchables[i].docFreq(term);
- return docFreq;
- }
-
- // inherit javadoc
- @Override
- public Document doc(int n) throws CorruptIndexException, IOException {
- int i = subSearcher(n); // find searcher index
- return searchables[i].doc(n - starts[i]); // dispatch to searcher
- }
-
- // inherit javadoc
- @Override
- public Document doc(int n, FieldSelector fieldSelector) throws CorruptIndexException, IOException {
- int i = subSearcher(n); // find searcher index
- return searchables[i].doc(n - starts[i], fieldSelector); // dispatch to searcher
- }
-
- /** Returns index of the searcher for document <code>n</code> in the array
- * used to construct this searcher. */
- public int subSearcher(int n) { // find searcher for doc n:
- return ReaderUtil.subIndex(n, starts);
- }
-
- /** Returns the document number of document <code>n</code> within its
- * sub-index. */
- public int subDoc(int n) {
- return n - starts[subSearcher(n)];
- }
-
- @Override
- public int maxDoc() throws IOException {
- return maxDoc;
- }
-
- @Override
- public TopDocs search(Weight weight, Filter filter, int nDocs)
- throws IOException {
-
- nDocs = Math.min(nDocs, maxDoc());
- final HitQueue hq = new HitQueue(nDocs, false);
- int totalHits = 0;
-
- for (int i = 0; i < searchables.length; i++) { // search each searcher
- final TopDocs docs = new MultiSearcherCallableNoSort(DummyConcurrentLock.INSTANCE,
- searchables[i], weight, filter, nDocs, hq, i, starts).call();
- totalHits += docs.totalHits; // update totalHits
- }
-
- final ScoreDoc[] scoreDocs = new ScoreDoc[hq.size()];
- for (int i = hq.size()-1; i >= 0; i--) // put docs in array
- scoreDocs[i] = hq.pop();
-
- float maxScore = (totalHits==0) ? Float.NEGATIVE_INFINITY : scoreDocs[0].score;
-
- return new TopDocs(totalHits, scoreDocs, maxScore);
- }
-
- @Override
- public TopFieldDocs search (Weight weight, Filter filter, int n, Sort sort) throws IOException {
- n = Math.min(n, maxDoc());
- FieldDocSortedHitQueue hq = new FieldDocSortedHitQueue(n);
- int totalHits = 0;
-
- float maxScore=Float.NEGATIVE_INFINITY;
-
- for (int i = 0; i < searchables.length; i++) { // search each searcher
- final TopFieldDocs docs = new MultiSearcherCallableWithSort(DummyConcurrentLock.INSTANCE,
- searchables[i], weight, filter, n, hq, sort, i, starts).call();
- totalHits += docs.totalHits; // update totalHits
- maxScore = Math.max(maxScore, docs.getMaxScore());
- }
-
- final ScoreDoc[] scoreDocs = new ScoreDoc[hq.size()];
- for (int i = hq.size() - 1; i >= 0; i--) // put docs in array
- scoreDocs[i] = hq.pop();
-
- return new TopFieldDocs (totalHits, scoreDocs, hq.getFields(), maxScore);
- }
-
- // inherit javadoc
- @Override
- public void search(Weight weight, Filter filter, final Collector collector)
- throws IOException {
- for (int i = 0; i < searchables.length; i++) {
-
- final int start = starts[i];
-
- final Collector hc = new Collector() {
- @Override
- public void setScorer(Scorer scorer) throws IOException {
- collector.setScorer(scorer);
- }
- @Override
- public void collect(int doc) throws IOException {
- collector.collect(doc);
- }
- @Override
- public void setNextReader(IndexReader reader, int docBase) throws IOException {
- collector.setNextReader(reader, start + docBase);
- }
- @Override
- public boolean acceptsDocsOutOfOrder() {
- return collector.acceptsDocsOutOfOrder();
- }
- };
-
- searchables[i].search(weight, filter, hc);
- }
- }
-
- @Override
- public Query rewrite(Query original) throws IOException {
- final Query[] queries = new Query[searchables.length];
- for (int i = 0; i < searchables.length; i++) {
- queries[i] = searchables[i].rewrite(original);
- }
- return queries[0].combine(queries);
- }
-
- @Override
- public Explanation explain(Weight weight, int doc) throws IOException {
- final int i = subSearcher(doc); // find searcher index
- return searchables[i].explain(weight, doc - starts[i]); // dispatch to searcher
- }
-
- /**
- * Create weight in multiple index scenario.
- *
- * Distributed query processing is done in the following steps:
- * 1. rewrite query
- * 2. extract necessary terms
- * 3. collect dfs for these terms from the Searchables
- * 4. create query weight using aggregate dfs.
- * 5. distribute that weight to Searchables
- * 6. merge results
- *
- * Steps 1-4 are done here, 5+6 in the search() methods
- *
- * @return rewritten queries
- */
- @Override
- protected Weight createWeight(Query original) throws IOException {
- // step 1
- final Query rewrittenQuery = rewrite(original);
-
- // step 2
- final Set terms = new HashSet();
- rewrittenQuery.extractTerms(terms);
-
- // step3
- final Map dfMap = createDocFrequencyMap(terms);
-
- // step4
- final int numDocs = maxDoc();
- final CachedDfSource cacheSim = new CachedDfSource(dfMap, numDocs, getSimilarity());
-
- return rewrittenQuery.weight(cacheSim);
- }
- /**
- * Collects the document frequency for the given terms form all searchables
- * @param terms term set used to collect the document frequency form all
- * searchables
- * @return a map with a term as the key and the terms aggregated document
- * frequency as a value
- * @throws IOException if a searchable throws an {@link IOException}
- */
- Map createDocFrequencyMap(final Set terms) throws IOException {
- final Term[] allTermsArray = terms.toArray(new Term[terms.size()]);
- final int[] aggregatedDfs = new int[allTermsArray.length];
- for (Searchable searchable : searchables) {
- final int[] dfs = searchable.docFreqs(allTermsArray);
- for(int j=0; j dfMap = new HashMap();
- for(int i=0; i {
-
- private final Lock lock;
- private final Searchable searchable;
- private final Weight weight;
- private final Filter filter;
- private final int nDocs;
- private final int i;
- private final HitQueue hq;
- private final int[] starts;
-
- public MultiSearcherCallableNoSort(Lock lock, Searchable searchable, Weight weight,
- Filter filter, int nDocs, HitQueue hq, int i, int[] starts) {
- this.lock = lock;
- this.searchable = searchable;
- this.weight = weight;
- this.filter = filter;
- this.nDocs = nDocs;
- this.hq = hq;
- this.i = i;
- this.starts = starts;
- }
-
- public TopDocs call() throws IOException {
- final TopDocs docs = searchable.search (weight, filter, nDocs);
- final ScoreDoc[] scoreDocs = docs.scoreDocs;
- for (int j = 0; j < scoreDocs.length; j++) { // merge scoreDocs into hq
- final ScoreDoc scoreDoc = scoreDocs[j];
- scoreDoc.doc += starts[i]; // convert doc
- //it would be so nice if we had a thread-safe insert
- lock.lock();
- try {
- if (scoreDoc == hq.insertWithOverflow(scoreDoc))
- break;
- } finally {
- lock.unlock();
- }
- }
- return docs;
- }
- }
-
- /**
- * A thread subclass for searching a single searchable
- */
- static final class MultiSearcherCallableWithSort implements Callable {
-
- private final Lock lock;
- private final Searchable searchable;
- private final Weight weight;
- private final Filter filter;
- private final int nDocs;
- private final int i;
- private final FieldDocSortedHitQueue hq;
- private final int[] starts;
- private final Sort sort;
-
- public MultiSearcherCallableWithSort(Lock lock, Searchable searchable, Weight weight,
- Filter filter, int nDocs, FieldDocSortedHitQueue hq, Sort sort, int i, int[] starts) {
- this.lock = lock;
- this.searchable = searchable;
- this.weight = weight;
- this.filter = filter;
- this.nDocs = nDocs;
- this.hq = hq;
- this.i = i;
- this.starts = starts;
- this.sort = sort;
- }
-
- public TopFieldDocs call() throws IOException {
- final TopFieldDocs docs = searchable.search (weight, filter, nDocs, sort);
- // If one of the Sort fields is FIELD_DOC, need to fix its values, so that
- // it will break ties by doc Id properly. Otherwise, it will compare to
- // 'relative' doc Ids, that belong to two different searchables.
- for (int j = 0; j < docs.fields.length; j++) {
- if (docs.fields[j].getType() == SortField.DOC) {
- // iterate over the score docs and change their fields value
- for (int j2 = 0; j2 < docs.scoreDocs.length; j2++) {
- FieldDoc fd = (FieldDoc) docs.scoreDocs[j2];
- fd.fields[j] = Integer.valueOf(((Integer) fd.fields[j]).intValue() + starts[i]);
- }
- break;
- }
- }
-
- lock.lock();
- try {
- hq.setFields(docs.fields);
- } finally {
- lock.unlock();
- }
-
- final ScoreDoc[] scoreDocs = docs.scoreDocs;
- for (int j = 0; j < scoreDocs.length; j++) { // merge scoreDocs into hq
- final FieldDoc fieldDoc = (FieldDoc) scoreDocs[j];
- fieldDoc.doc += starts[i]; // convert doc
- //it would be so nice if we had a thread-safe insert
- lock.lock();
- try {
- if (fieldDoc == hq.insertWithOverflow(fieldDoc))
- break;
- } finally {
- lock.unlock();
- }
- }
- return docs;
- }
- }
-
-}
diff --git a/lucene/src/java/org/apache/lucene/search/ParallelMultiSearcher.java b/lucene/src/java/org/apache/lucene/search/ParallelMultiSearcher.java
deleted file mode 100644
index e381bb91014..00000000000
--- a/lucene/src/java/org/apache/lucene/search/ParallelMultiSearcher.java
+++ /dev/null
@@ -1,290 +0,0 @@
-package org.apache.lucene.search;
-
-/**
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements. See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-import java.io.IOException;
-import java.util.HashMap;
-import java.util.Iterator;
-import java.util.NoSuchElementException;
-import java.util.Set;
-import java.util.concurrent.Callable;
-import java.util.concurrent.CompletionService;
-import java.util.concurrent.ExecutionException;
-import java.util.concurrent.Executor;
-import java.util.concurrent.ExecutorCompletionService;
-import java.util.concurrent.ExecutorService;
-import java.util.concurrent.Executors;
-import java.util.concurrent.locks.Lock;
-import java.util.concurrent.locks.ReentrantLock;
-
-import org.apache.lucene.index.IndexReader;
-import org.apache.lucene.index.Term;
-import org.apache.lucene.util.NamedThreadFactory;
-import org.apache.lucene.util.ThreadInterruptedException;
-
-/** Implements parallel search over a set of <code>Searchable</code>s.
- *
- * Applications usually need only call the inherited {@link #search(Query,int)}
- * or {@link #search(Query,Filter,int)} methods.
- */
-public class ParallelMultiSearcher extends MultiSearcher {
- private final ExecutorService executor;
- private final Searchable[] searchables;
- private final int[] starts;
-
- /** Creates a {@link Searchable} which searches searchables with the default
- * executor service (a cached thread pool). */
- public ParallelMultiSearcher(Searchable... searchables) throws IOException {
- this(Executors.newCachedThreadPool(new NamedThreadFactory(ParallelMultiSearcher.class.getSimpleName())), searchables);
- }
-
- /**
- * Creates a {@link Searchable} which searches searchables with the specified ExecutorService.
- */
- public ParallelMultiSearcher(ExecutorService executor, Searchable... searchables) throws IOException {
- super(searchables);
- this.searchables = searchables;
- this.starts = getStarts();
- this.executor = executor;
- }
- /**
- * Executes each {@link Searchable}'s docFreq() in its own thread and waits for each search to complete and merge
- * the results back together.
- */
- @Override
- public int docFreq(final Term term) throws IOException {
- final ExecutionHelper runner = new ExecutionHelper(executor);
- for(int i = 0; i < searchables.length; i++) {
- final Searchable searchable = searchables[i];
- runner.submit(new Callable() {
- public Integer call() throws IOException {
- return Integer.valueOf(searchable.docFreq(term));
- }
- });
- }
- int docFreq = 0;
- for (Integer num : runner) {
- docFreq += num.intValue();
- }
- return docFreq;
- }
-
- /**
- * A search implementation which executes each
- * {@link Searchable} in its own thread and waits for each search to complete and merge
- * the results back together.
- */
- @Override
- public TopDocs search(Weight weight, Filter filter, int nDocs) throws IOException {
- final HitQueue hq = new HitQueue(nDocs, false);
- final Lock lock = new ReentrantLock();
- final ExecutionHelper runner = new ExecutionHelper(executor);
-
- for (int i = 0; i < searchables.length; i++) { // search each searchable
- runner.submit(
- new MultiSearcherCallableNoSort(lock, searchables[i], weight, filter, nDocs, hq, i, starts));
- }
-
- int totalHits = 0;
- float maxScore = Float.NEGATIVE_INFINITY;
- for (final TopDocs topDocs : runner) {
- totalHits += topDocs.totalHits;
- maxScore = Math.max(maxScore, topDocs.getMaxScore());
- }
-
- final ScoreDoc[] scoreDocs = new ScoreDoc[hq.size()];
- for (int i = hq.size() - 1; i >= 0; i--) // put docs in array
- scoreDocs[i] = hq.pop();
-
- return new TopDocs(totalHits, scoreDocs, maxScore);
- }
-
- /**
- * A search implementation allowing sorting which spans a new thread for each
- * Searchable, waits for each search to complete and merges
- * the results back together.
- */
- @Override
- public TopFieldDocs search(Weight weight, Filter filter, int nDocs, Sort sort) throws IOException {
- if (sort == null) throw new NullPointerException();
-
- final FieldDocSortedHitQueue hq = new FieldDocSortedHitQueue(nDocs);
- final Lock lock = new ReentrantLock();
- final ExecutionHelper runner = new ExecutionHelper(executor);
- for (int i = 0; i < searchables.length; i++) { // search each searchable
- runner.submit(
- new MultiSearcherCallableWithSort(lock, searchables[i], weight, filter, nDocs, hq, sort, i, starts));
- }
- int totalHits = 0;
- float maxScore = Float.NEGATIVE_INFINITY;
- for (final TopFieldDocs topFieldDocs : runner) {
- totalHits += topFieldDocs.totalHits;
- maxScore = Math.max(maxScore, topFieldDocs.getMaxScore());
- }
- final ScoreDoc[] scoreDocs = new ScoreDoc[hq.size()];
- for (int i = hq.size() - 1; i >= 0; i--) // put docs in array
- scoreDocs[i] = hq.pop();
-
- return new TopFieldDocs(totalHits, scoreDocs, hq.getFields(), maxScore);
- }
-
- /** Lower-level search API.
- *
- * {@link Collector#collect(int)} is called for every matching document.
- *
- * <p>Applications should only use this if they need all of the
- * matching documents. The high-level search API ({@link
- * Searcher#search(Query,int)}) is usually more efficient, as it skips
- * non-high-scoring hits.
- *
- * <p>This method cannot be parallelized, because {@link Collector}
- * supports no concurrent access.
- *
- * @param weight to match documents
- * @param filter if non-null, a bitset used to eliminate some documents
- * @param collector to receive hits
- */
- @Override
- public void search(final Weight weight, final Filter filter, final Collector collector)
- throws IOException {
- for (int i = 0; i < searchables.length; i++) {
-
- final int start = starts[i];
-
- final Collector hc = new Collector() {
- @Override
- public void setScorer(final Scorer scorer) throws IOException {
- collector.setScorer(scorer);
- }
-
- @Override
- public void collect(final int doc) throws IOException {
- collector.collect(doc);
- }
-
- @Override
- public void setNextReader(final IndexReader reader, final int docBase) throws IOException {
- collector.setNextReader(reader, start + docBase);
- }
-
- @Override
- public boolean acceptsDocsOutOfOrder() {
- return collector.acceptsDocsOutOfOrder();
- }
- };
-
- searchables[i].search(weight, filter, hc);
- }
- }
-
- @Override
- public void close() throws IOException {
- executor.shutdown();
- super.close();
- }
-
- @Override
- HashMap createDocFrequencyMap(Set terms) throws IOException {
- final Term[] allTermsArray = terms.toArray(new Term[terms.size()]);
- final int[] aggregatedDocFreqs = new int[terms.size()];
- final ExecutionHelper runner = new ExecutionHelper(executor);
- for (Searchable searchable : searchables) {
- runner.submit(
- new DocumentFrequencyCallable(searchable, allTermsArray));
- }
- final int docFreqLen = aggregatedDocFreqs.length;
- for (final int[] docFreqs : runner) {
- for(int i=0; i < docFreqLen; i++){
- aggregatedDocFreqs[i] += docFreqs[i];
- }
- }
-
- final HashMap dfMap = new HashMap();
- for(int i=0; i {
- private final Searchable searchable;
- private final Term[] terms;
-
- public DocumentFrequencyCallable(Searchable searchable, Term[] terms) {
- this.searchable = searchable;
- this.terms = terms;
- }
-
- public int[] call() throws Exception {
- return searchable.docFreqs(terms);
- }
- }
-
- /**
- * A helper class that wraps a {@link CompletionService} and provides an
- * iterable interface to the completed {@link Callable} instances.
- *
- * @param
- * the type of the {@link Callable} return value
- */
- private static final class ExecutionHelper implements Iterator, Iterable {
- private final CompletionService service;
- private int numTasks;
-
- ExecutionHelper(final Executor executor) {
- this.service = new ExecutorCompletionService(executor);
- }
-
- public boolean hasNext() {
- return numTasks > 0;
- }
-
- public void submit(Callable task) {
- this.service.submit(task);
- ++numTasks;
- }
-
- public T next() {
- if(!this.hasNext())
- throw new NoSuchElementException();
- try {
- return service.take().get();
- } catch (InterruptedException e) {
- throw new ThreadInterruptedException(e);
- } catch (ExecutionException e) {
- throw new RuntimeException(e);
- } finally {
- --numTasks;
- }
- }
-
- public void remove() {
- throw new UnsupportedOperationException();
- }
-
- public Iterator iterator() {
- // use the shortcut here - this is only used in a privat context
- return this;
- }
-
- }
-}
diff --git a/lucene/src/java/org/apache/lucene/search/PhraseQuery.java b/lucene/src/java/org/apache/lucene/search/PhraseQuery.java
index cfe58717fbf..c5c287b84a8 100644
--- a/lucene/src/java/org/apache/lucene/search/PhraseQuery.java
+++ b/lucene/src/java/org/apache/lucene/search/PhraseQuery.java
@@ -143,7 +143,7 @@ public class PhraseQuery extends Query {
private float queryWeight;
private IDFExplanation idfExp;
- public PhraseWeight(Searcher searcher)
+ public PhraseWeight(IndexSearcher searcher)
throws IOException {
this.similarity = getSimilarity(searcher);
@@ -311,7 +311,7 @@ public class PhraseQuery extends Query {
}
@Override
- public Weight createWeight(Searcher searcher) throws IOException {
+ public Weight createWeight(IndexSearcher searcher) throws IOException {
if (terms.size() == 1) { // optimize one-term case
Term term = terms.get(0);
Query termQuery = new TermQuery(term);
diff --git a/lucene/src/java/org/apache/lucene/search/Query.java b/lucene/src/java/org/apache/lucene/search/Query.java
index 9250c1fa091..8cffc52d2f4 100644
--- a/lucene/src/java/org/apache/lucene/search/Query.java
+++ b/lucene/src/java/org/apache/lucene/search/Query.java
@@ -19,8 +19,6 @@ package org.apache.lucene.search;
import java.io.IOException;
-import java.util.HashSet;
-
import java.util.Set;
import org.apache.lucene.index.IndexReader;
@@ -89,14 +87,14 @@ public abstract class Query implements java.io.Serializable, Cloneable {
*
* Only implemented by primitive queries, which re-write to themselves.
*/
- public Weight createWeight(Searcher searcher) throws IOException {
+ public Weight createWeight(IndexSearcher searcher) throws IOException {
throw new UnsupportedOperationException();
}
/**
* Expert: Constructs and initializes a Weight for a top-level query.
*/
- public Weight weight(Searcher searcher) throws IOException {
+ public Weight weight(IndexSearcher searcher) throws IOException {
Query query = searcher.rewrite(this);
Weight weight = query.createWeight(searcher);
float sum = weight.sumOfSquaredWeights();
@@ -116,52 +114,6 @@ public abstract class Query implements java.io.Serializable, Cloneable {
return this;
}
-
- /** Expert: called when re-writing queries under MultiSearcher.
- *
- * Create a single query suitable for use by all subsearchers (in 1-1
- * correspondence with queries). This is an optimization of the OR of
- * all queries. We handle the common optimization cases of equal
- * queries and overlapping clauses of boolean OR queries (as generated
- * by MultiTermQuery.rewrite()).
- * Be careful overriding this method as queries[0] determines which
- * method will be called and is not necessarily of the same type as
- * the other queries.
- */
- public Query combine(Query[] queries) {
- HashSet uniques = new HashSet();
- for (int i = 0; i < queries.length; i++) {
- Query query = queries[i];
- BooleanClause[] clauses = null;
- // check if we can split the query into clauses
- boolean splittable = (query instanceof BooleanQuery);
- if(splittable){
- BooleanQuery bq = (BooleanQuery) query;
- splittable = bq.isCoordDisabled();
- clauses = bq.getClauses();
- for (int j = 0; splittable && j < clauses.length; j++) {
- splittable = (clauses[j].getOccur() == BooleanClause.Occur.SHOULD);
- }
- }
- if(splittable){
- for (int j = 0; j < clauses.length; j++) {
- uniques.add(clauses[j].getQuery());
- }
- } else {
- uniques.add(query);
- }
- }
- // optimization: if we have just one query, just return it
- if(uniques.size() == 1){
- return uniques.iterator().next();
- }
- BooleanQuery result = new BooleanQuery(true);
- for (final Query query : uniques)
- result.add(query, BooleanClause.Occur.SHOULD);
- return result;
- }
-
-
/**
* Expert: adds all terms occurring in this query to the terms set. Only
* works if this query is in its {@link #rewrite rewritten} form.
@@ -174,35 +126,11 @@ public abstract class Query implements java.io.Serializable, Cloneable {
}
-
- /** Expert: merges the clauses of a set of BooleanQuery's into a single
- * BooleanQuery.
- *
- *A utility for use by {@link #combine(Query[])} implementations.
- */
- public static Query mergeBooleanQueries(BooleanQuery... queries) {
- HashSet allClauses = new HashSet();
- for (BooleanQuery booleanQuery : queries) {
- for (BooleanClause clause : booleanQuery) {
- allClauses.add(clause);
- }
- }
-
- boolean coordDisabled =
- queries.length==0? false : queries[0].isCoordDisabled();
- BooleanQuery result = new BooleanQuery(coordDisabled);
- for(BooleanClause clause2 : allClauses) {
- result.add(clause2);
- }
- return result;
- }
-
-
/** Expert: Returns the Similarity implementation to be used for this query.
* Subclasses may override this method to specify their own Similarity
* implementation, perhaps one that delegates through that of the Searcher.
* By default the Searcher's Similarity implementation is returned.*/
- public Similarity getSimilarity(Searcher searcher) {
+ public Similarity getSimilarity(IndexSearcher searcher) {
return searcher.getSimilarity();
}
diff --git a/lucene/src/java/org/apache/lucene/search/Searchable.java b/lucene/src/java/org/apache/lucene/search/Searchable.java
deleted file mode 100644
index 121b231af22..00000000000
--- a/lucene/src/java/org/apache/lucene/search/Searchable.java
+++ /dev/null
@@ -1,165 +0,0 @@
-package org.apache.lucene.search;
-
-/**
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements. See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-import java.io.IOException;
-import java.io.Closeable;
-
-import org.apache.lucene.document.Document;
-import org.apache.lucene.document.FieldSelector;
-import org.apache.lucene.index.CorruptIndexException;
-import org.apache.lucene.index.Term;
-
-/**
- * The interface for search implementations.
- *
- *
- * Searchable is the abstract network protocol for searching. Implementations
- * provide search over a single index, over multiple indices, and over indices
- * on remote servers.
- *
- *
- * Queries, filters and sort criteria are designed to be compact so that they
- * may be efficiently passed to a remote index, with only the top-scoring hits
- * being returned, rather than every matching hit.
- *
- * NOTE: this interface is kept public for convenience. Since it is not
- * expected to be implemented directly, it may be changed unexpectedly between
- * releases.
- */
-public interface Searchable extends Closeable {
-
- /**
- * Lower-level search API.
- *
- *
- * {@link Collector#collect(int)} is called for every document.
- * Collector-based access to remote indexes is discouraged.
- *
- *
- * Applications should only use this if they need all of the matching
- * documents. The high-level search API ({@link Searcher#search(Query,int)}) is
- * usually more efficient, as it skips non-high-scoring hits.
- *
- * @param weight
- * to match documents
- * @param filter
- * if non-null, used to permit documents to be collected.
- * @param collector
- * to receive hits
- * @throws BooleanQuery.TooManyClauses
- */
- void search(Weight weight, Filter filter, Collector collector) throws IOException;
-
- /** Frees resources associated with this Searcher.
- * Be careful not to call this method while you are still using objects
- * that reference this Searchable.
- */
- void close() throws IOException;
-
- /** Expert: Returns the number of documents containing term
.
- *
- * @see org.apache.lucene.index.IndexReader#docFreq(Term)
- */
- int docFreq(Term term) throws IOException;
-
- /** Expert: For each term in the terms array, calculates the number of
- * documents containing term
. Returns an array with these
- * document frequencies. Used to minimize number of remote calls.
- */
- int[] docFreqs(Term[] terms) throws IOException;
-
- /** Expert: Returns one greater than the largest possible document number.
- *
- * @see org.apache.lucene.index.IndexReader#maxDoc()
- */
- int maxDoc() throws IOException;
-
- /** Expert: Low-level search implementation.  Finds the top <code>n</code>
- * hits for <code>query</code>, applying <code>filter</code> if non-null.
- *
- * <p>Applications should usually call {@link Searcher#search(Query,int)} or
- * {@link Searcher#search(Query,Filter,int)} instead.
- * @throws BooleanQuery.TooManyClauses
- */
- TopDocs search(Weight weight, Filter filter, int n) throws IOException;
-
- /**
- * Returns the stored fields of document i
.
- *
- * @see org.apache.lucene.index.IndexReader#document(int)
- * @throws CorruptIndexException if the index is corrupt
- * @throws IOException if there is a low-level IO error
- */
- Document doc(int i) throws CorruptIndexException, IOException;
-
- /**
- * Get the {@link org.apache.lucene.document.Document} at the n
th position. The {@link org.apache.lucene.document.FieldSelector}
- * may be used to determine what {@link org.apache.lucene.document.Field}s to load and how they should be loaded.
- *
- * NOTE: If the underlying Reader (more specifically, the underlying FieldsReader
) is closed before the lazy {@link org.apache.lucene.document.Field} is
- * loaded an exception may be thrown. If you want the value of a lazy {@link org.apache.lucene.document.Field} to be available after closing you must
- * explicitly load it or fetch the Document again with a new loader.
- *
- *
- * @param n Get the document at the n
th position
- * @param fieldSelector The {@link org.apache.lucene.document.FieldSelector} to use to determine what Fields should be loaded on the Document. May be null, in which case all Fields will be loaded.
- * @return The stored fields of the {@link org.apache.lucene.document.Document} at the nth position
- * @throws CorruptIndexException if the index is corrupt
- * @throws IOException if there is a low-level IO error
- *
- * @see org.apache.lucene.index.IndexReader#document(int, FieldSelector)
- * @see org.apache.lucene.document.Fieldable
- * @see org.apache.lucene.document.FieldSelector
- * @see org.apache.lucene.document.SetBasedFieldSelector
- * @see org.apache.lucene.document.LoadFirstFieldSelector
- */
- Document doc(int n, FieldSelector fieldSelector) throws CorruptIndexException, IOException;
-
- /** Expert: called to re-write queries into primitive queries.
- * @throws BooleanQuery.TooManyClauses
- */
- Query rewrite(Query query) throws IOException;
-
- /** Expert: low-level implementation method
- * Returns an Explanation that describes how doc
scored against
- * weight
.
- *
- *
This is intended to be used in developing Similarity implementations,
- * and, for good performance, should not be displayed with every hit.
- * Computing an explanation is as expensive as executing the query over the
- * entire index.
- *
Applications should call {@link Searcher#explain(Query, int)}.
- * @throws BooleanQuery.TooManyClauses
- */
- Explanation explain(Weight weight, int doc) throws IOException;
-
- /** Expert: Low-level search implementation with arbitrary sorting.  Finds
- * the top <code>n</code> hits for <code>query</code>, applying
- * <code>filter</code> if non-null, and sorting the hits by the criteria in
- * <code>sort</code>.
- *
- *
Applications should usually call {@link
- * Searcher#search(Query,Filter,int,Sort)} instead.
- *
- * @throws BooleanQuery.TooManyClauses
- */
- TopFieldDocs search(Weight weight, Filter filter, int n, Sort sort)
- throws IOException;
-
-}
diff --git a/lucene/src/java/org/apache/lucene/search/Searcher.java b/lucene/src/java/org/apache/lucene/search/Searcher.java
deleted file mode 100644
index 46f125650a6..00000000000
--- a/lucene/src/java/org/apache/lucene/search/Searcher.java
+++ /dev/null
@@ -1,183 +0,0 @@
-package org.apache.lucene.search;
-
-/**
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements. See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-import java.io.IOException;
-
-import org.apache.lucene.document.Document;
-import org.apache.lucene.index.CorruptIndexException;
-import org.apache.lucene.index.Term;
-import org.apache.lucene.document.FieldSelector;
-
-/**
- * An abstract base class for search implementations. Implements the main search
- * methods.
- *
- *
- * Note that you can only access hits from a Searcher as long as it is not yet
- * closed, otherwise an IOException will be thrown.
- */
-public abstract class Searcher implements Searchable {
- /** Search implementation with arbitrary sorting. Finds
- * the top n
hits for query
, applying
- * filter
if non-null, and sorting the hits by the criteria in
- * sort
.
- *
- *
NOTE: this does not compute scores by default; use
- * {@link IndexSearcher#setDefaultFieldSortScoring} to
- * enable scoring.
- *
- * @throws BooleanQuery.TooManyClauses
- */
- public TopFieldDocs search(Query query, Filter filter, int n,
- Sort sort) throws IOException {
- return search(createWeight(query), filter, n, sort);
- }
-
- /**
- * Search implementation with arbitrary sorting and no filter.
- * @param query The query to search for
- * @param n Return only the top n results
- * @param sort The {@link org.apache.lucene.search.Sort} object
- * @return The top docs, sorted according to the supplied {@link org.apache.lucene.search.Sort} instance
- * @throws IOException
- */
- public TopFieldDocs search(Query query, int n,
- Sort sort) throws IOException {
- return search(createWeight(query), null, n, sort);
- }
-
- /** Lower-level search API.
- *
- *
{@link Collector#collect(int)} is called for every matching document.
- *
- *
Applications should only use this if they need all of the
- * matching documents. The high-level search API ({@link
- * Searcher#search(Query, int)}) is usually more efficient, as it skips
- * non-high-scoring hits.
- *
Note: The score
passed to this method is a raw score.
- * In other words, the score will not necessarily be a float whose value is
- * between 0 and 1.
- * @throws BooleanQuery.TooManyClauses
- */
- public void search(Query query, Collector results)
- throws IOException {
- search(createWeight(query), null, results);
- }
-
- /** Lower-level search API.
- *
- *
{@link Collector#collect(int)} is called for every matching
- * document.
- *
Collector-based access to remote indexes is discouraged.
- *
- *
Applications should only use this if they need all of the
- * matching documents. The high-level search API ({@link
- * Searcher#search(Query, Filter, int)}) is usually more efficient, as it skips
- * non-high-scoring hits.
- *
- * @param query to match documents
- * @param filter if non-null, used to permit documents to be collected.
- * @param results to receive hits
- * @throws BooleanQuery.TooManyClauses
- */
- public void search(Query query, Filter filter, Collector results)
- throws IOException {
- search(createWeight(query), filter, results);
- }
-
- /** Finds the top n
- * hits for query
, applying filter
if non-null.
- *
- * @throws BooleanQuery.TooManyClauses
- */
- public TopDocs search(Query query, Filter filter, int n)
- throws IOException {
- return search(createWeight(query), filter, n);
- }
-
- /** Finds the top n
- * hits for query
.
- *
- * @throws BooleanQuery.TooManyClauses
- */
- public TopDocs search(Query query, int n)
- throws IOException {
- return search(query, null, n);
- }
-
- /** Returns an Explanation that describes how doc
scored against
- * query
.
- *
- *
This is intended to be used in developing Similarity implementations,
- * and, for good performance, should not be displayed with every hit.
- * Computing an explanation is as expensive as executing the query over the
- * entire index.
- */
- public Explanation explain(Query query, int doc) throws IOException {
- return explain(createWeight(query), doc);
- }
-
- /** The Similarity implementation used by this searcher. */
- private Similarity similarity = Similarity.getDefault();
-
- /** Expert: Set the Similarity implementation used by this Searcher.
- *
- * @see Similarity#setDefault(Similarity)
- */
- public void setSimilarity(Similarity similarity) {
- this.similarity = similarity;
- }
-
- /** Expert: Return the Similarity implementation used by this Searcher.
- *
- *
This defaults to the current value of {@link Similarity#getDefault()}.
- */
- public Similarity getSimilarity() {
- return this.similarity;
- }
-
- /**
- * creates a weight for query
- * @return new weight
- */
- protected Weight createWeight(Query query) throws IOException {
- return query.weight(this);
- }
-
- // inherit javadoc
- public int[] docFreqs(Term[] terms) throws IOException {
- int[] result = new int[terms.length];
- for (int i = 0; i < terms.length; i++) {
- result[i] = docFreq(terms[i]);
- }
- return result;
- }
-
- abstract public void search(Weight weight, Filter filter, Collector results) throws IOException;
- abstract public void close() throws IOException;
- abstract public int docFreq(Term term) throws IOException;
- abstract public int maxDoc() throws IOException;
- abstract public TopDocs search(Weight weight, Filter filter, int n) throws IOException;
- abstract public Document doc(int i) throws CorruptIndexException, IOException;
- abstract public Document doc(int docid, FieldSelector fieldSelector) throws CorruptIndexException, IOException;
- abstract public Query rewrite(Query query) throws IOException;
- abstract public Explanation explain(Weight weight, int doc) throws IOException;
- abstract public TopFieldDocs search(Weight weight, Filter filter, int n, Sort sort) throws IOException;
- /* End patch for GCJ bug #15411. */
-}
diff --git a/lucene/src/java/org/apache/lucene/search/Similarity.java b/lucene/src/java/org/apache/lucene/search/Similarity.java
index d97095c858c..a9916eca955 100644
--- a/lucene/src/java/org/apache/lucene/search/Similarity.java
+++ b/lucene/src/java/org/apache/lucene/search/Similarity.java
@@ -722,7 +722,7 @@ public abstract class Similarity implements Serializable {
and an explanation for the term.
* @throws IOException
*/
- public IDFExplanation idfExplain(final Term term, final Searcher searcher, int docFreq) throws IOException {
+ public IDFExplanation idfExplain(final Term term, final IndexSearcher searcher, int docFreq) throws IOException {
final int df = docFreq;
final int max = searcher.maxDoc();
final float idf = idf(df, max);
@@ -743,7 +743,7 @@ public abstract class Similarity implements Serializable {
* #idfExplain(Term,Searcher,int)} by passing
* searcher.docFreq(term)
as the docFreq.
*/
- public IDFExplanation idfExplain(final Term term, final Searcher searcher) throws IOException {
+ public IDFExplanation idfExplain(final Term term, final IndexSearcher searcher) throws IOException {
return idfExplain(term, searcher, searcher.docFreq(term));
}
@@ -761,7 +761,7 @@ public abstract class Similarity implements Serializable {
* for each term.
* @throws IOException
*/
- public IDFExplanation idfExplain(Collection terms, Searcher searcher) throws IOException {
+ public IDFExplanation idfExplain(Collection terms, IndexSearcher searcher) throws IOException {
final int max = searcher.maxDoc();
float idf = 0.0f;
final StringBuilder exp = new StringBuilder();
diff --git a/lucene/src/java/org/apache/lucene/search/TermQuery.java b/lucene/src/java/org/apache/lucene/search/TermQuery.java
index 4fda9b9fa17..6eb34c6eab9 100644
--- a/lucene/src/java/org/apache/lucene/search/TermQuery.java
+++ b/lucene/src/java/org/apache/lucene/search/TermQuery.java
@@ -41,7 +41,7 @@ public class TermQuery extends Query {
private float queryWeight;
private IDFExplanation idfExp;
- public TermWeight(Searcher searcher)
+ public TermWeight(IndexSearcher searcher)
throws IOException {
this.similarity = getSimilarity(searcher);
if (docFreq != -1) {
@@ -180,7 +180,7 @@ public class TermQuery extends Query {
public Term getTerm() { return term; }
@Override
- public Weight createWeight(Searcher searcher) throws IOException {
+ public Weight createWeight(IndexSearcher searcher) throws IOException {
return new TermWeight(searcher);
}
diff --git a/lucene/src/java/org/apache/lucene/search/function/CustomScoreQuery.java b/lucene/src/java/org/apache/lucene/search/function/CustomScoreQuery.java
index 4be1ae334bb..e1e39f3fd21 100755
--- a/lucene/src/java/org/apache/lucene/search/function/CustomScoreQuery.java
+++ b/lucene/src/java/org/apache/lucene/search/function/CustomScoreQuery.java
@@ -28,7 +28,7 @@ import org.apache.lucene.search.Explanation;
import org.apache.lucene.search.Query;
import org.apache.lucene.search.Weight;
import org.apache.lucene.search.Scorer;
-import org.apache.lucene.search.Searcher;
+import org.apache.lucene.search.IndexSearcher;
import org.apache.lucene.search.Similarity;
import org.apache.lucene.util.ToStringUtils;
@@ -187,7 +187,7 @@ public class CustomScoreQuery extends Query {
Weight[] valSrcWeights;
boolean qStrict;
- public CustomWeight(Searcher searcher) throws IOException {
+ public CustomWeight(IndexSearcher searcher) throws IOException {
this.similarity = getSimilarity(searcher);
this.subQueryWeight = subQuery.weight(searcher);
this.valSrcWeights = new Weight[valSrcQueries.length];
@@ -350,7 +350,7 @@ public class CustomScoreQuery extends Query {
}
@Override
- public Weight createWeight(Searcher searcher) throws IOException {
+ public Weight createWeight(IndexSearcher searcher) throws IOException {
return new CustomWeight(searcher);
}
diff --git a/lucene/src/java/org/apache/lucene/search/function/ValueSourceQuery.java b/lucene/src/java/org/apache/lucene/search/function/ValueSourceQuery.java
index a0387fc1f9a..219aea3da5a 100644
--- a/lucene/src/java/org/apache/lucene/search/function/ValueSourceQuery.java
+++ b/lucene/src/java/org/apache/lucene/search/function/ValueSourceQuery.java
@@ -68,7 +68,7 @@ public class ValueSourceQuery extends Query {
float queryNorm;
float queryWeight;
- public ValueSourceWeight(Searcher searcher) {
+ public ValueSourceWeight(IndexSearcher searcher) {
this.similarity = getSimilarity(searcher);
}
@@ -173,7 +173,7 @@ public class ValueSourceQuery extends Query {
}
@Override
- public Weight createWeight(Searcher searcher) {
+ public Weight createWeight(IndexSearcher searcher) {
return new ValueSourceQuery.ValueSourceWeight(searcher);
}
diff --git a/lucene/src/java/org/apache/lucene/search/payloads/PayloadNearQuery.java b/lucene/src/java/org/apache/lucene/search/payloads/PayloadNearQuery.java
index 2202b7cc657..37bb6c7d32c 100644
--- a/lucene/src/java/org/apache/lucene/search/payloads/PayloadNearQuery.java
+++ b/lucene/src/java/org/apache/lucene/search/payloads/PayloadNearQuery.java
@@ -20,7 +20,7 @@ package org.apache.lucene.search.payloads;
import org.apache.lucene.index.IndexReader;
import org.apache.lucene.search.Explanation;
import org.apache.lucene.search.Scorer;
-import org.apache.lucene.search.Searcher;
+import org.apache.lucene.search.IndexSearcher;
import org.apache.lucene.search.Similarity;
import org.apache.lucene.search.Weight;
import org.apache.lucene.search.spans.NearSpansOrdered;
@@ -66,7 +66,7 @@ public class PayloadNearQuery extends SpanNearQuery {
}
@Override
- public Weight createWeight(Searcher searcher) throws IOException {
+ public Weight createWeight(IndexSearcher searcher) throws IOException {
return new PayloadNearSpanWeight(this, searcher);
}
@@ -137,7 +137,7 @@ public class PayloadNearQuery extends SpanNearQuery {
}
public class PayloadNearSpanWeight extends SpanWeight {
- public PayloadNearSpanWeight(SpanQuery query, Searcher searcher)
+ public PayloadNearSpanWeight(SpanQuery query, IndexSearcher searcher)
throws IOException {
super(query, searcher);
}
diff --git a/lucene/src/java/org/apache/lucene/search/payloads/PayloadTermQuery.java b/lucene/src/java/org/apache/lucene/search/payloads/PayloadTermQuery.java
index 5a22e7c1f65..1d251447132 100644
--- a/lucene/src/java/org/apache/lucene/search/payloads/PayloadTermQuery.java
+++ b/lucene/src/java/org/apache/lucene/search/payloads/PayloadTermQuery.java
@@ -20,7 +20,7 @@ package org.apache.lucene.search.payloads;
import org.apache.lucene.index.Term;
import org.apache.lucene.index.DocsAndPositionsEnum;
import org.apache.lucene.index.IndexReader;
-import org.apache.lucene.search.Searcher;
+import org.apache.lucene.search.IndexSearcher;
import org.apache.lucene.search.Scorer;
import org.apache.lucene.search.Weight;
import org.apache.lucene.search.Similarity;
@@ -62,13 +62,13 @@ public class PayloadTermQuery extends SpanTermQuery {
}
@Override
- public Weight createWeight(Searcher searcher) throws IOException {
+ public Weight createWeight(IndexSearcher searcher) throws IOException {
return new PayloadTermWeight(this, searcher);
}
protected class PayloadTermWeight extends SpanWeight {
- public PayloadTermWeight(PayloadTermQuery query, Searcher searcher)
+ public PayloadTermWeight(PayloadTermQuery query, IndexSearcher searcher)
throws IOException {
super(query, searcher);
}
diff --git a/lucene/src/java/org/apache/lucene/search/spans/FieldMaskingSpanQuery.java b/lucene/src/java/org/apache/lucene/search/spans/FieldMaskingSpanQuery.java
index 05680cf0afd..16c88f30dee 100644
--- a/lucene/src/java/org/apache/lucene/search/spans/FieldMaskingSpanQuery.java
+++ b/lucene/src/java/org/apache/lucene/search/spans/FieldMaskingSpanQuery.java
@@ -24,7 +24,7 @@ import org.apache.lucene.index.IndexReader;
import org.apache.lucene.index.Term;
import org.apache.lucene.search.Query;
import org.apache.lucene.search.Weight;
-import org.apache.lucene.search.Searcher;
+import org.apache.lucene.search.IndexSearcher;
import org.apache.lucene.search.Similarity;
import org.apache.lucene.util.ToStringUtils;
@@ -102,12 +102,12 @@ public class FieldMaskingSpanQuery extends SpanQuery {
}
@Override
- public Weight createWeight(Searcher searcher) throws IOException {
+ public Weight createWeight(IndexSearcher searcher) throws IOException {
return maskedQuery.createWeight(searcher);
}
@Override
- public Similarity getSimilarity(Searcher searcher) {
+ public Similarity getSimilarity(IndexSearcher searcher) {
return maskedQuery.getSimilarity(searcher);
}
diff --git a/lucene/src/java/org/apache/lucene/search/spans/SpanQuery.java b/lucene/src/java/org/apache/lucene/search/spans/SpanQuery.java
index cae18b60fb9..35c314b8de2 100644
--- a/lucene/src/java/org/apache/lucene/search/spans/SpanQuery.java
+++ b/lucene/src/java/org/apache/lucene/search/spans/SpanQuery.java
@@ -21,7 +21,7 @@ import java.io.IOException;
import org.apache.lucene.index.IndexReader;
import org.apache.lucene.search.Query;
-import org.apache.lucene.search.Searcher;
+import org.apache.lucene.search.IndexSearcher;
import org.apache.lucene.search.Weight;
/** Base class for span-based queries. */
@@ -34,7 +34,7 @@ public abstract class SpanQuery extends Query {
public abstract String getField();
@Override
- public Weight createWeight(Searcher searcher) throws IOException {
+ public Weight createWeight(IndexSearcher searcher) throws IOException {
return new SpanWeight(this, searcher);
}
diff --git a/lucene/src/java/org/apache/lucene/search/spans/SpanWeight.java b/lucene/src/java/org/apache/lucene/search/spans/SpanWeight.java
index 28fd905a208..37451fecb2d 100644
--- a/lucene/src/java/org/apache/lucene/search/spans/SpanWeight.java
+++ b/lucene/src/java/org/apache/lucene/search/spans/SpanWeight.java
@@ -40,7 +40,7 @@ public class SpanWeight extends Weight {
protected SpanQuery query;
private IDFExplanation idfExp;
- public SpanWeight(SpanQuery query, Searcher searcher)
+ public SpanWeight(SpanQuery query, IndexSearcher searcher)
throws IOException {
this.similarity = query.getSimilarity(searcher);
this.query = query;
diff --git a/lucene/src/test/org/apache/lucene/TestSearch.java b/lucene/src/test/org/apache/lucene/TestSearch.java
index 2687a990bbb..199422ac86d 100644
--- a/lucene/src/test/org/apache/lucene/TestSearch.java
+++ b/lucene/src/test/org/apache/lucene/TestSearch.java
@@ -94,7 +94,7 @@ public class TestSearch extends LuceneTestCase {
}
writer.close();
- Searcher searcher = new IndexSearcher(directory, true);
+ IndexSearcher searcher = new IndexSearcher(directory, true);
String[] queries = {
"a b",
diff --git a/lucene/src/test/org/apache/lucene/TestSearchForDuplicates.java b/lucene/src/test/org/apache/lucene/TestSearchForDuplicates.java
index 20a309aed1c..366d32cd15c 100644
--- a/lucene/src/test/org/apache/lucene/TestSearchForDuplicates.java
+++ b/lucene/src/test/org/apache/lucene/TestSearchForDuplicates.java
@@ -102,7 +102,7 @@ public class TestSearchForDuplicates extends LuceneTestCase {
writer.close();
// try a search without OR
- Searcher searcher = new IndexSearcher(directory, true);
+ IndexSearcher searcher = new IndexSearcher(directory, true);
QueryParser parser = new QueryParser(TEST_VERSION_CURRENT, PRIORITY_FIELD, analyzer);
@@ -133,7 +133,7 @@ public class TestSearchForDuplicates extends LuceneTestCase {
}
- private void printHits(PrintWriter out, ScoreDoc[] hits, Searcher searcher ) throws IOException {
+ private void printHits(PrintWriter out, ScoreDoc[] hits, IndexSearcher searcher) throws IOException {
out.println(hits.length + " total results\n");
for (int i = 0 ; i < hits.length; i++) {
if ( i < 10 || (i > 94 && i < 105) ) {
@@ -143,11 +143,11 @@ public class TestSearchForDuplicates extends LuceneTestCase {
}
}
- private void checkHits(ScoreDoc[] hits, int expectedCount, Searcher searcher) throws IOException {
+ private void checkHits(ScoreDoc[] hits, int expectedCount, IndexSearcher searcher) throws IOException {
assertEquals("total results", expectedCount, hits.length);
for (int i = 0 ; i < hits.length; i++) {
if ( i < 10 || (i > 94 && i < 105) ) {
- Document d = searcher.doc(hits[i].doc);
+ Document d = searcher.doc(hits[i].doc);
assertEquals("check " + i, String.valueOf(i), d.get(ID_FIELD));
}
}
diff --git a/lucene/src/test/org/apache/lucene/document/TestDocument.java b/lucene/src/test/org/apache/lucene/document/TestDocument.java
index 3044e23d7ef..51a71bf51dc 100644
--- a/lucene/src/test/org/apache/lucene/document/TestDocument.java
+++ b/lucene/src/test/org/apache/lucene/document/TestDocument.java
@@ -6,7 +6,6 @@ import org.apache.lucene.index.Term;
import org.apache.lucene.search.IndexSearcher;
import org.apache.lucene.search.Query;
import org.apache.lucene.search.ScoreDoc;
-import org.apache.lucene.search.Searcher;
import org.apache.lucene.search.TermQuery;
import org.apache.lucene.store.Directory;
import org.apache.lucene.util.LuceneTestCase;
@@ -157,7 +156,7 @@ public class TestDocument extends LuceneTestCase {
writer.addDocument(makeDocumentWithFields());
IndexReader reader = writer.getReader();
- Searcher searcher = new IndexSearcher(reader);
+ IndexSearcher searcher = new IndexSearcher(reader);
// search for something that does exists
Query query = new TermQuery(new Term("keyword", "test1"));
@@ -239,7 +238,7 @@ public class TestDocument extends LuceneTestCase {
writer.addDocument(doc);
IndexReader reader = writer.getReader();
- Searcher searcher = new IndexSearcher(reader);
+ IndexSearcher searcher = new IndexSearcher(reader);
Query query = new TermQuery(new Term("keyword", "test"));
diff --git a/lucene/src/test/org/apache/lucene/index/TestBackwardsCompatibility.java b/lucene/src/test/org/apache/lucene/index/TestBackwardsCompatibility.java
index 49e625034c0..e85e4fb4bc5 100644
--- a/lucene/src/test/org/apache/lucene/index/TestBackwardsCompatibility.java
+++ b/lucene/src/test/org/apache/lucene/index/TestBackwardsCompatibility.java
@@ -360,7 +360,7 @@ public class TestBackwardsCompatibility extends LuceneTestCase {
// First document should be #21 since it's norm was
// increased:
- Document d = searcher.doc(hits[0].doc);
+ Document d = searcher.getIndexReader().document(hits[0].doc);
assertEquals("didn't get the right document first", "21", d.get("id"));
doTestHits(hits, 34, searcher.getIndexReader());
@@ -408,7 +408,7 @@ public class TestBackwardsCompatibility extends LuceneTestCase {
// make sure searching sees right # hits
IndexSearcher searcher = new IndexSearcher(dir, true);
ScoreDoc[] hits = searcher.search(new TermQuery(new Term("content", "aaa")), null, 1000).scoreDocs;
- Document d = searcher.doc(hits[0].doc);
+ Document d = searcher.getIndexReader().document(hits[0].doc);
assertEquals("wrong first document", "21", d.get("id"));
doTestHits(hits, 44, searcher.getIndexReader());
searcher.close();
diff --git a/lucene/src/test/org/apache/lucene/index/TestLazyProxSkipping.java b/lucene/src/test/org/apache/lucene/index/TestLazyProxSkipping.java
index fd1dc5647a6..469302fdb4e 100755
--- a/lucene/src/test/org/apache/lucene/index/TestLazyProxSkipping.java
+++ b/lucene/src/test/org/apache/lucene/index/TestLazyProxSkipping.java
@@ -27,7 +27,6 @@ import org.apache.lucene.document.Field;
import org.apache.lucene.search.IndexSearcher;
import org.apache.lucene.search.PhraseQuery;
import org.apache.lucene.search.ScoreDoc;
-import org.apache.lucene.search.Searcher;
import org.apache.lucene.store.Directory;
import org.apache.lucene.store.IndexInput;
import org.apache.lucene.store.MockDirectoryWrapper;
@@ -40,7 +39,7 @@ import org.apache.lucene.util.BytesRef;
*
*/
public class TestLazyProxSkipping extends LuceneTestCase {
- private Searcher searcher;
+ private IndexSearcher searcher;
private int seeksCounter = 0;
private String field = "tokens";
diff --git a/lucene/src/test/org/apache/lucene/index/TestNRTThreads.java b/lucene/src/test/org/apache/lucene/index/TestNRTThreads.java
index 3024d470553..56a18c73fd6 100644
--- a/lucene/src/test/org/apache/lucene/index/TestNRTThreads.java
+++ b/lucene/src/test/org/apache/lucene/index/TestNRTThreads.java
@@ -25,6 +25,9 @@ import java.util.List;
import java.util.Set;
import java.util.concurrent.atomic.AtomicBoolean;
import java.util.concurrent.atomic.AtomicInteger;
+import java.util.concurrent.Executors;
+import java.util.concurrent.ExecutorService;
+import java.util.concurrent.TimeUnit;
import org.apache.lucene.analysis.MockAnalyzer;
import org.apache.lucene.document.Document;
@@ -38,6 +41,7 @@ import org.apache.lucene.search.TermQuery;
import org.apache.lucene.search.TopDocs;
import org.apache.lucene.store.FSDirectory;
import org.apache.lucene.store.MockDirectoryWrapper;
+import org.apache.lucene.util.NamedThreadFactory;
import org.apache.lucene.util.Bits;
import org.apache.lucene.util.BytesRef;
import org.apache.lucene.util.LineFileDocs;
@@ -191,6 +195,8 @@ public class TestNRTThreads extends LuceneTestCase {
// silly starting guess:
final AtomicInteger totTermCount = new AtomicInteger(100);
+ final ExecutorService es = Executors.newCachedThreadPool(new NamedThreadFactory("NRT search threads"));
+
while(System.currentTimeMillis() < stopTime && !failed.get()) {
if (random.nextBoolean()) {
if (VERBOSE) {
@@ -228,7 +234,7 @@ public class TestNRTThreads extends LuceneTestCase {
if (r.numDocs() > 0) {
- final IndexSearcher s = new IndexSearcher(r);
+ final IndexSearcher s = new IndexSearcher(r, es);
// run search threads
final long searchStopTime = System.currentTimeMillis() + 500;
@@ -302,6 +308,9 @@ public class TestNRTThreads extends LuceneTestCase {
}
}
+ es.shutdown();
+ es.awaitTermination(1, TimeUnit.SECONDS);
+
if (VERBOSE) {
System.out.println("TEST: all searching done [" + (System.currentTimeMillis()-t0) + " ms]");
}
diff --git a/lucene/src/test/org/apache/lucene/index/TestOmitTf.java b/lucene/src/test/org/apache/lucene/index/TestOmitTf.java
index c96092afc60..49c72e6c029 100644
--- a/lucene/src/test/org/apache/lucene/index/TestOmitTf.java
+++ b/lucene/src/test/org/apache/lucene/index/TestOmitTf.java
@@ -41,7 +41,7 @@ public class TestOmitTf extends LuceneTestCase {
@Override public float sloppyFreq(int distance) { return 2.0f; }
@Override public float idf(int docFreq, int numDocs) { return 1.0f; }
@Override public float coord(int overlap, int maxOverlap) { return 1.0f; }
- @Override public IDFExplanation idfExplain(Collection terms, Searcher searcher) throws IOException {
+ @Override public IDFExplanation idfExplain(Collection terms, IndexSearcher searcher) throws IOException {
return new IDFExplanation() {
@Override
public float getIdf() {
@@ -279,7 +279,7 @@ public class TestOmitTf extends LuceneTestCase {
/*
* Verify the index
*/
- Searcher searcher = new IndexSearcher(dir, true);
+ IndexSearcher searcher = new IndexSearcher(dir, true);
searcher.setSimilarity(new SimpleSimilarity());
Term a = new Term("noTf", term);
diff --git a/lucene/src/test/org/apache/lucene/search/CheckHits.java b/lucene/src/test/org/apache/lucene/search/CheckHits.java
index 0efa5253254..dedd91949b6 100644
--- a/lucene/src/test/org/apache/lucene/search/CheckHits.java
+++ b/lucene/src/test/org/apache/lucene/search/CheckHits.java
@@ -42,7 +42,7 @@ public class CheckHits {
* (ie: Explanation value of 0.0f)
*/
public static void checkNoMatchExplanations(Query q, String defaultFieldName,
- Searcher searcher, int[] results)
+ IndexSearcher searcher, int[] results)
throws IOException {
String d = q.toString(defaultFieldName);
@@ -81,7 +81,7 @@ public class CheckHits {
* @see #checkHits
*/
public static void checkHitCollector(Random random, Query query, String defaultFieldName,
- Searcher searcher, int[] results)
+ IndexSearcher searcher, int[] results)
throws IOException {
QueryUtils.check(random,query,searcher);
@@ -97,20 +97,10 @@ public class CheckHits {
Assert.assertEquals("Simple: " + query.toString(defaultFieldName),
correct, actual);
- for (int i = -1; i < 2; i++) {
- actual.clear();
- QueryUtils.wrapSearcher(random, searcher, i).search(query, c);
- Assert.assertEquals("Wrap Searcher " + i + ": " +
- query.toString(defaultFieldName),
- correct, actual);
- }
-
- if ( ! ( searcher instanceof IndexSearcher ) ) return;
-
for (int i = -1; i < 2; i++) {
actual.clear();
QueryUtils.wrapUnderlyingReader
- (random, (IndexSearcher)searcher, i).search(query, c);
+ (random, searcher, i).search(query, c);
Assert.assertEquals("Wrap Reader " + i + ": " +
query.toString(defaultFieldName),
correct, actual);
@@ -157,7 +147,7 @@ public class CheckHits {
Random random,
Query query,
String defaultFieldName,
- Searcher searcher,
+ IndexSearcher searcher,
int[] results)
throws IOException {
@@ -284,7 +274,7 @@ public class CheckHits {
*/
public static void checkExplanations(Query query,
String defaultFieldName,
- Searcher searcher) throws IOException {
+ IndexSearcher searcher) throws IOException {
checkExplanations(query, defaultFieldName, searcher, false);
}
@@ -301,7 +291,7 @@ public class CheckHits {
*/
public static void checkExplanations(Query query,
String defaultFieldName,
- Searcher searcher,
+ IndexSearcher searcher,
boolean deep) throws IOException {
searcher.search(query,
@@ -455,7 +445,7 @@ public class CheckHits {
public static class ExplanationAsserter extends Collector {
Query q;
- Searcher s;
+ IndexSearcher s;
String d;
boolean deep;
@@ -463,10 +453,10 @@ public class CheckHits {
private int base = 0;
/** Constructs an instance which does shallow tests on the Explanation */
- public ExplanationAsserter(Query q, String defaultFieldName, Searcher s) {
+ public ExplanationAsserter(Query q, String defaultFieldName, IndexSearcher s) {
this(q,defaultFieldName,s,false);
}
- public ExplanationAsserter(Query q, String defaultFieldName, Searcher s, boolean deep) {
+ public ExplanationAsserter(Query q, String defaultFieldName, IndexSearcher s, boolean deep) {
this.q=q;
this.s=s;
this.d = q.toString(defaultFieldName);
diff --git a/lucene/src/test/org/apache/lucene/search/JustCompileSearch.java b/lucene/src/test/org/apache/lucene/search/JustCompileSearch.java
index b38c1f8790d..daa24c91658 100644
--- a/lucene/src/test/org/apache/lucene/search/JustCompileSearch.java
+++ b/lucene/src/test/org/apache/lucene/search/JustCompileSearch.java
@@ -19,11 +19,7 @@ package org.apache.lucene.search;
import java.io.IOException;
-import org.apache.lucene.document.Document;
-import org.apache.lucene.document.FieldSelector;
-import org.apache.lucene.index.CorruptIndexException;
import org.apache.lucene.index.IndexReader;
-import org.apache.lucene.index.Term;
import org.apache.lucene.util.BytesRef;
import org.apache.lucene.util.PriorityQueue;
@@ -38,116 +34,6 @@ final class JustCompileSearch {
private static final String UNSUPPORTED_MSG = "unsupported: used for back-compat testing only !";
- static final class JustCompileSearcher extends Searcher {
-
- @Override
- protected Weight createWeight(Query query) throws IOException {
- throw new UnsupportedOperationException(UNSUPPORTED_MSG);
- }
-
- @Override
- public void close() throws IOException {
- throw new UnsupportedOperationException(UNSUPPORTED_MSG);
- }
-
- @Override
- public Document doc(int i) throws CorruptIndexException, IOException {
- throw new UnsupportedOperationException(UNSUPPORTED_MSG);
- }
-
- @Override
- public int[] docFreqs(Term[] terms) throws IOException {
- throw new UnsupportedOperationException(UNSUPPORTED_MSG);
- }
-
- @Override
- public Explanation explain(Query query, int doc) throws IOException {
- throw new UnsupportedOperationException(UNSUPPORTED_MSG);
- }
-
- @Override
- public Similarity getSimilarity() {
- throw new UnsupportedOperationException(UNSUPPORTED_MSG);
- }
-
- @Override
- public void search(Query query, Collector results) throws IOException {
- throw new UnsupportedOperationException(UNSUPPORTED_MSG);
- }
-
- @Override
- public void search(Query query, Filter filter, Collector results)
- throws IOException {
- throw new UnsupportedOperationException(UNSUPPORTED_MSG);
- }
-
- @Override
- public TopDocs search(Query query, Filter filter, int n) throws IOException {
- throw new UnsupportedOperationException(UNSUPPORTED_MSG);
- }
-
- @Override
- public TopFieldDocs search(Query query, Filter filter, int n, Sort sort)
- throws IOException {
- throw new UnsupportedOperationException(UNSUPPORTED_MSG);
- }
-
- @Override
- public TopDocs search(Query query, int n) throws IOException {
- throw new UnsupportedOperationException(UNSUPPORTED_MSG);
- }
-
- @Override
- public void setSimilarity(Similarity similarity) {
- throw new UnsupportedOperationException(UNSUPPORTED_MSG);
- }
-
- @Override
- public int docFreq(Term term) throws IOException {
- throw new UnsupportedOperationException(UNSUPPORTED_MSG);
- }
-
- @Override
- public Explanation explain(Weight weight, int doc) throws IOException {
- throw new UnsupportedOperationException(UNSUPPORTED_MSG);
- }
-
- @Override
- public int maxDoc() throws IOException {
- throw new UnsupportedOperationException(UNSUPPORTED_MSG);
- }
-
- @Override
- public Query rewrite(Query query) throws IOException {
- throw new UnsupportedOperationException(UNSUPPORTED_MSG);
- }
-
- @Override
- public void search(Weight weight, Filter filter, Collector results)
- throws IOException {
- throw new UnsupportedOperationException(UNSUPPORTED_MSG);
- }
-
- @Override
- public TopDocs search(Weight weight, Filter filter, int n)
- throws IOException {
- throw new UnsupportedOperationException(UNSUPPORTED_MSG);
- }
-
- @Override
- public TopFieldDocs search(Weight weight, Filter filter, int n, Sort sort)
- throws IOException {
- throw new UnsupportedOperationException(UNSUPPORTED_MSG);
- }
-
- @Override
- public Document doc(int n, FieldSelector fieldSelector)
- throws CorruptIndexException, IOException {
- throw new UnsupportedOperationException(UNSUPPORTED_MSG);
- }
-
- }
-
static final class JustCompileCollector extends Collector {
@Override
diff --git a/lucene/src/test/org/apache/lucene/search/QueryUtils.java b/lucene/src/test/org/apache/lucene/search/QueryUtils.java
index f464301e216..c2c8b17fc52 100644
--- a/lucene/src/test/org/apache/lucene/search/QueryUtils.java
+++ b/lucene/src/test/org/apache/lucene/search/QueryUtils.java
@@ -85,7 +85,7 @@ public class QueryUtils {
}
/** deep check that explanations of a query 'score' correctly */
- public static void checkExplanations (final Query q, final Searcher s) throws IOException {
+ public static void checkExplanations (final Query q, final IndexSearcher s) throws IOException {
CheckHits.checkExplanations(q, null, s, true);
}
@@ -100,27 +100,19 @@ public class QueryUtils {
* @see #checkSerialization
* @see #checkEqual
*/
- public static void check(Random random, Query q1, Searcher s) {
+ public static void check(Random random, Query q1, IndexSearcher s) {
check(random, q1, s, true);
}
- private static void check(Random random, Query q1, Searcher s, boolean wrap) {
+ private static void check(Random random, Query q1, IndexSearcher s, boolean wrap) {
try {
check(q1);
if (s!=null) {
- if (s instanceof IndexSearcher) {
- IndexSearcher is = (IndexSearcher)s;
- checkFirstSkipTo(q1,is);
- checkSkipTo(q1,is);
- if (wrap) {
- check(random, q1, wrapUnderlyingReader(random, is, -1), false);
- check(random, q1, wrapUnderlyingReader(random, is, 0), false);
- check(random, q1, wrapUnderlyingReader(random, is, +1), false);
- }
- }
+ checkFirstSkipTo(q1,s);
+ checkSkipTo(q1,s);
if (wrap) {
- check(random,q1, wrapSearcher(random, s, -1), false);
- check(random,q1, wrapSearcher(random, s, 0), false);
- check(random,q1, wrapSearcher(random, s, +1), false);
+ check(random, q1, wrapUnderlyingReader(random, s, -1), false);
+ check(random, q1, wrapUnderlyingReader(random, s, 0), false);
+ check(random, q1, wrapUnderlyingReader(random, s, +1), false);
}
checkExplanations(q1,s);
checkSerialization(q1,s);
@@ -166,39 +158,6 @@ public class QueryUtils {
out.setSimilarity(s.getSimilarity());
return out;
}
- /**
- * Given a Searcher, returns a new MultiSearcher wrapping the
- * the original Searcher,
- * as well as several "empty" IndexSearchers -- some of which will have
- * deleted documents in them. This new MultiSearcher
- * should behave exactly the same as the original Searcher.
- * @param s the Searcher to wrap
- * @param edge if negative, s will be the first sub; if 0, s will be in hte middle, if positive s will be the last sub
- */
- public static MultiSearcher wrapSearcher(Random random, final Searcher s, final int edge)
- throws IOException {
-
- // we can't put deleted docs before the nested reader, because
- // it will through off the docIds
- Searcher[] searchers = new Searcher[] {
- edge < 0 ? s : new IndexSearcher(makeEmptyIndex(random, 0), true),
- new MultiSearcher(new Searcher[] {
- new IndexSearcher(makeEmptyIndex(random, edge < 0 ? 65 : 0), true),
- new IndexSearcher(makeEmptyIndex(random, 0), true),
- 0 == edge ? s : new IndexSearcher(makeEmptyIndex(random, 0), true)
- }),
- new IndexSearcher(makeEmptyIndex(random, 0 < edge ? 0 : 3), true),
- new IndexSearcher(makeEmptyIndex(random, 0), true),
- new MultiSearcher(new Searcher[] {
- new IndexSearcher(makeEmptyIndex(random, 0 < edge ? 0 : 5), true),
- new IndexSearcher(makeEmptyIndex(random, 0), true),
- 0 < edge ? s : new IndexSearcher(makeEmptyIndex(random, 0), true)
- })
- };
- MultiSearcher out = new MultiSearcher(searchers);
- out.setSimilarity(s.getSimilarity());
- return out;
- }
private static Directory makeEmptyIndex(Random random, final int numDeletedDocs)
throws IOException {
@@ -231,7 +190,7 @@ public class QueryUtils {
/** check that the query weight is serializable.
* @throws IOException if serialization check fail.
*/
- private static void checkSerialization(Query q, Searcher s) throws IOException {
+ private static void checkSerialization(Query q, IndexSearcher s) throws IOException {
Weight w = q.weight(s);
try {
ByteArrayOutputStream bos = new ByteArrayOutputStream();
diff --git a/lucene/src/test/org/apache/lucene/search/TestBooleanMinShouldMatch.java b/lucene/src/test/org/apache/lucene/search/TestBooleanMinShouldMatch.java
index 1f7abcc7074..55b067b6b78 100644
--- a/lucene/src/test/org/apache/lucene/search/TestBooleanMinShouldMatch.java
+++ b/lucene/src/test/org/apache/lucene/search/TestBooleanMinShouldMatch.java
@@ -372,7 +372,7 @@ public class TestBooleanMinShouldMatch extends LuceneTestCase {
- protected void printHits(String test, ScoreDoc[] h, Searcher searcher) throws Exception {
+ protected void printHits(String test, ScoreDoc[] h, IndexSearcher searcher) throws Exception {
System.err.println("------- " + test + " -------");
diff --git a/lucene/src/test/org/apache/lucene/search/TestConstantScoreQuery.java b/lucene/src/test/org/apache/lucene/search/TestConstantScoreQuery.java
index 167d722943b..874fc7f3a14 100644
--- a/lucene/src/test/org/apache/lucene/search/TestConstantScoreQuery.java
+++ b/lucene/src/test/org/apache/lucene/search/TestConstantScoreQuery.java
@@ -46,7 +46,7 @@ public class TestConstantScoreQuery extends LuceneTestCase {
QueryUtils.checkUnequal(q1, new TermQuery(new Term("a", "b")));
}
- private void checkHits(Searcher searcher, Query q, final float expectedScore, final String scorerClassName, final String innerScorerClassName) throws IOException {
+ private void checkHits(IndexSearcher searcher, Query q, final float expectedScore, final String scorerClassName, final String innerScorerClassName) throws IOException {
final int[] count = new int[1];
searcher.search(q, new Collector() {
private Scorer scorer;
diff --git a/lucene/src/test/org/apache/lucene/search/TestCustomSearcherSort.java b/lucene/src/test/org/apache/lucene/search/TestCustomSearcherSort.java
index 076a5571fb8..08781570e59 100644
--- a/lucene/src/test/org/apache/lucene/search/TestCustomSearcherSort.java
+++ b/lucene/src/test/org/apache/lucene/search/TestCustomSearcherSort.java
@@ -89,7 +89,7 @@ public class TestCustomSearcherSort extends LuceneTestCase implements Serializab
Sort custSort = new Sort(
new SortField("publicationDate_", SortField.STRING),
SortField.FIELD_SCORE);
- Searcher searcher = new CustomSearcher(reader, 2);
+ IndexSearcher searcher = new CustomSearcher(reader, 2);
// search and check hits
matchHits(searcher, custSort);
}
@@ -103,28 +103,13 @@ public class TestCustomSearcherSort extends LuceneTestCase implements Serializab
Sort custSort = new Sort(
new SortField("publicationDate_", SortField.STRING),
SortField.FIELD_SCORE);
- Searcher searcher = new MultiSearcher(new Searcher[] {new CustomSearcher(
- reader, 2)});
- // search and check hits
- matchHits(searcher, custSort);
- }
-
- /**
- * Run the test using two CustomSearcher instances.
- */
- public void testFieldSortMultiCustomSearcher() throws Exception {
- // log("Run testFieldSortMultiCustomSearcher");
- // define the sort criteria
- Sort custSort = new Sort(
- new SortField("publicationDate_", SortField.STRING),
- SortField.FIELD_SCORE);
- Searcher searcher = new MultiSearcher(new CustomSearcher(reader, 0), new CustomSearcher(reader, 2));
+ IndexSearcher searcher = new CustomSearcher(reader, 2);
// search and check hits
matchHits(searcher, custSort);
}
// make sure the documents returned by the search match the expected list
- private void matchHits(Searcher searcher, Sort sort) throws IOException {
+ private void matchHits(IndexSearcher searcher, Sort sort) throws IOException {
// make a query without sorting first
ScoreDoc[] hitsByRank = searcher.search(query, null, Integer.MAX_VALUE).scoreDocs;
checkHits(hitsByRank, "Sort by rank: "); // check for duplicates
diff --git a/lucene/src/test/org/apache/lucene/search/TestDisjunctionMaxQuery.java b/lucene/src/test/org/apache/lucene/search/TestDisjunctionMaxQuery.java
index cd56034e7d4..b89b6897c8a 100644
--- a/lucene/src/test/org/apache/lucene/search/TestDisjunctionMaxQuery.java
+++ b/lucene/src/test/org/apache/lucene/search/TestDisjunctionMaxQuery.java
@@ -473,7 +473,7 @@ public class TestDisjunctionMaxQuery extends LuceneTestCase {
return q;
}
- protected void printHits(String test, ScoreDoc[] h, Searcher searcher)
+ protected void printHits(String test, ScoreDoc[] h, IndexSearcher searcher)
throws Exception {
System.err.println("------- " + test + " -------");
diff --git a/lucene/src/test/org/apache/lucene/search/TestMultiSearcher.java b/lucene/src/test/org/apache/lucene/search/TestMultiSearcher.java
deleted file mode 100644
index 91a06896648..00000000000
--- a/lucene/src/test/org/apache/lucene/search/TestMultiSearcher.java
+++ /dev/null
@@ -1,454 +0,0 @@
-package org.apache.lucene.search;
-
-/**
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements. See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-import org.apache.lucene.util.LuceneTestCase;
-import org.apache.lucene.analysis.MockAnalyzer;
-import org.apache.lucene.document.Document;
-import org.apache.lucene.document.Field;
-import org.apache.lucene.document.SetBasedFieldSelector;
-import org.apache.lucene.index.IndexReader;
-import org.apache.lucene.index.IndexWriter;
-import org.apache.lucene.index.IndexWriterConfig;
-import org.apache.lucene.index.Term;
-import org.apache.lucene.index.IndexWriterConfig.OpenMode;
-import org.apache.lucene.queryParser.QueryParser;
-import org.apache.lucene.store.Directory;
-import java.io.IOException;
-import java.util.Collections;
-import java.util.HashSet;
-import java.util.Map;
-import java.util.Random;
-import java.util.Set;
-
-/**
- * Tests {@link MultiSearcher} class.
- */
-public class TestMultiSearcher extends LuceneTestCase
-{
-
- /**
- * ReturnS a new instance of the concrete MultiSearcher class
- * used in this test.
- */
- protected MultiSearcher getMultiSearcherInstance(Searcher[] searchers) throws IOException {
- return new MultiSearcher(searchers);
- }
-
- public void testEmptyIndex() throws Exception {
- // creating two directories for indices
- Directory indexStoreA = newDirectory();
- Directory indexStoreB = newDirectory();
-
- // creating a document to store
- Document lDoc = new Document();
- lDoc.add(newField("fulltext", "Once upon a time.....", Field.Store.YES, Field.Index.ANALYZED));
- lDoc.add(newField("id", "doc1", Field.Store.YES, Field.Index.NOT_ANALYZED));
- lDoc.add(newField("handle", "1", Field.Store.YES, Field.Index.NOT_ANALYZED));
-
- // creating a document to store
- Document lDoc2 = new Document();
- lDoc2.add(newField("fulltext", "in a galaxy far far away.....",
- Field.Store.YES, Field.Index.ANALYZED));
- lDoc2.add(newField("id", "doc2", Field.Store.YES, Field.Index.NOT_ANALYZED));
- lDoc2.add(newField("handle", "1", Field.Store.YES, Field.Index.NOT_ANALYZED));
-
- // creating a document to store
- Document lDoc3 = new Document();
- lDoc3.add(newField("fulltext", "a bizarre bug manifested itself....",
- Field.Store.YES, Field.Index.ANALYZED));
- lDoc3.add(newField("id", "doc3", Field.Store.YES, Field.Index.NOT_ANALYZED));
- lDoc3.add(newField("handle", "1", Field.Store.YES, Field.Index.NOT_ANALYZED));
-
- // creating an index writer for the first index
- IndexWriter writerA = new IndexWriter(indexStoreA, newIndexWriterConfig( TEST_VERSION_CURRENT, new MockAnalyzer()));
- // creating an index writer for the second index, but writing nothing
- IndexWriter writerB = new IndexWriter(indexStoreB, newIndexWriterConfig( TEST_VERSION_CURRENT, new MockAnalyzer()));
-
- //--------------------------------------------------------------------
- // scenario 1
- //--------------------------------------------------------------------
-
- // writing the documents to the first index
- writerA.addDocument(lDoc);
- writerA.addDocument(lDoc2);
- writerA.addDocument(lDoc3);
- writerA.optimize();
- writerA.close();
-
- // closing the second index
- writerB.close();
-
- // creating the query
- QueryParser parser = new QueryParser(TEST_VERSION_CURRENT, "fulltext", new MockAnalyzer());
- Query query = parser.parse("handle:1");
-
- // building the searchables
- Searcher[] searchers = new Searcher[2];
- // VITAL STEP:adding the searcher for the empty index first, before the searcher for the populated index
- searchers[0] = new IndexSearcher(indexStoreB, true);
- searchers[1] = new IndexSearcher(indexStoreA, true);
- // creating the multiSearcher
- Searcher mSearcher = getMultiSearcherInstance(searchers);
- // performing the search
- ScoreDoc[] hits = mSearcher.search(query, null, 1000).scoreDocs;
-
- assertEquals(3, hits.length);
-
- // iterating over the hit documents
- for (int i = 0; i < hits.length; i++) {
- mSearcher.doc(hits[i].doc);
- }
- mSearcher.close();
-
-
- //--------------------------------------------------------------------
- // scenario 2
- //--------------------------------------------------------------------
-
- // adding one document to the empty index
- writerB = new IndexWriter(indexStoreB, newIndexWriterConfig(
- TEST_VERSION_CURRENT,
- new MockAnalyzer())
- .setOpenMode(OpenMode.APPEND));
- writerB.addDocument(lDoc);
- writerB.optimize();
- writerB.close();
-
- // building the searchables
- Searcher[] searchers2 = new Searcher[2];
- // VITAL STEP:adding the searcher for the empty index first, before the searcher for the populated index
- searchers2[0] = new IndexSearcher(indexStoreB, true);
- searchers2[1] = new IndexSearcher(indexStoreA, true);
- // creating the mulitSearcher
- MultiSearcher mSearcher2 = getMultiSearcherInstance(searchers2);
- // performing the same search
- ScoreDoc[] hits2 = mSearcher2.search(query, null, 1000).scoreDocs;
-
- assertEquals(4, hits2.length);
-
- // iterating over the hit documents
- for (int i = 0; i < hits2.length; i++) {
- // no exception should happen at this point
- mSearcher2.doc(hits2[i].doc);
- }
-
- // test the subSearcher() method:
- Query subSearcherQuery = parser.parse("id:doc1");
- hits2 = mSearcher2.search(subSearcherQuery, null, 1000).scoreDocs;
- assertEquals(2, hits2.length);
- assertEquals(0, mSearcher2.subSearcher(hits2[0].doc)); // hit from searchers2[0]
- assertEquals(1, mSearcher2.subSearcher(hits2[1].doc)); // hit from searchers2[1]
- subSearcherQuery = parser.parse("id:doc2");
- hits2 = mSearcher2.search(subSearcherQuery, null, 1000).scoreDocs;
- assertEquals(1, hits2.length);
- assertEquals(1, mSearcher2.subSearcher(hits2[0].doc)); // hit from searchers2[1]
- mSearcher2.close();
-
- //--------------------------------------------------------------------
- // scenario 3
- //--------------------------------------------------------------------
-
- // deleting the document just added, this will cause a different exception to take place
- Term term = new Term("id", "doc1");
- IndexReader readerB = IndexReader.open(indexStoreB, false);
- readerB.deleteDocuments(term);
- readerB.close();
-
- // optimizing the index with the writer
- writerB = new IndexWriter(indexStoreB, new IndexWriterConfig(
- TEST_VERSION_CURRENT,
- new MockAnalyzer())
- .setOpenMode(OpenMode.APPEND));
- writerB.optimize();
- writerB.close();
-
- // building the searchables
- Searcher[] searchers3 = new Searcher[2];
-
- searchers3[0] = new IndexSearcher(indexStoreB, true);
- searchers3[1] = new IndexSearcher(indexStoreA, true);
- // creating the mulitSearcher
- Searcher mSearcher3 = getMultiSearcherInstance(searchers3);
- // performing the same search
- ScoreDoc[] hits3 = mSearcher3.search(query, null, 1000).scoreDocs;
-
- assertEquals(3, hits3.length);
-
- // iterating over the hit documents
- for (int i = 0; i < hits3.length; i++) {
- mSearcher3.doc(hits3[i].doc);
- }
- mSearcher3.close();
- indexStoreA.close();
- indexStoreB.close();
- }
-
- private Document createDocument(String contents1, String contents2) {
- Document document=new Document();
-
- document.add(newField("contents", contents1, Field.Store.YES, Field.Index.NOT_ANALYZED));
- document.add(newField("other", "other contents", Field.Store.YES, Field.Index.NOT_ANALYZED));
- if (contents2!=null) {
- document.add(newField("contents", contents2, Field.Store.YES, Field.Index.NOT_ANALYZED));
- }
-
- return document;
- }
-
- private void initIndex(Random random, Directory directory, int nDocs, boolean create, String contents2) throws IOException {
- IndexWriter indexWriter=null;
-
- try {
- indexWriter = new IndexWriter(directory, LuceneTestCase.newIndexWriterConfig(random,
- TEST_VERSION_CURRENT, new MockAnalyzer()).setOpenMode(
- create ? OpenMode.CREATE : OpenMode.APPEND));
-
- for (int i=0; i ftl = new HashSet();
- ftl.add("other");
- SetBasedFieldSelector fs = new SetBasedFieldSelector(ftl, Collections. emptySet());
- document = searcher.doc(hits[0].doc, fs);
- assertTrue("document is null and it shouldn't be", document != null);
- assertTrue("document.getFields() Size: " + document.getFields().size() + " is not: " + 1, document.getFields().size() == 1);
- String value = document.get("contents");
- assertTrue("value is not null and it should be", value == null);
- value = document.get("other");
- assertTrue("value is null and it shouldn't be", value != null);
- ftl.clear();
- ftl.add("contents");
- fs = new SetBasedFieldSelector(ftl, Collections. emptySet());
- document = searcher.doc(hits[1].doc, fs);
- value = document.get("contents");
- assertTrue("value is null and it shouldn't be", value != null);
- value = document.get("other");
- assertTrue("value is not null and it should be", value == null);
- indexSearcher1.close();
- indexSearcher2.close();
- ramDirectory1.close();
- ramDirectory2.close();
- searcher.close();
- }
-
- /* uncomment this when the highest score is always normalized to 1.0, even when it was < 1.0
- public void testNormalization1() throws IOException {
- testNormalization(1, "Using 1 document per index:");
- }
- */
-
- public void testNormalization10() throws IOException {
- testNormalization(10, "Using 10 documents per index:");
- }
-
- private void testNormalization(int nDocs, String message) throws IOException {
- Query query=new TermQuery(new Term("contents", "doc0"));
-
- Directory ramDirectory1;
- IndexSearcher indexSearcher1;
- ScoreDoc[] hits;
-
- ramDirectory1=newDirectory();
-
- // First put the documents in the same index
- initIndex(random, ramDirectory1, nDocs, true, null); // documents with a single token "doc0", "doc1", etc...
- initIndex(random, ramDirectory1, nDocs, false, "x"); // documents with two tokens "doc0" and "x", "doc1" and x, etc...
-
- indexSearcher1=new IndexSearcher(ramDirectory1, true);
- indexSearcher1.setDefaultFieldSortScoring(true, true);
-
- hits=indexSearcher1.search(query, null, 1000).scoreDocs;
-
- assertEquals(message, 2, hits.length);
-
- // Store the scores for use later
- float[] scores={ hits[0].score, hits[1].score };
-
- assertTrue(message, scores[0] > scores[1]);
-
- indexSearcher1.close();
- ramDirectory1.close();
- hits=null;
-
-
-
- Directory ramDirectory2;
- IndexSearcher indexSearcher2;
-
- ramDirectory1=newDirectory();
- ramDirectory2=newDirectory();
-
- // Now put the documents in a different index
- initIndex(random, ramDirectory1, nDocs, true, null); // documents with a single token "doc0", "doc1", etc...
- initIndex(random, ramDirectory2, nDocs, true, "x"); // documents with two tokens "doc0" and "x", "doc1" and x, etc...
-
- indexSearcher1=new IndexSearcher(ramDirectory1, true);
- indexSearcher1.setDefaultFieldSortScoring(true, true);
- indexSearcher2=new IndexSearcher(ramDirectory2, true);
- indexSearcher2.setDefaultFieldSortScoring(true, true);
-
- Searcher searcher=getMultiSearcherInstance(new Searcher[] { indexSearcher1, indexSearcher2 });
-
- hits=searcher.search(query, null, 1000).scoreDocs;
-
- assertEquals(message, 2, hits.length);
-
- // The scores should be the same (within reason)
- assertEquals(message, scores[0], hits[0].score, 1e-6); // This will a document from ramDirectory1
- assertEquals(message, scores[1], hits[1].score, 1e-6); // This will a document from ramDirectory2
-
-
-
- // Adding a Sort.RELEVANCE object should not change anything
- hits=searcher.search(query, null, 1000, Sort.RELEVANCE).scoreDocs;
-
- assertEquals(message, 2, hits.length);
-
- assertEquals(message, scores[0], hits[0].score, 1e-6); // This will a document from ramDirectory1
- assertEquals(message, scores[1], hits[1].score, 1e-6); // This will a document from ramDirectory2
-
- searcher.close();
-
- ramDirectory1.close();
- ramDirectory2.close();
- }
-
- /**
- * test that custom similarity is in effect when using MultiSearcher (LUCENE-789).
- * @throws IOException
- */
- public void testCustomSimilarity () throws IOException {
- Directory dir = newDirectory();
- initIndex(random, dir, 10, true, "x"); // documents with two tokens "doc0" and "x", "doc1" and x, etc...
- IndexSearcher srchr = new IndexSearcher(dir, true);
- MultiSearcher msrchr = getMultiSearcherInstance(new Searcher[]{srchr});
-
- Similarity customSimilarity = new DefaultSimilarity() {
- // overide all
- @Override
- public float idf(int docFreq, int numDocs) { return 100.0f; }
- @Override
- public float coord(int overlap, int maxOverlap) { return 1.0f; }
- @Override
- public float lengthNorm(String fieldName, int numTokens) { return 1.0f; }
- @Override
- public float queryNorm(float sumOfSquaredWeights) { return 1.0f; }
- @Override
- public float sloppyFreq(int distance) { return 1.0f; }
- @Override
- public float tf(float freq) { return 1.0f; }
- };
-
- srchr.setSimilarity(customSimilarity);
- msrchr.setSimilarity(customSimilarity);
-
- Query query=new TermQuery(new Term("contents", "doc0"));
-
- // Get a score from IndexSearcher
- TopDocs topDocs = srchr.search(query, null, 1);
- float score1 = topDocs.getMaxScore();
-
- // Get the score from MultiSearcher
- topDocs = msrchr.search(query, null, 1);
- float scoreN = topDocs.getMaxScore();
-
- // The scores from the IndexSearcher and Multisearcher should be the same
- // if the same similarity is used.
- assertEquals("MultiSearcher score must be equal to single searcher score!", score1, scoreN, 1e-6);
- msrchr.close();
- srchr.close();
- dir.close();
- }
-
- public void testDocFreq() throws IOException{
- Directory dir1 = newDirectory();
- Directory dir2 = newDirectory();
-
- initIndex(random, dir1, 10, true, "x"); // documents with two tokens "doc0" and "x", "doc1" and x, etc...
- initIndex(random, dir2, 5, true, "x"); // documents with two tokens "doc0" and "x", "doc1" and x, etc...
- IndexSearcher searcher1 = new IndexSearcher(dir1, true);
- IndexSearcher searcher2 = new IndexSearcher(dir2, true);
-
- MultiSearcher multiSearcher = getMultiSearcherInstance(new Searcher[]{searcher1, searcher2});
- assertEquals(15, multiSearcher.docFreq(new Term("contents","x")));
- multiSearcher.close();
- searcher1.close();
- searcher2.close();
- dir1.close();
- dir2.close();
- }
-
- public void testCreateDocFrequencyMap() throws IOException{
- Directory dir1 = newDirectory();
- Directory dir2 = newDirectory();
- Term template = new Term("contents") ;
- String[] contents = {"a", "b", "c"};
- HashSet termsSet = new HashSet();
- for (int i = 0; i < contents.length; i++) {
- initIndex(random, dir1, i+10, i==0, contents[i]);
- initIndex(random, dir2, i+5, i==0, contents[i]);
- termsSet.add(template.createTerm(contents[i]));
- }
- IndexSearcher searcher1 = new IndexSearcher(dir1, true);
- IndexSearcher searcher2 = new IndexSearcher(dir2, true);
- MultiSearcher multiSearcher = getMultiSearcherInstance(new Searcher[]{searcher1, searcher2});
- Map docFrequencyMap = multiSearcher.createDocFrequencyMap(termsSet);
- assertEquals(3, docFrequencyMap.size());
- for (int i = 0; i < contents.length; i++) {
- assertEquals(Integer.valueOf((i*2) +15), docFrequencyMap.get(template.createTerm(contents[i])));
- }
- multiSearcher.close();
- searcher1.close();
- searcher2.close();
- dir1.close();
- dir2.close();
- }
-}
diff --git a/lucene/src/test/org/apache/lucene/search/TestMultiSearcherRanking.java b/lucene/src/test/org/apache/lucene/search/TestMultiSearcherRanking.java
deleted file mode 100644
index 04972f0b8f6..00000000000
--- a/lucene/src/test/org/apache/lucene/search/TestMultiSearcherRanking.java
+++ /dev/null
@@ -1,173 +0,0 @@
-package org.apache.lucene.search;
-
-/**
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements. See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-import org.apache.lucene.util.LuceneTestCase;
-import org.apache.lucene.analysis.MockAnalyzer;
-import org.apache.lucene.document.Document;
-import org.apache.lucene.document.Field;
-import org.apache.lucene.index.IndexWriter;
-import org.apache.lucene.queryParser.ParseException;
-import org.apache.lucene.queryParser.QueryParser;
-import org.apache.lucene.store.Directory;
-import java.io.IOException;
-
-/**
- * Tests {@link MultiSearcher} ranking, i.e. makes sure this bug is fixed:
- * http://issues.apache.org/bugzilla/show_bug.cgi?id=31841
- *
- */
-public class TestMultiSearcherRanking extends LuceneTestCase {
-
- private final String FIELD_NAME = "body";
- private Searcher multiSearcher;
- private Searcher singleSearcher;
-
- public void testOneTermQuery() throws IOException, ParseException {
- checkQuery("three");
- }
-
- public void testTwoTermQuery() throws IOException, ParseException {
- checkQuery("three foo");
- }
-
- public void testPrefixQuery() throws IOException, ParseException {
- checkQuery("multi*");
- }
-
- public void testFuzzyQuery() throws IOException, ParseException {
- checkQuery("multiThree~");
- }
-
- public void testRangeQuery() throws IOException, ParseException {
- checkQuery("{multiA TO multiP}");
- }
-
- public void testMultiPhraseQuery() throws IOException, ParseException {
- checkQuery("\"blueberry pi*\"");
- }
-
- public void testNoMatchQuery() throws IOException, ParseException {
- checkQuery("+three +nomatch");
- }
-
- /*
- public void testTermRepeatedQuery() throws IOException, ParseException {
- // TODO: this corner case yields different results.
- checkQuery("multi* multi* foo");
- }
- */
-
- /**
- * checks if a query yields the same result when executed on
- * a single IndexSearcher containing all documents and on a
- * MultiSearcher aggregating sub-searchers
- * @param queryStr the query to check.
- * @throws IOException
- * @throws ParseException
- */
- private void checkQuery(String queryStr) throws IOException, ParseException {
- // check result hit ranking
- if(VERBOSE) System.out.println("Query: " + queryStr);
- QueryParser queryParser = new QueryParser(TEST_VERSION_CURRENT, FIELD_NAME, new MockAnalyzer());
- Query query = queryParser.parse(queryStr);
- ScoreDoc[] multiSearcherHits = multiSearcher.search(query, null, 1000).scoreDocs;
- ScoreDoc[] singleSearcherHits = singleSearcher.search(query, null, 1000).scoreDocs;
- assertEquals(multiSearcherHits.length, singleSearcherHits.length);
- for (int i = 0; i < multiSearcherHits.length; i++) {
- Document docMulti = multiSearcher.doc(multiSearcherHits[i].doc);
- Document docSingle = singleSearcher.doc(singleSearcherHits[i].doc);
- if(VERBOSE) System.out.println("Multi: " + docMulti.get(FIELD_NAME) + " score="
- + multiSearcherHits[i].score);
- if(VERBOSE) System.out.println("Single: " + docSingle.get(FIELD_NAME) + " score="
- + singleSearcherHits[i].score);
- assertEquals(multiSearcherHits[i].score, singleSearcherHits[i].score,
- 0.001f);
- assertEquals(docMulti.get(FIELD_NAME), docSingle.get(FIELD_NAME));
- }
- if(VERBOSE) System.out.println();
- }
-
- /**
- * initializes multiSearcher and singleSearcher with the same document set
- */
- @Override
- public void setUp() throws Exception {
- super.setUp();
- // create MultiSearcher from two seperate searchers
- d1 = newDirectory();
- IndexWriter iw1 = new IndexWriter(d1, newIndexWriterConfig( TEST_VERSION_CURRENT, new MockAnalyzer()));
- addCollection1(iw1);
- iw1.close();
- d2 = newDirectory();
- IndexWriter iw2 = new IndexWriter(d2, newIndexWriterConfig( TEST_VERSION_CURRENT, new MockAnalyzer()));
- addCollection2(iw2);
- iw2.close();
-
- Searchable[] s = new Searchable[2];
- s[0] = new IndexSearcher(d1, true);
- s[1] = new IndexSearcher(d2, true);
- multiSearcher = new MultiSearcher(s);
-
- // create IndexSearcher which contains all documents
- d = newDirectory();
- IndexWriter iw = new IndexWriter(d, newIndexWriterConfig( TEST_VERSION_CURRENT, new MockAnalyzer()));
- addCollection1(iw);
- addCollection2(iw);
- iw.close();
- singleSearcher = new IndexSearcher(d, true);
- }
-
- Directory d1, d2, d;
-
- @Override
- public void tearDown() throws Exception {
- multiSearcher.close();
- singleSearcher.close();
- d1.close();
- d2.close();
- d.close();
- super.tearDown();
- }
-
- private void addCollection1(IndexWriter iw) throws IOException {
- add("one blah three", iw);
- add("one foo three multiOne", iw);
- add("one foobar three multiThree", iw);
- add("blueberry pie", iw);
- add("blueberry strudel", iw);
- add("blueberry pizza", iw);
- }
-
- private void addCollection2(IndexWriter iw) throws IOException {
- add("two blah three", iw);
- add("two foo xxx multiTwo", iw);
- add("two foobar xxx multiThreee", iw);
- add("blueberry chewing gum", iw);
- add("bluebird pizza", iw);
- add("bluebird foobar pizza", iw);
- add("piccadilly circus", iw);
- }
-
- private void add(String value, IndexWriter iw) throws IOException {
- Document d = new Document();
- d.add(newField(FIELD_NAME, value, Field.Store.YES, Field.Index.ANALYZED));
- iw.addDocument(d);
- }
-
-}
diff --git a/lucene/src/test/org/apache/lucene/search/TestMultiValuedNumericRangeQuery.java b/lucene/src/test/org/apache/lucene/search/TestMultiValuedNumericRangeQuery.java
index fe5d5c179ad..df996e604e3 100644
--- a/lucene/src/test/org/apache/lucene/search/TestMultiValuedNumericRangeQuery.java
+++ b/lucene/src/test/org/apache/lucene/search/TestMultiValuedNumericRangeQuery.java
@@ -59,7 +59,7 @@ public class TestMultiValuedNumericRangeQuery extends LuceneTestCase {
IndexReader reader = writer.getReader();
writer.close();
- Searcher searcher=new IndexSearcher(reader);
+ IndexSearcher searcher=new IndexSearcher(reader);
num = 50 * RANDOM_MULTIPLIER;
for (int i = 0; i < num; i++) {
int lower=random.nextInt(Integer.MAX_VALUE);
diff --git a/lucene/src/test/org/apache/lucene/search/TestNot.java b/lucene/src/test/org/apache/lucene/search/TestNot.java
index a60b8edc20b..20f2d8f61a0 100644
--- a/lucene/src/test/org/apache/lucene/search/TestNot.java
+++ b/lucene/src/test/org/apache/lucene/search/TestNot.java
@@ -44,7 +44,7 @@ public class TestNot extends LuceneTestCase {
writer.addDocument(d1);
IndexReader reader = writer.getReader();
- Searcher searcher = new IndexSearcher(reader);
+ IndexSearcher searcher = new IndexSearcher(reader);
QueryParser parser = new QueryParser(TEST_VERSION_CURRENT, "field", new MockAnalyzer());
Query query = parser.parse("a NOT b");
//System.out.println(query);
diff --git a/lucene/src/test/org/apache/lucene/search/TestParallelMultiSearcher.java b/lucene/src/test/org/apache/lucene/search/TestParallelMultiSearcher.java
deleted file mode 100644
index 44e35cab08b..00000000000
--- a/lucene/src/test/org/apache/lucene/search/TestParallelMultiSearcher.java
+++ /dev/null
@@ -1,51 +0,0 @@
-package org.apache.lucene.search;
-
-/**
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements. See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-import java.io.IOException;
-import java.util.ArrayList;
-import java.util.List;
-import java.util.concurrent.ExecutorService;
-import java.util.concurrent.Executors;
-import java.util.concurrent.TimeUnit;
-
-import org.apache.lucene.util._TestUtil;
-
-/**
- * Unit tests for the ParallelMultiSearcher
- */
-public class TestParallelMultiSearcher extends TestMultiSearcher {
- List pools = new ArrayList();
-
- @Override
- public void tearDown() throws Exception {
- for (ExecutorService exec : pools)
- exec.awaitTermination(1000, TimeUnit.MILLISECONDS);
- pools.clear();
- super.tearDown();
- }
-
- @Override
- protected MultiSearcher getMultiSearcherInstance(Searcher[] searchers)
- throws IOException {
- ExecutorService exec = Executors.newFixedThreadPool(_TestUtil.nextInt(random, 2, 8));
- pools.add(exec);
- return new ParallelMultiSearcher(exec, searchers);
- }
-
-}
diff --git a/lucene/src/test/org/apache/lucene/search/TestPhraseQuery.java b/lucene/src/test/org/apache/lucene/search/TestPhraseQuery.java
index cd5595b964b..a9e9c78a320 100644
--- a/lucene/src/test/org/apache/lucene/search/TestPhraseQuery.java
+++ b/lucene/src/test/org/apache/lucene/search/TestPhraseQuery.java
@@ -352,7 +352,7 @@ public class TestPhraseQuery extends LuceneTestCase {
IndexReader reader = writer.getReader();
writer.close();
- Searcher searcher = new IndexSearcher(reader);
+ IndexSearcher searcher = new IndexSearcher(reader);
PhraseQuery query = new PhraseQuery();
query.add(new Term("field", "firstname"));
query.add(new Term("field", "lastname"));
diff --git a/lucene/src/test/org/apache/lucene/search/TestRegexpRandom.java b/lucene/src/test/org/apache/lucene/search/TestRegexpRandom.java
index 5dbbddd010f..d50a02400d9 100644
--- a/lucene/src/test/org/apache/lucene/search/TestRegexpRandom.java
+++ b/lucene/src/test/org/apache/lucene/search/TestRegexpRandom.java
@@ -38,7 +38,7 @@ import org.apache.lucene.util._TestUtil;
* and validates the correct number of hits are returned.
*/
public class TestRegexpRandom extends LuceneTestCase {
- private Searcher searcher;
+ private IndexSearcher searcher;
private IndexReader reader;
private Directory dir;
diff --git a/lucene/src/test/org/apache/lucene/search/TestSimilarity.java b/lucene/src/test/org/apache/lucene/search/TestSimilarity.java
index c516940f889..9518a025ed9 100644
--- a/lucene/src/test/org/apache/lucene/search/TestSimilarity.java
+++ b/lucene/src/test/org/apache/lucene/search/TestSimilarity.java
@@ -44,7 +44,7 @@ public class TestSimilarity extends LuceneTestCase {
@Override public float sloppyFreq(int distance) { return 2.0f; }
@Override public float idf(int docFreq, int numDocs) { return 1.0f; }
@Override public float coord(int overlap, int maxOverlap) { return 1.0f; }
- @Override public IDFExplanation idfExplain(Collection terms, Searcher searcher) throws IOException {
+ @Override public IDFExplanation idfExplain(Collection terms, IndexSearcher searcher) throws IOException {
return new IDFExplanation() {
@Override
public float getIdf() {
@@ -75,7 +75,7 @@ public class TestSimilarity extends LuceneTestCase {
IndexReader reader = writer.getReader();
writer.close();
- Searcher searcher = new IndexSearcher(reader);
+ IndexSearcher searcher = new IndexSearcher(reader);
searcher.setSimilarity(new SimpleSimilarity());
Term a = new Term("field", "a");
diff --git a/lucene/src/test/org/apache/lucene/search/TestSimpleExplanations.java b/lucene/src/test/org/apache/lucene/search/TestSimpleExplanations.java
index 0e26dabfe16..116b10a6e20 100644
--- a/lucene/src/test/org/apache/lucene/search/TestSimpleExplanations.java
+++ b/lucene/src/test/org/apache/lucene/search/TestSimpleExplanations.java
@@ -17,18 +17,6 @@ package org.apache.lucene.search;
* limitations under the License.
*/
-import org.apache.lucene.analysis.MockAnalyzer;
-import org.apache.lucene.document.Document;
-import org.apache.lucene.document.Field;
-import org.apache.lucene.index.IndexWriter;
-import org.apache.lucene.index.Term;
-import org.apache.lucene.queryParser.QueryParser;
-import org.apache.lucene.search.spans.SpanNearQuery;
-import org.apache.lucene.search.spans.SpanQuery;
-import org.apache.lucene.search.spans.SpanTermQuery;
-import org.apache.lucene.store.Directory;
-
-
/**
* TestExplanations subclass focusing on basic query types
*/
@@ -301,73 +289,4 @@ public class TestSimpleExplanations extends TestExplanations {
qtest(q, new int[] { 0,3 });
}
-
-
- public void testTermQueryMultiSearcherExplain() throws Exception {
- // creating two directories for indices
- Directory indexStoreA = newDirectory();
- Directory indexStoreB = newDirectory();
-
- Document lDoc = new Document();
- lDoc.add(newField("handle", "1 2", Field.Store.YES, Field.Index.ANALYZED));
- Document lDoc2 = new Document();
- lDoc2.add(newField("handle", "1 2", Field.Store.YES, Field.Index.ANALYZED));
- Document lDoc3 = new Document();
- lDoc3.add(newField("handle", "1 2", Field.Store.YES, Field.Index.ANALYZED));
-
- IndexWriter writerA = new IndexWriter(indexStoreA, newIndexWriterConfig(
- TEST_VERSION_CURRENT, new MockAnalyzer()));
- IndexWriter writerB = new IndexWriter(indexStoreB, newIndexWriterConfig(
- TEST_VERSION_CURRENT, new MockAnalyzer()));
-
- writerA.addDocument(lDoc);
- writerA.addDocument(lDoc2);
- writerA.optimize();
- writerA.close();
-
- writerB.addDocument(lDoc3);
- writerB.close();
-
- QueryParser parser = new QueryParser(TEST_VERSION_CURRENT, "fulltext", new MockAnalyzer());
- Query query = parser.parse("handle:1");
-
- Searcher[] searchers = new Searcher[2];
- searchers[0] = new IndexSearcher(indexStoreB, true);
- searchers[1] = new IndexSearcher(indexStoreA, true);
- Searcher mSearcher = new MultiSearcher(searchers);
- ScoreDoc[] hits = mSearcher.search(query, null, 1000).scoreDocs;
-
- assertEquals(3, hits.length);
-
- Explanation explain = mSearcher.explain(query, hits[0].doc);
- String exp = explain.toString(0);
- assertTrue(exp, exp.indexOf("maxDocs=3") > -1);
- assertTrue(exp, exp.indexOf("docFreq=3") > -1);
-
- query = parser.parse("handle:\"1 2\"");
- hits = mSearcher.search(query, null, 1000).scoreDocs;
-
- assertEquals(3, hits.length);
-
- explain = mSearcher.explain(query, hits[0].doc);
- exp = explain.toString(0);
- assertTrue(exp, exp.indexOf("1=3") > -1);
- assertTrue(exp, exp.indexOf("2=3") > -1);
-
- query = new SpanNearQuery(new SpanQuery[] {
- new SpanTermQuery(new Term("handle", "1")),
- new SpanTermQuery(new Term("handle", "2")) }, 0, true);
- hits = mSearcher.search(query, null, 1000).scoreDocs;
-
- assertEquals(3, hits.length);
-
- explain = mSearcher.explain(query, hits[0].doc);
- exp = explain.toString(0);
- assertTrue(exp, exp.indexOf("1=3") > -1);
- assertTrue(exp, exp.indexOf("2=3") > -1);
- mSearcher.close();
- indexStoreA.close();
- indexStoreB.close();
- }
-
}
diff --git a/lucene/src/test/org/apache/lucene/search/TestSort.java b/lucene/src/test/org/apache/lucene/search/TestSort.java
index 62a164c8e6b..a4b22d654e0 100644
--- a/lucene/src/test/org/apache/lucene/search/TestSort.java
+++ b/lucene/src/test/org/apache/lucene/search/TestSort.java
@@ -36,9 +36,9 @@ import org.apache.lucene.index.CorruptIndexException;
import org.apache.lucene.index.IndexReader;
import org.apache.lucene.index.IndexWriter;
import org.apache.lucene.index.IndexWriterConfig;
+import org.apache.lucene.index.MultiReader;
import org.apache.lucene.index.RandomIndexWriter;
import org.apache.lucene.index.Term;
-import org.apache.lucene.util.BytesRef;
import org.apache.lucene.queryParser.ParseException;
import org.apache.lucene.search.BooleanClause.Occur;
import org.apache.lucene.search.FieldValueHitQueue.Entry;
@@ -49,8 +49,9 @@ import org.apache.lucene.search.cache.FloatValuesCreator;
import org.apache.lucene.search.cache.IntValuesCreator;
import org.apache.lucene.search.cache.LongValuesCreator;
import org.apache.lucene.search.cache.ShortValuesCreator;
-import org.apache.lucene.store.LockObtainFailedException;
import org.apache.lucene.store.Directory;
+import org.apache.lucene.store.LockObtainFailedException;
+import org.apache.lucene.util.BytesRef;
import org.apache.lucene.util.DocIdBitSet;
import org.apache.lucene.util.LuceneTestCase;
import org.apache.lucene.util._TestUtil;
@@ -456,7 +457,7 @@ public class TestSort extends LuceneTestCase implements Serializable {
// test sorts when there's nothing in the index
public void testEmptyIndex() throws Exception {
- Searcher empty = getEmptyIndex();
+ IndexSearcher empty = getEmptyIndex();
sort = new Sort();
assertMatches (empty, queryX, sort, "");
@@ -599,23 +600,9 @@ public class TestSort extends LuceneTestCase implements Serializable {
new SortField ("float", SortField.FLOAT, true) );
assertMatches (full, queryG, sort, "ZYXW");
- // Do the same for a MultiSearcher
- Searcher multiSearcher=new MultiSearcher (full);
-
- sort.setSort (new SortField ("int", SortField.INT),
- new SortField ("string", SortField.STRING),
- new SortField ("float", SortField.FLOAT) );
- assertMatches (multiSearcher, queryG, sort, "ZWXY");
-
- sort.setSort (new SortField ("int", SortField.INT),
- new SortField ("string", SortField.STRING),
- new SortField ("float", SortField.FLOAT, true) );
- assertMatches (multiSearcher, queryG, sort, "ZYXW");
- // Don't close the multiSearcher. it would close the full searcher too!
-
// Do the same for a ParallelMultiSearcher
ExecutorService exec = Executors.newFixedThreadPool(_TestUtil.nextInt(random, 2, 8));
- Searcher parallelSearcher=new ParallelMultiSearcher (exec, full);
+ IndexSearcher parallelSearcher=new IndexSearcher (full.getIndexReader(), exec);
sort.setSort (new SortField ("int", SortField.INT),
new SortField ("string", SortField.STRING),
@@ -627,6 +614,7 @@ public class TestSort extends LuceneTestCase implements Serializable {
new SortField ("float", SortField.FLOAT, true) );
assertMatches (parallelSearcher, queryG, sort, "ZYXW");
parallelSearcher.close();
+ exec.shutdown();
exec.awaitTermination(1000, TimeUnit.MILLISECONDS);
}
@@ -672,117 +660,19 @@ public class TestSort extends LuceneTestCase implements Serializable {
assertMatches (full, queryX, sort, "EACGI");
}
- // Test the MultiSearcher's ability to preserve locale-sensitive ordering
- // by wrapping it around a single searcher
- public void testInternationalMultiSearcherSort() throws Exception {
- Searcher multiSearcher = new MultiSearcher (full);
-
- sort.setSort (new SortField ("i18n", new Locale("sv", "se")));
- assertMatches (multiSearcher, queryY, sort, "BJDFH");
-
- sort.setSort (new SortField ("i18n", Locale.US));
- assertMatches (multiSearcher, queryY, sort, oStrokeFirst ? "BFJHD" : "BFJDH");
-
- sort.setSort (new SortField ("i18n", new Locale("da", "dk")));
- assertMatches (multiSearcher, queryY, sort, "BJDHF");
- }
-
- // test a variety of sorts using more than one searcher
- public void testMultiSort() throws Exception {
- MultiSearcher searcher = new MultiSearcher (searchX, searchY);
- runMultiSorts(searcher, false);
- }
-
// test a variety of sorts using a parallel multisearcher
public void testParallelMultiSort() throws Exception {
ExecutorService exec = Executors.newFixedThreadPool(_TestUtil.nextInt(random, 2, 8));
- Searcher searcher = new ParallelMultiSearcher (exec, searchX, searchY);
+ IndexSearcher searcher = new IndexSearcher(
+ new MultiReader(
+ new IndexReader[] {searchX.getIndexReader(),
+ searchY.getIndexReader()}), exec);
runMultiSorts(searcher, false);
searcher.close();
+ exec.shutdown();
exec.awaitTermination(1000, TimeUnit.MILLISECONDS);
}
- // test that the relevancy scores are the same even if
- // hits are sorted
- public void testNormalizedScores() throws Exception {
-
- // capture relevancy scores
- HashMap scoresX = getScores (full.search (queryX, null, 1000).scoreDocs, full);
- HashMap scoresY = getScores (full.search (queryY, null, 1000).scoreDocs, full);
- HashMap scoresA = getScores (full.search (queryA, null, 1000).scoreDocs, full);
-
- // we'll test searching locally, remote and multi
-
- MultiSearcher multi = new MultiSearcher (searchX, searchY);
-
- // change sorting and make sure relevancy stays the same
-
- sort = new Sort();
- assertSameValues (scoresX, getScores (full.search (queryX, null, 1000, sort).scoreDocs, full));
- assertSameValues (scoresX, getScores (multi.search (queryX, null, 1000, sort).scoreDocs, multi));
- assertSameValues (scoresY, getScores (full.search (queryY, null, 1000, sort).scoreDocs, full));
- assertSameValues (scoresY, getScores (multi.search (queryY, null, 1000, sort).scoreDocs, multi));
- assertSameValues (scoresA, getScores (full.search (queryA, null, 1000, sort).scoreDocs, full));
- assertSameValues (scoresA, getScores (multi.search (queryA, null, 1000, sort).scoreDocs, multi));
-
- sort.setSort(SortField.FIELD_DOC);
- assertSameValues (scoresX, getScores (full.search (queryX, null, 1000, sort).scoreDocs, full));
- assertSameValues (scoresX, getScores (multi.search (queryX, null, 1000, sort).scoreDocs, multi));
- assertSameValues (scoresY, getScores (full.search (queryY, null, 1000, sort).scoreDocs, full));
- assertSameValues (scoresY, getScores (multi.search (queryY, null, 1000, sort).scoreDocs, multi));
- assertSameValues (scoresA, getScores (full.search (queryA, null, 1000, sort).scoreDocs, full));
- assertSameValues (scoresA, getScores (multi.search (queryA, null, 1000, sort).scoreDocs, multi));
-
- sort.setSort (new SortField("int", SortField.INT));
- assertSameValues (scoresX, getScores (full.search (queryX, null, 1000, sort).scoreDocs, full));
- assertSameValues (scoresX, getScores (multi.search (queryX, null, 1000, sort).scoreDocs, multi));
- assertSameValues (scoresY, getScores (full.search (queryY, null, 1000, sort).scoreDocs, full));
- assertSameValues (scoresY, getScores (multi.search (queryY, null, 1000, sort).scoreDocs, multi));
- assertSameValues (scoresA, getScores (full.search (queryA, null, 1000, sort).scoreDocs, full));
- assertSameValues (scoresA, getScores (multi.search (queryA, null, 1000, sort).scoreDocs, multi));
-
- sort.setSort (new SortField("float", SortField.FLOAT));
- assertSameValues (scoresX, getScores (full.search (queryX, null, 1000, sort).scoreDocs, full));
- assertSameValues (scoresX, getScores (multi.search (queryX, null, 1000, sort).scoreDocs, multi));
- assertSameValues (scoresY, getScores (full.search (queryY, null, 1000, sort).scoreDocs, full));
- assertSameValues (scoresY, getScores (multi.search (queryY, null, 1000, sort).scoreDocs, multi));
- assertSameValues (scoresA, getScores (full.search (queryA, null, 1000, sort).scoreDocs, full));
- assertSameValues (scoresA, getScores (multi.search (queryA, null, 1000, sort).scoreDocs, multi));
-
- sort.setSort (new SortField("string", SortField.STRING));
- assertSameValues (scoresX, getScores (full.search (queryX, null, 1000, sort).scoreDocs, full));
- assertSameValues (scoresX, getScores (multi.search (queryX, null, 1000, sort).scoreDocs, multi));
- assertSameValues (scoresY, getScores (full.search (queryY, null, 1000, sort).scoreDocs, full));
- assertSameValues (scoresY, getScores (multi.search (queryY, null, 1000, sort).scoreDocs, multi));
- assertSameValues (scoresA, getScores (full.search (queryA, null, 1000, sort).scoreDocs, full));
- assertSameValues (scoresA, getScores (multi.search (queryA, null, 1000, sort).scoreDocs, multi));
-
- sort.setSort (new SortField("int", SortField.INT),new SortField("float", SortField.FLOAT));
- assertSameValues (scoresX, getScores (full.search (queryX, null, 1000, sort).scoreDocs, full));
- assertSameValues (scoresX, getScores (multi.search (queryX, null, 1000, sort).scoreDocs, multi));
- assertSameValues (scoresY, getScores (full.search (queryY, null, 1000, sort).scoreDocs, full));
- assertSameValues (scoresY, getScores (multi.search (queryY, null, 1000, sort).scoreDocs, multi));
- assertSameValues (scoresA, getScores (full.search (queryA, null, 1000, sort).scoreDocs, full));
- assertSameValues (scoresA, getScores (multi.search (queryA, null, 1000, sort).scoreDocs, multi));
-
- sort.setSort (new SortField ("int", SortField.INT, true), new SortField (null, SortField.DOC, true) );
- assertSameValues (scoresX, getScores (full.search (queryX, null, 1000, sort).scoreDocs, full));
- assertSameValues (scoresX, getScores (multi.search (queryX, null, 1000, sort).scoreDocs, multi));
- assertSameValues (scoresY, getScores (full.search (queryY, null, 1000, sort).scoreDocs, full));
- assertSameValues (scoresY, getScores (multi.search (queryY, null, 1000, sort).scoreDocs, multi));
- assertSameValues (scoresA, getScores (full.search (queryA, null, 1000, sort).scoreDocs, full));
- assertSameValues (scoresA, getScores (multi.search (queryA, null, 1000, sort).scoreDocs, multi));
-
- sort.setSort (new SortField("int", SortField.INT),new SortField("string", SortField.STRING));
- assertSameValues (scoresX, getScores (full.search (queryX, null, 1000, sort).scoreDocs, full));
- assertSameValues (scoresX, getScores (multi.search (queryX, null, 1000, sort).scoreDocs, multi));
- assertSameValues (scoresY, getScores (full.search (queryY, null, 1000, sort).scoreDocs, full));
- assertSameValues (scoresY, getScores (multi.search (queryY, null, 1000, sort).scoreDocs, multi));
- assertSameValues (scoresA, getScores (full.search (queryA, null, 1000, sort).scoreDocs, full));
- assertSameValues (scoresA, getScores (multi.search (queryA, null, 1000, sort).scoreDocs, multi));
-
- }
-
public void testTopDocsScores() throws Exception {
// There was previously a bug in FieldSortedHitQueue.maxscore when only a single
@@ -1024,7 +914,7 @@ public class TestSort extends LuceneTestCase implements Serializable {
}
// runs a variety of sorts useful for multisearchers
- private void runMultiSorts(Searcher multi, boolean isFull) throws Exception {
+ private void runMultiSorts(IndexSearcher multi, boolean isFull) throws Exception {
sort.setSort(SortField.FIELD_DOC);
String expected = isFull ? "ABCDEFGHIJ" : "ACEGIBDFHJ";
assertMatches(multi, queryA, sort, expected);
@@ -1101,12 +991,12 @@ public class TestSort extends LuceneTestCase implements Serializable {
}
- private void assertMatches(Searcher searcher, Query query, Sort sort, String expectedResult) throws IOException {
+ private void assertMatches(IndexSearcher searcher, Query query, Sort sort, String expectedResult) throws IOException {
assertMatches( null, searcher, query, sort, expectedResult );
}
// make sure the documents returned by the search match the expected list
- private void assertMatches(String msg, Searcher searcher, Query query, Sort sort,
+ private void assertMatches(String msg, IndexSearcher searcher, Query query, Sort sort,
String expectedResult) throws IOException {
//ScoreDoc[] result = searcher.search (query, null, 1000, sort).scoreDocs;
TopDocs hits = searcher.search (query, null, Math.max(1, expectedResult.length()), sort);
@@ -1124,7 +1014,7 @@ public class TestSort extends LuceneTestCase implements Serializable {
assertEquals (msg, expectedResult, buff.toString());
}
- private HashMap getScores (ScoreDoc[] hits, Searcher searcher)
+ private HashMap getScores (ScoreDoc[] hits, IndexSearcher searcher)
throws IOException {
HashMap scoreMap = new HashMap();
int n = hits.length;
diff --git a/lucene/src/test/org/apache/lucene/search/TestTermRangeQuery.java b/lucene/src/test/org/apache/lucene/search/TestTermRangeQuery.java
index f5e5eda10e3..631337ee7c2 100644
--- a/lucene/src/test/org/apache/lucene/search/TestTermRangeQuery.java
+++ b/lucene/src/test/org/apache/lucene/search/TestTermRangeQuery.java
@@ -134,7 +134,7 @@ public class TestTermRangeQuery extends LuceneTestCase {
searcher.close();
}
- private void checkBooleanTerms(Searcher searcher, TermRangeQuery query, String... terms) throws IOException {
+ private void checkBooleanTerms(IndexSearcher searcher, TermRangeQuery query, String... terms) throws IOException {
query.setRewriteMethod(new MultiTermQuery.TopTermsScoringBooleanQueryRewrite(50));
final BooleanQuery bq = (BooleanQuery) searcher.rewrite(query);
final Set allowedTerms = asSet(terms);
diff --git a/lucene/src/test/org/apache/lucene/search/TestTimeLimitingCollector.java b/lucene/src/test/org/apache/lucene/search/TestTimeLimitingCollector.java
index 6fd423c66fb..53d2deaa6b2 100644
--- a/lucene/src/test/org/apache/lucene/search/TestTimeLimitingCollector.java
+++ b/lucene/src/test/org/apache/lucene/search/TestTimeLimitingCollector.java
@@ -50,7 +50,7 @@ public class TestTimeLimitingCollector extends LuceneTestCase {
private static final int N_DOCS = 3000;
private static final int N_THREADS = 50;
- private Searcher searcher;
+ private IndexSearcher searcher;
private Directory directory;
private IndexReader reader;
diff --git a/lucene/src/test/org/apache/lucene/search/TestWildcardRandom.java b/lucene/src/test/org/apache/lucene/search/TestWildcardRandom.java
index afad92fe7d8..bb07c16549d 100644
--- a/lucene/src/test/org/apache/lucene/search/TestWildcardRandom.java
+++ b/lucene/src/test/org/apache/lucene/search/TestWildcardRandom.java
@@ -38,7 +38,7 @@ import org.apache.lucene.util._TestUtil;
* and validates the correct number of hits are returned.
*/
public class TestWildcardRandom extends LuceneTestCase {
- private Searcher searcher;
+ private IndexSearcher searcher;
private IndexReader reader;
private Directory dir;
diff --git a/lucene/src/test/org/apache/lucene/search/function/TestCustomScoreQuery.java b/lucene/src/test/org/apache/lucene/search/function/TestCustomScoreQuery.java
index 33fc59d5d7d..9c32afd73de 100755
--- a/lucene/src/test/org/apache/lucene/search/function/TestCustomScoreQuery.java
+++ b/lucene/src/test/org/apache/lucene/search/function/TestCustomScoreQuery.java
@@ -329,7 +329,7 @@ public class TestCustomScoreQuery extends FunctionTestSetup {
}
}
- private void logResult(String msg, Searcher s, Query q, int doc, float score1) throws IOException {
+ private void logResult(String msg, IndexSearcher s, Query q, int doc, float score1) throws IOException {
log(msg+" "+score1);
log("Explain by: "+q);
log(s.explain(q,doc));
diff --git a/lucene/src/test/org/apache/lucene/search/payloads/TestPayloadNearQuery.java b/lucene/src/test/org/apache/lucene/search/payloads/TestPayloadNearQuery.java
index 449230acee6..a006fb04e81 100644
--- a/lucene/src/test/org/apache/lucene/search/payloads/TestPayloadNearQuery.java
+++ b/lucene/src/test/org/apache/lucene/search/payloads/TestPayloadNearQuery.java
@@ -32,10 +32,9 @@ import org.apache.lucene.index.RandomIndexWriter;
import org.apache.lucene.index.Term;
import org.apache.lucene.search.DefaultSimilarity;
import org.apache.lucene.search.Explanation;
-import org.apache.lucene.search.IndexSearcher;
import org.apache.lucene.search.QueryUtils;
import org.apache.lucene.search.ScoreDoc;
-import org.apache.lucene.search.Searcher;
+import org.apache.lucene.search.IndexSearcher;
import org.apache.lucene.search.TopDocs;
import org.apache.lucene.search.spans.SpanQuery;
import org.apache.lucene.search.spans.SpanNearQuery;
@@ -325,7 +324,7 @@ public class TestPayloadNearQuery extends LuceneTestCase {
return 1.0f;
}
// idf used for phrase queries
- @Override public IDFExplanation idfExplain(Collection terms, Searcher searcher) throws IOException {
+ @Override public IDFExplanation idfExplain(Collection terms, IndexSearcher searcher) throws IOException {
return new IDFExplanation() {
@Override
public float getIdf() {
diff --git a/lucene/src/test/org/apache/lucene/search/spans/TestSpanMultiTermQueryWrapper.java b/lucene/src/test/org/apache/lucene/search/spans/TestSpanMultiTermQueryWrapper.java
index 49437b4953d..48c26c92da1 100644
--- a/lucene/src/test/org/apache/lucene/search/spans/TestSpanMultiTermQueryWrapper.java
+++ b/lucene/src/test/org/apache/lucene/search/spans/TestSpanMultiTermQueryWrapper.java
@@ -24,7 +24,6 @@ import org.apache.lucene.index.RandomIndexWriter;
import org.apache.lucene.index.Term;
import org.apache.lucene.search.FuzzyQuery;
import org.apache.lucene.search.IndexSearcher;
-import org.apache.lucene.search.Searcher;
import org.apache.lucene.search.WildcardQuery;
import org.apache.lucene.store.Directory;
import org.apache.lucene.util.LuceneTestCase;
@@ -35,7 +34,7 @@ import org.apache.lucene.util.LuceneTestCase;
public class TestSpanMultiTermQueryWrapper extends LuceneTestCase {
private Directory directory;
private IndexReader reader;
- private Searcher searcher;
+ private IndexSearcher searcher;
@Override
public void setUp() throws Exception {
diff --git a/lucene/src/test/org/apache/lucene/search/spans/TestSpans.java b/lucene/src/test/org/apache/lucene/search/spans/TestSpans.java
index 92d0742055f..ac96892d5a0 100644
--- a/lucene/src/test/org/apache/lucene/search/spans/TestSpans.java
+++ b/lucene/src/test/org/apache/lucene/search/spans/TestSpans.java
@@ -18,14 +18,13 @@ package org.apache.lucene.search.spans;
*/
import org.apache.lucene.search.DocIdSetIterator;
-import org.apache.lucene.search.IndexSearcher;
import org.apache.lucene.search.Query;
import org.apache.lucene.search.CheckHits;
import org.apache.lucene.search.Similarity;
import org.apache.lucene.search.DefaultSimilarity;
import org.apache.lucene.search.Scorer;
import org.apache.lucene.search.TermQuery;
-import org.apache.lucene.search.Searcher;
+import org.apache.lucene.search.IndexSearcher;
import org.apache.lucene.store.Directory;
import org.apache.lucene.analysis.MockAnalyzer;
import org.apache.lucene.index.IndexWriter;
@@ -416,7 +415,7 @@ public class TestSpans extends LuceneTestCase {
slop,
ordered) {
@Override
- public Similarity getSimilarity(Searcher s) {
+ public Similarity getSimilarity(IndexSearcher s) {
return sim;
}
};
@@ -439,7 +438,7 @@ public class TestSpans extends LuceneTestCase {
}
// LUCENE-1404
- private int hitCount(Searcher searcher, String word) throws Throwable {
+ private int hitCount(IndexSearcher searcher, String word) throws Throwable {
return searcher.search(new TermQuery(new Term("text", word)), 10).totalHits;
}
diff --git a/lucene/src/test/org/apache/lucene/search/spans/TestSpansAdvanced.java b/lucene/src/test/org/apache/lucene/search/spans/TestSpansAdvanced.java
index f3babbee92d..c1ed5283164 100644
--- a/lucene/src/test/org/apache/lucene/search/spans/TestSpansAdvanced.java
+++ b/lucene/src/test/org/apache/lucene/search/spans/TestSpansAdvanced.java
@@ -134,7 +134,7 @@ public class TestSpansAdvanced extends LuceneTestCase {
*
* @throws IOException
*/
- protected static void assertHits(Searcher s, Query query,
+ protected static void assertHits(IndexSearcher s, Query query,
final String description, final String[] expectedIds,
final float[] expectedScores) throws IOException {
QueryUtils.check(random, query, s);
diff --git a/modules/analysis/common/src/test/org/apache/lucene/collation/CollationTestBase.java b/modules/analysis/common/src/test/org/apache/lucene/collation/CollationTestBase.java
index 5cfc0357198..11b4eb5474e 100644
--- a/modules/analysis/common/src/test/org/apache/lucene/collation/CollationTestBase.java
+++ b/modules/analysis/common/src/test/org/apache/lucene/collation/CollationTestBase.java
@@ -26,13 +26,12 @@ import org.apache.lucene.index.IndexWriter;
import org.apache.lucene.index.IndexWriterConfig;
import org.apache.lucene.index.Term;
import org.apache.lucene.index.IndexReader;
-import org.apache.lucene.search.IndexSearcher;
import org.apache.lucene.search.ScoreDoc;
import org.apache.lucene.search.Query;
import org.apache.lucene.search.TermRangeFilter;
import org.apache.lucene.search.TermQuery;
import org.apache.lucene.search.TermRangeQuery;
-import org.apache.lucene.search.Searcher;
+import org.apache.lucene.search.IndexSearcher;
import org.apache.lucene.search.Sort;
import org.apache.lucene.search.SortField;
import org.apache.lucene.document.Field;
@@ -215,7 +214,7 @@ public abstract class CollationTestBase extends LuceneTestCase {
}
writer.optimize();
writer.close();
- Searcher searcher = new IndexSearcher(indexStore, true);
+ IndexSearcher searcher = new IndexSearcher(indexStore, true);
Sort sort = new Sort();
Query queryX = new TermQuery(new Term ("contents", "x"));
@@ -236,7 +235,7 @@ public abstract class CollationTestBase extends LuceneTestCase {
// Make sure the documents returned by the search match the expected list
// Copied from TestSort.java
- private void assertMatches(Searcher searcher, Query query, Sort sort,
+ private void assertMatches(IndexSearcher searcher, Query query, Sort sort,
String expectedResult) throws IOException {
ScoreDoc[] result = searcher.search(query, null, 1000, sort).scoreDocs;
StringBuilder buff = new StringBuilder(10);
diff --git a/modules/benchmark/src/java/org/apache/lucene/benchmark/byTask/tasks/ReadTask.java b/modules/benchmark/src/java/org/apache/lucene/benchmark/byTask/tasks/ReadTask.java
index b6848144bba..414cf23c4c6 100644
--- a/modules/benchmark/src/java/org/apache/lucene/benchmark/byTask/tasks/ReadTask.java
+++ b/modules/benchmark/src/java/org/apache/lucene/benchmark/byTask/tasks/ReadTask.java
@@ -36,7 +36,6 @@ import org.apache.lucene.search.MultiTermQuery;
import org.apache.lucene.search.TopFieldCollector;
import org.apache.lucene.search.ScoreDoc;
import org.apache.lucene.search.TopScoreDocCollector;
-import org.apache.lucene.search.Weight;
import org.apache.lucene.search.IndexSearcher;
import org.apache.lucene.search.Query;
import org.apache.lucene.search.Sort;
@@ -114,12 +113,16 @@ public abstract class ReadTask extends PerfTask {
if (numHits > 0) {
if (withCollector() == false) {
if (sort != null) {
- Weight w = q.weight(searcher);
+ // TODO: instead of always passing false we
+ // should detect based on the query; if we make
+ // the IndexSearcher search methods that take
+ // Weight public again, we can go back to
+ // pulling the Weight ourselves:
TopFieldCollector collector = TopFieldCollector.create(sort, numHits,
true, withScore(),
withMaxScore(),
- !w.scoresDocsOutOfOrder());
- searcher.search(w, null, collector);
+ false);
+ searcher.search(q, null, collector);
hits = collector.topDocs();
} else {
hits = searcher.search(q, numHits);
diff --git a/modules/benchmark/src/java/org/apache/lucene/benchmark/quality/QualityBenchmark.java b/modules/benchmark/src/java/org/apache/lucene/benchmark/quality/QualityBenchmark.java
index 73a76b5ccba..0e91b6fe3e4 100644
--- a/modules/benchmark/src/java/org/apache/lucene/benchmark/quality/QualityBenchmark.java
+++ b/modules/benchmark/src/java/org/apache/lucene/benchmark/quality/QualityBenchmark.java
@@ -23,7 +23,7 @@ import org.apache.lucene.benchmark.quality.utils.DocNameExtractor;
import org.apache.lucene.benchmark.quality.utils.SubmissionReport;
import org.apache.lucene.search.Query;
import org.apache.lucene.search.ScoreDoc;
-import org.apache.lucene.search.Searcher;
+import org.apache.lucene.search.IndexSearcher;
import org.apache.lucene.search.TopDocs;
/**
@@ -47,7 +47,7 @@ public class QualityBenchmark {
protected QualityQueryParser qqParser;
/** Index to be searched. */
- protected Searcher searcher;
+ protected IndexSearcher searcher;
/** index field to extract doc name for each search result; used for judging the results. */
protected String docNameField;
@@ -68,7 +68,7 @@ public class QualityBenchmark {
* and is important for judging the results.
*/
public QualityBenchmark(QualityQuery qqs[], QualityQueryParser qqParser,
- Searcher searcher, String docNameField) {
+ IndexSearcher searcher, String docNameField) {
this.qualityQueries = qqs;
this.qqParser = qqParser;
this.searcher = searcher;
diff --git a/modules/benchmark/src/java/org/apache/lucene/benchmark/quality/trec/QueryDriver.java b/modules/benchmark/src/java/org/apache/lucene/benchmark/quality/trec/QueryDriver.java
index 5f92f08d39e..af467cc83cf 100644
--- a/modules/benchmark/src/java/org/apache/lucene/benchmark/quality/trec/QueryDriver.java
+++ b/modules/benchmark/src/java/org/apache/lucene/benchmark/quality/trec/QueryDriver.java
@@ -17,13 +17,10 @@ package org.apache.lucene.benchmark.quality.trec;
* limitations under the License.
*/
-import org.apache.lucene.benchmark.quality.trec.TrecJudge;
-import org.apache.lucene.benchmark.quality.trec.TrecTopicsReader;
import org.apache.lucene.benchmark.quality.utils.SimpleQQParser;
import org.apache.lucene.benchmark.quality.utils.SubmissionReport;
import org.apache.lucene.benchmark.quality.*;
import org.apache.lucene.search.IndexSearcher;
-import org.apache.lucene.search.Searcher;
import org.apache.lucene.store.FSDirectory;
import java.io.BufferedReader;
@@ -56,7 +53,7 @@ public class QueryDriver {
SubmissionReport submitLog = new SubmissionReport(new PrintWriter(args[2]), "lucene");
FSDirectory dir = FSDirectory.open(new File(args[3]));
String fieldSpec = args.length == 5 ? args[4] : "T"; // default to Title-only if not specified.
- Searcher searcher = new IndexSearcher(dir, true);
+ IndexSearcher searcher = new IndexSearcher(dir, true);
int maxResults = 1000;
String docNameField = "docname";
diff --git a/modules/benchmark/src/java/org/apache/lucene/benchmark/quality/utils/DocNameExtractor.java b/modules/benchmark/src/java/org/apache/lucene/benchmark/quality/utils/DocNameExtractor.java
index 36399561ced..8ab80ab1767 100755
--- a/modules/benchmark/src/java/org/apache/lucene/benchmark/quality/utils/DocNameExtractor.java
+++ b/modules/benchmark/src/java/org/apache/lucene/benchmark/quality/utils/DocNameExtractor.java
@@ -20,7 +20,7 @@ import java.io.IOException;
import org.apache.lucene.document.FieldSelector;
import org.apache.lucene.document.FieldSelectorResult;
-import org.apache.lucene.search.Searcher;
+import org.apache.lucene.search.IndexSearcher;
/**
* Utility: extract doc names from an index
@@ -52,7 +52,7 @@ public class DocNameExtractor {
* @return the name of the input doc as extracted from the index.
* @throws IOException if cannot extract the doc name from the index.
*/
- public String docName(Searcher searcher, int docid) throws IOException {
+ public String docName(IndexSearcher searcher, int docid) throws IOException {
return searcher.doc(docid,fldSel).get(docNameField);
}
diff --git a/modules/benchmark/src/java/org/apache/lucene/benchmark/quality/utils/SubmissionReport.java b/modules/benchmark/src/java/org/apache/lucene/benchmark/quality/utils/SubmissionReport.java
index f9ea2d0d29b..79e3f0b1644 100644
--- a/modules/benchmark/src/java/org/apache/lucene/benchmark/quality/utils/SubmissionReport.java
+++ b/modules/benchmark/src/java/org/apache/lucene/benchmark/quality/utils/SubmissionReport.java
@@ -22,7 +22,7 @@ import java.text.NumberFormat;
import org.apache.lucene.benchmark.quality.QualityQuery;
import org.apache.lucene.search.ScoreDoc;
-import org.apache.lucene.search.Searcher;
+import org.apache.lucene.search.IndexSearcher;
import org.apache.lucene.search.TopDocs;
/**
@@ -58,7 +58,7 @@ public class SubmissionReport {
* @param searcher index access for fetching doc name.
* @throws IOException in case of a problem.
*/
- public void report(QualityQuery qq, TopDocs td, String docNameField, Searcher searcher) throws IOException {
+ public void report(QualityQuery qq, TopDocs td, String docNameField, IndexSearcher searcher) throws IOException {
if (logger==null) {
return;
}
diff --git a/solr/src/java/org/apache/solr/schema/LatLonType.java b/solr/src/java/org/apache/solr/schema/LatLonType.java
index b869b7210f4..5bda359d146 100644
--- a/solr/src/java/org/apache/solr/schema/LatLonType.java
+++ b/solr/src/java/org/apache/solr/schema/LatLonType.java
@@ -334,13 +334,13 @@ class SpatialDistanceQuery extends Query {
public void extractTerms(Set terms) {}
protected class SpatialWeight extends Weight {
- protected Searcher searcher;
+ protected IndexSearcher searcher;
protected float queryNorm;
protected float queryWeight;
protected Map latContext;
protected Map lonContext;
- public SpatialWeight(Searcher searcher) throws IOException {
+ public SpatialWeight(IndexSearcher searcher) throws IOException {
this.searcher = searcher;
this.latContext = latSource.newContext();
this.lonContext = lonSource.newContext();
@@ -535,7 +535,7 @@ class SpatialDistanceQuery extends Query {
@Override
- public Weight createWeight(Searcher searcher) throws IOException {
+ public Weight createWeight(IndexSearcher searcher) throws IOException {
return new SpatialWeight(searcher);
}
diff --git a/solr/src/java/org/apache/solr/search/LuceneQueryOptimizer.java b/solr/src/java/org/apache/solr/search/LuceneQueryOptimizer.java
index d003f40c5ce..65812a5bf41 100644
--- a/solr/src/java/org/apache/solr/search/LuceneQueryOptimizer.java
+++ b/solr/src/java/org/apache/solr/search/LuceneQueryOptimizer.java
@@ -54,7 +54,7 @@ class LuceneQueryOptimizer {
}
public TopDocs optimize(BooleanQuery original,
- Searcher searcher,
+ IndexSearcher searcher,
int numHits,
Query[] queryOut,
Filter[] filterOut
diff --git a/solr/src/java/org/apache/solr/search/SolrConstantScoreQuery.java b/solr/src/java/org/apache/solr/search/SolrConstantScoreQuery.java
index efb341b1da8..50c94775285 100755
--- a/solr/src/java/org/apache/solr/search/SolrConstantScoreQuery.java
+++ b/solr/src/java/org/apache/solr/search/SolrConstantScoreQuery.java
@@ -59,7 +59,7 @@ public class SolrConstantScoreQuery extends ConstantScoreQuery {
private float queryWeight;
private Map context;
- public ConstantWeight(Searcher searcher) throws IOException {
+ public ConstantWeight(IndexSearcher searcher) throws IOException {
this.similarity = getSimilarity(searcher);
this.context = ValueSource.newContext();
if (filter instanceof SolrFilter)
@@ -161,7 +161,7 @@ public class SolrConstantScoreQuery extends ConstantScoreQuery {
}
@Override
- public Weight createWeight(Searcher searcher) {
+ public Weight createWeight(IndexSearcher searcher) {
try {
return new SolrConstantScoreQuery.ConstantWeight(searcher);
} catch (IOException e) {
diff --git a/solr/src/java/org/apache/solr/search/SolrFilter.java b/solr/src/java/org/apache/solr/search/SolrFilter.java
index c09d5698cd8..2a368c30e96 100644
--- a/solr/src/java/org/apache/solr/search/SolrFilter.java
+++ b/solr/src/java/org/apache/solr/search/SolrFilter.java
@@ -18,7 +18,7 @@
package org.apache.solr.search;
import org.apache.lucene.search.Filter;
-import org.apache.lucene.search.Searcher;
+import org.apache.lucene.search.IndexSearcher;
import org.apache.lucene.search.DocIdSet;
import org.apache.lucene.index.IndexReader;
@@ -35,7 +35,7 @@ public abstract class SolrFilter extends Filter {
/** Implementations should propagate createWeight to sub-ValueSources which can store weight info in the context.
* The context object will be passed to getDocIdSet() where this info can be retrieved. */
- public abstract void createWeight(Map context, Searcher searcher) throws IOException;
+ public abstract void createWeight(Map context, IndexSearcher searcher) throws IOException;
public abstract DocIdSet getDocIdSet(Map context, IndexReader reader) throws IOException;
diff --git a/solr/src/java/org/apache/solr/search/ValueSourceParser.java b/solr/src/java/org/apache/solr/search/ValueSourceParser.java
index 40800f6d2d3..f9ceb1feec6 100755
--- a/solr/src/java/org/apache/solr/search/ValueSourceParser.java
+++ b/solr/src/java/org/apache/solr/search/ValueSourceParser.java
@@ -20,7 +20,7 @@ import org.apache.lucene.index.IndexReader;
import org.apache.lucene.index.Term;
import org.apache.lucene.queryParser.ParseException;
import org.apache.lucene.search.Query;
-import org.apache.lucene.search.Searcher;
+import org.apache.lucene.search.IndexSearcher;
import org.apache.lucene.search.TermQuery;
import org.apache.lucene.search.spell.JaroWinklerDistance;
import org.apache.lucene.search.spell.LevensteinDistance;
@@ -889,7 +889,7 @@ abstract class Double2Parser extends NamedParser {
}
@Override
- public void createWeight(Map context, Searcher searcher) throws IOException {
+ public void createWeight(Map context, IndexSearcher searcher) throws IOException {
a.createWeight(context,searcher);
b.createWeight(context,searcher);
}
diff --git a/solr/src/java/org/apache/solr/search/function/BoostedQuery.java b/solr/src/java/org/apache/solr/search/function/BoostedQuery.java
index 4bae945fc99..ad45f7bb15f 100755
--- a/solr/src/java/org/apache/solr/search/function/BoostedQuery.java
+++ b/solr/src/java/org/apache/solr/search/function/BoostedQuery.java
@@ -53,16 +53,16 @@ public class BoostedQuery extends Query {
q.extractTerms(terms);
}
- public Weight createWeight(Searcher searcher) throws IOException {
+ public Weight createWeight(IndexSearcher searcher) throws IOException {
return new BoostedQuery.BoostedWeight(searcher);
}
private class BoostedWeight extends Weight {
- Searcher searcher;
+ IndexSearcher searcher;
Weight qWeight;
Map context;
- public BoostedWeight(Searcher searcher) throws IOException {
+ public BoostedWeight(IndexSearcher searcher) throws IOException {
this.searcher = searcher;
this.qWeight = q.weight(searcher);
this.context = boostVal.newContext();
@@ -129,9 +129,9 @@ public class BoostedQuery extends Query {
private final Scorer scorer;
private final DocValues vals;
private final IndexReader reader;
- private final Searcher searcher;
+ private final IndexSearcher searcher;
- private CustomScorer(Similarity similarity, Searcher searcher, IndexReader reader, BoostedQuery.BoostedWeight w,
+ private CustomScorer(Similarity similarity, IndexSearcher searcher, IndexReader reader, BoostedQuery.BoostedWeight w,
Scorer scorer, ValueSource vs) throws IOException {
super(similarity);
this.weight = w;
diff --git a/solr/src/java/org/apache/solr/search/function/DocFreqValueSource.java b/solr/src/java/org/apache/solr/search/function/DocFreqValueSource.java
index 16fae02f419..2fef6ac117e 100755
--- a/solr/src/java/org/apache/solr/search/function/DocFreqValueSource.java
+++ b/solr/src/java/org/apache/solr/search/function/DocFreqValueSource.java
@@ -19,7 +19,7 @@ package org.apache.solr.search.function;
import org.apache.lucene.index.IndexReader;
import org.apache.lucene.index.Term;
-import org.apache.lucene.search.Searcher;
+import org.apache.lucene.search.IndexSearcher;
import org.apache.lucene.util.BytesRef;
import org.apache.solr.search.MutableValueInt;
import org.apache.solr.search.MutableValue;
@@ -240,13 +240,13 @@ public class DocFreqValueSource extends ValueSource {
@Override
public DocValues getValues(Map context, IndexReader reader) throws IOException {
- Searcher searcher = (Searcher)context.get("searcher");
+ IndexSearcher searcher = (IndexSearcher)context.get("searcher");
int docfreq = searcher.docFreq(new Term(indexedField, indexedBytes));
return new ConstIntDocValues(docfreq, this);
}
@Override
- public void createWeight(Map context, Searcher searcher) throws IOException {
+ public void createWeight(Map context, IndexSearcher searcher) throws IOException {
context.put("searcher",searcher);
}
diff --git a/solr/src/java/org/apache/solr/search/function/DualFloatFunction.java b/solr/src/java/org/apache/solr/search/function/DualFloatFunction.java
index d6d21368f78..0b4b54c444f 100755
--- a/solr/src/java/org/apache/solr/search/function/DualFloatFunction.java
+++ b/solr/src/java/org/apache/solr/search/function/DualFloatFunction.java
@@ -18,7 +18,7 @@
package org.apache.solr.search.function;
import org.apache.lucene.index.IndexReader;
-import org.apache.lucene.search.Searcher;
+import org.apache.lucene.search.IndexSearcher;
import java.io.IOException;
import java.util.Map;
@@ -69,7 +69,7 @@ public abstract class DualFloatFunction extends ValueSource {
}
@Override
- public void createWeight(Map context, Searcher searcher) throws IOException {
+ public void createWeight(Map context, IndexSearcher searcher) throws IOException {
a.createWeight(context,searcher);
b.createWeight(context,searcher);
}
diff --git a/solr/src/java/org/apache/solr/search/function/FunctionQuery.java b/solr/src/java/org/apache/solr/search/function/FunctionQuery.java
index f34830f92d9..6a8f5f40072 100644
--- a/solr/src/java/org/apache/solr/search/function/FunctionQuery.java
+++ b/solr/src/java/org/apache/solr/search/function/FunctionQuery.java
@@ -60,12 +60,12 @@ public class FunctionQuery extends Query {
public void extractTerms(Set terms) {}
protected class FunctionWeight extends Weight {
- protected Searcher searcher;
+ protected IndexSearcher searcher;
protected float queryNorm;
protected float queryWeight;
protected Map context;
- public FunctionWeight(Searcher searcher) throws IOException {
+ public FunctionWeight(IndexSearcher searcher) throws IOException {
this.searcher = searcher;
this.context = func.newContext();
func.createWeight(context, searcher);
@@ -184,7 +184,7 @@ public class FunctionQuery extends Query {
@Override
- public Weight createWeight(Searcher searcher) throws IOException {
+ public Weight createWeight(IndexSearcher searcher) throws IOException {
return new FunctionQuery.FunctionWeight(searcher);
}
diff --git a/solr/src/java/org/apache/solr/search/function/IDFValueSource.java b/solr/src/java/org/apache/solr/search/function/IDFValueSource.java
index 0eba06bfc91..5cb86fbb6b3 100755
--- a/solr/src/java/org/apache/solr/search/function/IDFValueSource.java
+++ b/solr/src/java/org/apache/solr/search/function/IDFValueSource.java
@@ -18,7 +18,7 @@
package org.apache.solr.search.function;
import org.apache.lucene.index.*;
-import org.apache.lucene.search.Searcher;
+import org.apache.lucene.search.IndexSearcher;
import org.apache.lucene.search.Similarity;
import org.apache.lucene.util.BytesRef;
import org.apache.solr.util.ByteUtils;
@@ -39,7 +39,7 @@ public class IDFValueSource extends DocFreqValueSource {
@Override
public DocValues getValues(Map context, IndexReader reader) throws IOException {
- Searcher searcher = (Searcher)context.get("searcher");
+ IndexSearcher searcher = (IndexSearcher)context.get("searcher");
Similarity sim = searcher.getSimilarity();
// todo: we need docFreq that takes a BytesRef
String strVal = ByteUtils.UTF8toUTF16(indexedBytes);
diff --git a/solr/src/java/org/apache/solr/search/function/LinearFloatFunction.java b/solr/src/java/org/apache/solr/search/function/LinearFloatFunction.java
index cf523d69e05..79a3a0ac37f 100644
--- a/solr/src/java/org/apache/solr/search/function/LinearFloatFunction.java
+++ b/solr/src/java/org/apache/solr/search/function/LinearFloatFunction.java
@@ -18,7 +18,7 @@
package org.apache.solr.search.function;
import org.apache.lucene.index.IndexReader;
-import org.apache.lucene.search.Searcher;
+import org.apache.lucene.search.IndexSearcher;
import java.io.IOException;
import java.util.Map;
@@ -71,7 +71,7 @@ public class LinearFloatFunction extends ValueSource {
}
@Override
- public void createWeight(Map context, Searcher searcher) throws IOException {
+ public void createWeight(Map context, IndexSearcher searcher) throws IOException {
source.createWeight(context, searcher);
}
diff --git a/solr/src/java/org/apache/solr/search/function/MaxDocValueSource.java b/solr/src/java/org/apache/solr/search/function/MaxDocValueSource.java
index 3594b233c3b..6f4bebcd586 100755
--- a/solr/src/java/org/apache/solr/search/function/MaxDocValueSource.java
+++ b/solr/src/java/org/apache/solr/search/function/MaxDocValueSource.java
@@ -17,7 +17,7 @@
package org.apache.solr.search.function;
import org.apache.lucene.index.IndexReader;
-import org.apache.lucene.search.Searcher;
+import org.apache.lucene.search.IndexSearcher;
import java.io.IOException;
import java.util.Map;
@@ -33,13 +33,13 @@ public class MaxDocValueSource extends ValueSource {
}
@Override
- public void createWeight(Map context, Searcher searcher) throws IOException {
+ public void createWeight(Map context, IndexSearcher searcher) throws IOException {
context.put("searcher",searcher);
}
@Override
public DocValues getValues(Map context, IndexReader reader) throws IOException {
- Searcher searcher = (Searcher)context.get("searcher");
+ IndexSearcher searcher = (IndexSearcher)context.get("searcher");
return new ConstIntDocValues(searcher.maxDoc(), this);
}
diff --git a/solr/src/java/org/apache/solr/search/function/MaxFloatFunction.java b/solr/src/java/org/apache/solr/search/function/MaxFloatFunction.java
index 4add42bd0ac..bab340f36b3 100644
--- a/solr/src/java/org/apache/solr/search/function/MaxFloatFunction.java
+++ b/solr/src/java/org/apache/solr/search/function/MaxFloatFunction.java
@@ -18,7 +18,7 @@
package org.apache.solr.search.function;
import org.apache.lucene.index.IndexReader;
-import org.apache.lucene.search.Searcher;
+import org.apache.lucene.search.IndexSearcher;
import java.io.IOException;
import java.util.Map;
@@ -71,7 +71,7 @@ public class MaxFloatFunction extends ValueSource {
}
@Override
- public void createWeight(Map context, Searcher searcher) throws IOException {
+ public void createWeight(Map context, IndexSearcher searcher) throws IOException {
source.createWeight(context, searcher);
}
diff --git a/solr/src/java/org/apache/solr/search/function/MultiFloatFunction.java b/solr/src/java/org/apache/solr/search/function/MultiFloatFunction.java
index 29dbbe79d4f..331cd649095 100644
--- a/solr/src/java/org/apache/solr/search/function/MultiFloatFunction.java
+++ b/solr/src/java/org/apache/solr/search/function/MultiFloatFunction.java
@@ -17,7 +17,7 @@ package org.apache.solr.search.function;
*/
import org.apache.lucene.index.IndexReader;
-import org.apache.lucene.search.Searcher;
+import org.apache.lucene.search.IndexSearcher;
import java.util.Map;
import java.util.Arrays;
@@ -95,7 +95,7 @@ public abstract class MultiFloatFunction extends ValueSource {
}
@Override
- public void createWeight(Map context, Searcher searcher) throws IOException {
+ public void createWeight(Map context, IndexSearcher searcher) throws IOException {
for (ValueSource source : sources)
source.createWeight(context, searcher);
}
diff --git a/solr/src/java/org/apache/solr/search/function/NormValueSource.java b/solr/src/java/org/apache/solr/search/function/NormValueSource.java
index 7876a742e01..913f4670511 100755
--- a/solr/src/java/org/apache/solr/search/function/NormValueSource.java
+++ b/solr/src/java/org/apache/solr/search/function/NormValueSource.java
@@ -18,7 +18,7 @@
package org.apache.solr.search.function;
import org.apache.lucene.index.IndexReader;
-import org.apache.lucene.search.Searcher;
+import org.apache.lucene.search.IndexSearcher;
import org.apache.lucene.search.Similarity;
import java.io.IOException;
import java.util.Map;
@@ -39,13 +39,13 @@ public class NormValueSource extends ValueSource {
}
@Override
- public void createWeight(Map context, Searcher searcher) throws IOException {
+ public void createWeight(Map context, IndexSearcher searcher) throws IOException {
context.put("searcher",searcher);
}
@Override
public DocValues getValues(Map context, IndexReader reader) throws IOException {
- Searcher searcher = (Searcher)context.get("searcher");
+ IndexSearcher searcher = (IndexSearcher)context.get("searcher");
final Similarity similarity = searcher.getSimilarity();
final byte[] norms = reader.norms(field);
if (norms == null) {
diff --git a/solr/src/java/org/apache/solr/search/function/QueryValueSource.java b/solr/src/java/org/apache/solr/search/function/QueryValueSource.java
index 93ee47059e5..37bc35e7a68 100755
--- a/solr/src/java/org/apache/solr/search/function/QueryValueSource.java
+++ b/solr/src/java/org/apache/solr/search/function/QueryValueSource.java
@@ -59,7 +59,7 @@ public class QueryValueSource extends ValueSource {
}
@Override
- public void createWeight(Map context, Searcher searcher) throws IOException {
+ public void createWeight(Map context, IndexSearcher searcher) throws IOException {
Weight w = q.weight(searcher);
context.put(this, w);
}
diff --git a/solr/src/java/org/apache/solr/search/function/RangeMapFloatFunction.java b/solr/src/java/org/apache/solr/search/function/RangeMapFloatFunction.java
index 7823f343a5b..32544e5048f 100755
--- a/solr/src/java/org/apache/solr/search/function/RangeMapFloatFunction.java
+++ b/solr/src/java/org/apache/solr/search/function/RangeMapFloatFunction.java
@@ -18,7 +18,7 @@
package org.apache.solr.search.function;
import org.apache.lucene.index.IndexReader;
-import org.apache.lucene.search.Searcher;
+import org.apache.lucene.search.IndexSearcher;
import java.io.IOException;
import java.util.Map;
@@ -76,7 +76,7 @@ public class RangeMapFloatFunction extends ValueSource {
}
@Override
- public void createWeight(Map context, Searcher searcher) throws IOException {
+ public void createWeight(Map context, IndexSearcher searcher) throws IOException {
source.createWeight(context, searcher);
}
diff --git a/solr/src/java/org/apache/solr/search/function/ReciprocalFloatFunction.java b/solr/src/java/org/apache/solr/search/function/ReciprocalFloatFunction.java
index a72dcf21af4..fcfa8324c22 100644
--- a/solr/src/java/org/apache/solr/search/function/ReciprocalFloatFunction.java
+++ b/solr/src/java/org/apache/solr/search/function/ReciprocalFloatFunction.java
@@ -18,7 +18,7 @@
package org.apache.solr.search.function;
import org.apache.lucene.index.IndexReader;
-import org.apache.lucene.search.Searcher;
+import org.apache.lucene.search.IndexSearcher;
import java.io.IOException;
import java.util.Map;
@@ -84,7 +84,7 @@ public class ReciprocalFloatFunction extends ValueSource {
}
@Override
- public void createWeight(Map context, Searcher searcher) throws IOException {
+ public void createWeight(Map context, IndexSearcher searcher) throws IOException {
source.createWeight(context, searcher);
}
diff --git a/solr/src/java/org/apache/solr/search/function/ScaleFloatFunction.java b/solr/src/java/org/apache/solr/search/function/ScaleFloatFunction.java
index 229f37d7de9..6e8cd65c343 100755
--- a/solr/src/java/org/apache/solr/search/function/ScaleFloatFunction.java
+++ b/solr/src/java/org/apache/solr/search/function/ScaleFloatFunction.java
@@ -18,7 +18,7 @@
package org.apache.solr.search.function;
import org.apache.lucene.index.IndexReader;
-import org.apache.lucene.search.Searcher;
+import org.apache.lucene.search.IndexSearcher;
import java.io.IOException;
import java.util.Map;
@@ -110,7 +110,7 @@ public class ScaleFloatFunction extends ValueSource {
}
@Override
- public void createWeight(Map context, Searcher searcher) throws IOException {
+ public void createWeight(Map context, IndexSearcher searcher) throws IOException {
source.createWeight(context, searcher);
}
diff --git a/solr/src/java/org/apache/solr/search/function/SingleFunction.java b/solr/src/java/org/apache/solr/search/function/SingleFunction.java
index 06c7bae2cd3..e040dc5b250 100755
--- a/solr/src/java/org/apache/solr/search/function/SingleFunction.java
+++ b/solr/src/java/org/apache/solr/search/function/SingleFunction.java
@@ -17,7 +17,7 @@
package org.apache.solr.search.function;
-import org.apache.lucene.search.Searcher;
+import org.apache.lucene.search.IndexSearcher;
import java.io.IOException;
import java.util.Map;
@@ -49,7 +49,7 @@ import java.util.Map;
}
@Override
- public void createWeight(Map context, Searcher searcher) throws IOException {
+ public void createWeight(Map context, IndexSearcher searcher) throws IOException {
source.createWeight(context, searcher);
}
}
\ No newline at end of file
diff --git a/solr/src/java/org/apache/solr/search/function/TFValueSource.java b/solr/src/java/org/apache/solr/search/function/TFValueSource.java
index 1b5e130555f..fa82de53a25 100755
--- a/solr/src/java/org/apache/solr/search/function/TFValueSource.java
+++ b/solr/src/java/org/apache/solr/search/function/TFValueSource.java
@@ -2,7 +2,7 @@ package org.apache.solr.search.function;
import org.apache.lucene.index.*;
import org.apache.lucene.search.DocIdSetIterator;
-import org.apache.lucene.search.Searcher;
+import org.apache.lucene.search.IndexSearcher;
import org.apache.lucene.search.Similarity;
import org.apache.lucene.util.BytesRef;
import org.apache.solr.common.SolrException;
@@ -25,7 +25,7 @@ public class TFValueSource extends TermFreqValueSource {
// use MultiFields, just in case someone did a top() function
Fields fields = MultiFields.getFields(reader);
final Terms terms = fields.terms(field);
- final Similarity similarity = ((Searcher)context.get("searcher")).getSimilarity();
+ final Similarity similarity = ((IndexSearcher)context.get("searcher")).getSimilarity();
return new FloatDocValues(this) {
DocsEnum docs ;
diff --git a/solr/src/java/org/apache/solr/search/function/ValueSource.java b/solr/src/java/org/apache/solr/search/function/ValueSource.java
index 6db9f73c10f..48a56ce15cf 100644
--- a/solr/src/java/org/apache/solr/search/function/ValueSource.java
+++ b/solr/src/java/org/apache/solr/search/function/ValueSource.java
@@ -21,7 +21,7 @@ import org.apache.lucene.index.IndexReader;
import org.apache.lucene.search.FieldComparator;
import org.apache.lucene.search.FieldComparatorSource;
import org.apache.lucene.search.Scorer;
-import org.apache.lucene.search.Searcher;
+import org.apache.lucene.search.IndexSearcher;
import org.apache.lucene.search.SortField;
import org.apache.lucene.util.Bits;
import org.apache.lucene.index.MultiFields;
@@ -84,7 +84,7 @@ public abstract class ValueSource implements Serializable {
* weight info in the context. The context object will be passed to getValues()
* where this info can be retrieved.
*/
- public void createWeight(Map context, Searcher searcher) throws IOException {
+ public void createWeight(Map context, IndexSearcher searcher) throws IOException {
}
/**
diff --git a/solr/src/java/org/apache/solr/search/function/ValueSourceRangeFilter.java b/solr/src/java/org/apache/solr/search/function/ValueSourceRangeFilter.java
index 340498121bb..581792c9039 100755
--- a/solr/src/java/org/apache/solr/search/function/ValueSourceRangeFilter.java
+++ b/solr/src/java/org/apache/solr/search/function/ValueSourceRangeFilter.java
@@ -19,7 +19,7 @@ package org.apache.solr.search.function;
import org.apache.lucene.search.DocIdSet;
import org.apache.lucene.search.DocIdSetIterator;
-import org.apache.lucene.search.Searcher;
+import org.apache.lucene.search.IndexSearcher;
import org.apache.lucene.index.IndexReader;
import org.apache.solr.search.SolrFilter;
@@ -58,7 +58,7 @@ public class ValueSourceRangeFilter extends SolrFilter {
}
@Override
- public void createWeight(Map context, Searcher searcher) throws IOException {
+ public void createWeight(Map context, IndexSearcher searcher) throws IOException {
valueSource.createWeight(context, searcher);
}
diff --git a/solr/src/java/org/apache/solr/search/function/VectorValueSource.java b/solr/src/java/org/apache/solr/search/function/VectorValueSource.java
index 4a8542d855f..5947df8041e 100644
--- a/solr/src/java/org/apache/solr/search/function/VectorValueSource.java
+++ b/solr/src/java/org/apache/solr/search/function/VectorValueSource.java
@@ -17,7 +17,7 @@ package org.apache.solr.search.function;
*/
import org.apache.lucene.index.IndexReader;
-import org.apache.lucene.search.Searcher;
+import org.apache.lucene.search.IndexSearcher;
import org.apache.solr.search.function.MultiValueSource;
import org.apache.solr.search.function.DocValues;
import org.apache.solr.search.function.ValueSource;
@@ -178,7 +178,7 @@ public class VectorValueSource extends MultiValueSource {
};
}
- public void createWeight(Map context, Searcher searcher) throws IOException {
+ public void createWeight(Map context, IndexSearcher searcher) throws IOException {
for (ValueSource source : sources)
source.createWeight(context, searcher);
}
diff --git a/solr/src/java/org/apache/solr/search/function/distance/GeohashHaversineFunction.java b/solr/src/java/org/apache/solr/search/function/distance/GeohashHaversineFunction.java
index b461658cd0d..cdcc182d13f 100644
--- a/solr/src/java/org/apache/solr/search/function/distance/GeohashHaversineFunction.java
+++ b/solr/src/java/org/apache/solr/search/function/distance/GeohashHaversineFunction.java
@@ -21,7 +21,7 @@ import org.apache.lucene.spatial.DistanceUtils;
import org.apache.solr.search.function.ValueSource;
import org.apache.solr.search.function.DocValues;
import org.apache.lucene.index.IndexReader;
-import org.apache.lucene.search.Searcher;
+import org.apache.lucene.search.IndexSearcher;
import org.apache.lucene.spatial.geohash.GeoHashUtils;
import java.util.Map;
@@ -108,7 +108,7 @@ public class GeohashHaversineFunction extends ValueSource {
}
@Override
- public void createWeight(Map context, Searcher searcher) throws IOException {
+ public void createWeight(Map context, IndexSearcher searcher) throws IOException {
geoHash1.createWeight(context, searcher);
geoHash2.createWeight(context, searcher);
}
diff --git a/solr/src/java/org/apache/solr/search/function/distance/HaversineConstFunction.java b/solr/src/java/org/apache/solr/search/function/distance/HaversineConstFunction.java
index 853d0640fd3..b1796b2646a 100755
--- a/solr/src/java/org/apache/solr/search/function/distance/HaversineConstFunction.java
+++ b/solr/src/java/org/apache/solr/search/function/distance/HaversineConstFunction.java
@@ -18,7 +18,7 @@ package org.apache.solr.search.function.distance;
import org.apache.lucene.index.IndexReader;
import org.apache.lucene.queryParser.ParseException;
-import org.apache.lucene.search.Searcher;
+import org.apache.lucene.search.IndexSearcher;
import org.apache.lucene.spatial.DistanceUtils;
import org.apache.lucene.spatial.tier.InvalidGeoException;
import org.apache.solr.common.params.SpatialParams;
@@ -234,7 +234,7 @@ public class HaversineConstFunction extends ValueSource {
}
@Override
- public void createWeight(Map context, Searcher searcher) throws IOException {
+ public void createWeight(Map context, IndexSearcher searcher) throws IOException {
latSource.createWeight(context, searcher);
lonSource.createWeight(context, searcher);
}
diff --git a/solr/src/java/org/apache/solr/search/function/distance/HaversineFunction.java b/solr/src/java/org/apache/solr/search/function/distance/HaversineFunction.java
index c442b06e700..673840622c0 100644
--- a/solr/src/java/org/apache/solr/search/function/distance/HaversineFunction.java
+++ b/solr/src/java/org/apache/solr/search/function/distance/HaversineFunction.java
@@ -17,7 +17,7 @@ package org.apache.solr.search.function.distance;
*/
import org.apache.lucene.index.IndexReader;
-import org.apache.lucene.search.Searcher;
+import org.apache.lucene.search.IndexSearcher;
import org.apache.lucene.spatial.DistanceUtils;
import org.apache.solr.common.SolrException;
import org.apache.solr.search.function.MultiValueSource;
@@ -132,7 +132,7 @@ public class HaversineFunction extends ValueSource {
}
@Override
- public void createWeight(Map context, Searcher searcher) throws IOException {
+ public void createWeight(Map context, IndexSearcher searcher) throws IOException {
p1.createWeight(context, searcher);
p2.createWeight(context, searcher);
diff --git a/solr/src/java/org/apache/solr/search/function/distance/VectorDistanceFunction.java b/solr/src/java/org/apache/solr/search/function/distance/VectorDistanceFunction.java
index 5d876836b89..95495bd3f8e 100644
--- a/solr/src/java/org/apache/solr/search/function/distance/VectorDistanceFunction.java
+++ b/solr/src/java/org/apache/solr/search/function/distance/VectorDistanceFunction.java
@@ -17,7 +17,7 @@ package org.apache.solr.search.function.distance;
*/
import org.apache.lucene.index.IndexReader;
-import org.apache.lucene.search.Searcher;
+import org.apache.lucene.search.IndexSearcher;
import org.apache.lucene.spatial.DistanceUtils;
import org.apache.solr.common.SolrException;
import org.apache.solr.search.function.DocValues;
@@ -130,7 +130,7 @@ public class VectorDistanceFunction extends ValueSource {
}
@Override
- public void createWeight(Map context, Searcher searcher) throws IOException {
+ public void createWeight(Map context, IndexSearcher searcher) throws IOException {
source1.createWeight(context, searcher);
source2.createWeight(context, searcher);
}