LUCENE-2837: collapse Searcher/Searchable into IndexSearcher; remove contrib/remote and MultiSearcher; absorb ParallelMultiSearcher into IndexSearcher as an optional ExecutorService passed to its ctor

git-svn-id: https://svn.apache.org/repos/asf/lucene/dev/trunk@1055416 13f79535-47bb-0310-9956-ffa450edef68
Michael McCandless 2011-01-05 11:16:40 +00:00
parent 570e19e579
commit 87274d00ac
117 changed files with 785 additions and 3748 deletions

View File

@@ -123,6 +123,11 @@ Changes in backwards compatibility policy
you really want a top-level norms, use MultiNorms or SlowMultiReaderWrapper.
(Uwe Schindler, Robert Muir)
* LUCENE-2837: Collapsed Searcher, Searchable into IndexSearcher;
removed contrib/remote and MultiSearcher (Mike McCandless); absorbed
ParallelMultiSearcher into IndexSearcher as an optional
ExecutorService passed to its ctor. (Mike McCandless)
Changes in Runtime Behavior
* LUCENE-2650, LUCENE-2825: The behavior of FSDirectory.open has changed. On 64-bit
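For illustration (this usage example is not part of the committed diff), a minimal sketch of the new ctor described in the entry above, assuming an existing index whose documents carry a hypothetical "contents" field; passing an ExecutorService makes IndexSearcher search the segments concurrently, covering what ParallelMultiSearcher used to do:

import java.io.File;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import org.apache.lucene.index.IndexReader;
import org.apache.lucene.index.Term;
import org.apache.lucene.search.IndexSearcher;
import org.apache.lucene.search.TermQuery;
import org.apache.lucene.search.TopDocs;
import org.apache.lucene.store.FSDirectory;

public class ParallelSearchSketch {
  public static void main(String[] args) throws Exception {
    IndexReader reader = IndexReader.open(FSDirectory.open(new File(args[0])), true); // read-only
    ExecutorService executor = Executors.newFixedThreadPool(4);
    try {
      // The new optional ctor argument: with an executor, IndexSearcher
      // searches each segment concurrently instead of sequentially.
      IndexSearcher searcher = new IndexSearcher(reader, executor);
      TopDocs hits = searcher.search(new TermQuery(new Term("contents", "lucene")), 10);
      System.out.println("totalHits=" + hits.totalHits);
      searcher.close();
    } finally {
      executor.shutdown(); // the searcher does not own or shut down the executor
      reader.close();
    }
  }
}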

View File

@@ -44,7 +44,6 @@ import org.apache.lucene.index.Term;
import org.apache.lucene.index.IndexWriterConfig.OpenMode;
import org.apache.lucene.search.IndexSearcher;
import org.apache.lucene.search.ScoreDoc;
import org.apache.lucene.search.Searcher;
import org.apache.lucene.search.TermQuery;
import org.apache.lucene.store.FSDirectory;
import org.apache.lucene.util.Version;
@@ -268,7 +267,7 @@ public class IndexTask extends Task {
FSDirectory dir = FSDirectory.open(indexDir);
try {
Searcher searcher = null;
IndexSearcher searcher = null;
boolean checkLastModified = false;
if (!create) {
try {

View File

@@ -18,14 +18,13 @@ package org.apache.lucene.ant;
*/
import java.io.File;
import java.io.IOException;
import java.io.IOException; // javadoc
import org.apache.lucene.analysis.Analyzer;
import org.apache.lucene.analysis.core.StopAnalyzer;
import org.apache.lucene.queryParser.QueryParser;
import org.apache.lucene.search.IndexSearcher;
import org.apache.lucene.search.Query;
import org.apache.lucene.search.Searcher;
import org.apache.lucene.store.Directory;
import org.apache.tools.ant.Project;
import org.apache.tools.ant.types.FileSet;
@@ -39,7 +38,7 @@ public class IndexTaskTest extends LuceneTestCase {
private final static String docHandler =
"org.apache.lucene.ant.FileExtensionDocumentHandler";
private Searcher searcher;
private IndexSearcher searcher;
private Analyzer analyzer;
private Directory dir;

View File

@@ -34,7 +34,6 @@ import org.apache.lucene.search.IndexSearcher;
import org.apache.lucene.search.Query;
import org.apache.lucene.search.ScoreDoc;
import org.apache.lucene.search.Scorer;
import org.apache.lucene.search.Searcher;
import org.apache.lucene.search.TopScoreDocCollector;
import org.apache.lucene.store.FSDirectory;
import org.apache.lucene.util.Version;
@@ -92,7 +91,7 @@ public class SearchFiles {
IndexReader reader = IndexReader.open(FSDirectory.open(new File(index)), true); // only searching, so read-only=true
Searcher searcher = new IndexSearcher(reader);
IndexSearcher searcher = new IndexSearcher(reader);
Analyzer analyzer = new StandardAnalyzer(Version.LUCENE_CURRENT);
BufferedReader in = null;
@@ -144,7 +143,7 @@
* This simulates the streaming search use case, where all hits are supposed to
* be processed, regardless of their relevance.
*/
public static void doStreamingSearch(final Searcher searcher, Query query) throws IOException {
public static void doStreamingSearch(final IndexSearcher searcher, Query query) throws IOException {
Collector streamingHitCollector = new Collector() {
private Scorer scorer;
private int docBase;
@@ -186,7 +185,7 @@
* is executed another time and all hits are collected.
*
*/
public static void doPagingSearch(BufferedReader in, Searcher searcher, Query query,
public static void doPagingSearch(BufferedReader in, IndexSearcher searcher, Query query,
int hitsPerPage, boolean raw, boolean interactive) throws IOException {
// Collect enough docs to show 5 pages

View File

@@ -58,7 +58,6 @@ import org.apache.lucene.search.BooleanQuery;
import org.apache.lucene.search.FilteredQuery;
import org.apache.lucene.search.IndexSearcher;
import org.apache.lucene.search.MultiPhraseQuery;
import org.apache.lucene.search.MultiSearcher;
import org.apache.lucene.search.MultiTermQuery;
import org.apache.lucene.search.NumericRangeQuery;
import org.apache.lucene.search.PhraseQuery;
@@ -1301,68 +1300,6 @@ public class HighlighterTest extends BaseTokenStreamTestCase implements Formatter
assertEquals("XHTML Encoding should have worked:", rawDocContent, decodedSnippet);
}
public void testMultiSearcher() throws Exception {
// setup index 1
Directory ramDir1 = newDirectory();
IndexWriter writer1 = new IndexWriter(ramDir1, newIndexWriterConfig(
TEST_VERSION_CURRENT, new MockAnalyzer(MockTokenizer.SIMPLE, true, MockTokenFilter.ENGLISH_STOPSET, true)));
Document d = new Document();
Field f = new Field(FIELD_NAME, "multiOne", Field.Store.YES, Field.Index.ANALYZED);
d.add(f);
writer1.addDocument(d);
writer1.optimize();
writer1.close();
IndexReader reader1 = IndexReader.open(ramDir1, true);
// setup index 2
Directory ramDir2 = newDirectory();
IndexWriter writer2 = new IndexWriter(ramDir2, newIndexWriterConfig(
TEST_VERSION_CURRENT, new MockAnalyzer(MockTokenizer.SIMPLE, true, MockTokenFilter.ENGLISH_STOPSET, true)));
d = new Document();
f = new Field(FIELD_NAME, "multiTwo", Field.Store.YES, Field.Index.ANALYZED);
d.add(f);
writer2.addDocument(d);
writer2.optimize();
writer2.close();
IndexReader reader2 = IndexReader.open(ramDir2, true);
IndexSearcher searchers[] = new IndexSearcher[2];
searchers[0] = new IndexSearcher(ramDir1, true);
searchers[1] = new IndexSearcher(ramDir2, true);
MultiSearcher multiSearcher = new MultiSearcher(searchers);
QueryParser parser = new QueryParser(TEST_VERSION_CURRENT, FIELD_NAME, new MockAnalyzer(MockTokenizer.SIMPLE, true, MockTokenFilter.ENGLISH_STOPSET, true));
parser.setMultiTermRewriteMethod(MultiTermQuery.SCORING_BOOLEAN_QUERY_REWRITE);
query = parser.parse("multi*");
if (VERBOSE) System.out.println("Searching for: " + query.toString(FIELD_NAME));
// at this point the multisearcher calls combine(query[])
hits = multiSearcher.search(query, null, 1000);
// query = QueryParser.parse("multi*", FIELD_NAME, new StandardAnalyzer(TEST_VERSION));
Query expandedQueries[] = new Query[2];
expandedQueries[0] = query.rewrite(reader1);
expandedQueries[1] = query.rewrite(reader2);
query = query.combine(expandedQueries);
// create an instance of the highlighter with the tags used to surround
// highlighted text
Highlighter highlighter = new Highlighter(this, new QueryTermScorer(query));
for (int i = 0; i < hits.totalHits; i++) {
String text = multiSearcher.doc(hits.scoreDocs[i].doc).get(FIELD_NAME);
TokenStream tokenStream = analyzer.tokenStream(FIELD_NAME, new StringReader(text));
String highlightedText = highlighter.getBestFragment(tokenStream, text);
if (VERBOSE) System.out.println(highlightedText);
}
assertTrue("Failed to find correct number of highlights " + numHighlights + " found",
numHighlights == 2);
reader1.close();
reader2.close();
searchers[0].close();
searchers[1].close();
ramDir1.close();
ramDir2.close();
}
public void testFieldSpecificHighlighting() throws Exception {
TestHighlightRunner helper = new TestHighlightRunner() {
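The removed testMultiSearcher above has no direct replacement in this commit; a hedged sketch of how the same two-index highlighting scenario maps onto the surviving API, assuming the ramDir1/ramDir2 setup, parser, analyzer and FIELD_NAME from the deleted code, with MultiReader standing in for MultiSearcher:

IndexReader reader1 = IndexReader.open(ramDir1, true);
IndexReader reader2 = IndexReader.open(ramDir2, true);
MultiReader multiReader = new MultiReader(reader1, reader2); // one logical index over both
IndexSearcher searcher = new IndexSearcher(multiReader);
Query query = parser.parse("multi*");
TopDocs hits = searcher.search(query, null, 1000);
// Rewriting against the top-level MultiReader expands multi* once for both
// sub-indexes, so QueryTermScorer sees the concrete terms from each index.
Highlighter highlighter = new Highlighter(new QueryTermScorer(query.rewrite(multiReader)));
for (ScoreDoc sd : hits.scoreDocs) {
  String text = searcher.doc(sd.doc).get(FIELD_NAME);
  TokenStream tokenStream = analyzer.tokenStream(FIELD_NAME, new StringReader(text));
  System.out.println(highlighter.getBestFragment(tokenStream, text));
}
searcher.close();
multiReader.close(); // also closes reader1/reader2 by default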

View File

@@ -53,11 +53,10 @@ import org.apache.lucene.queryParser.MultiFieldQueryParser;
import org.apache.lucene.queryParser.ParseException;
import org.apache.lucene.search.Collector;
import org.apache.lucene.search.Explanation;
import org.apache.lucene.search.IndexSearcher;
import org.apache.lucene.search.Query;
import org.apache.lucene.search.ScoreDoc;
import org.apache.lucene.search.Scorer;
import org.apache.lucene.search.Searcher;
import org.apache.lucene.search.IndexSearcher;
import org.apache.lucene.store.FSDirectory;
import org.apache.lucene.util.Version;
import org.apache.lucene.util.BytesRef;
@@ -73,7 +72,7 @@ class LuceneMethods {
private List<String> fields; //Fields as a vector
private List<String> indexedFields; //Fields as a vector
private String fieldsArray[]; //Fields as an array
private Searcher searcher;
private IndexSearcher searcher;
private Query query; //current query string
private String analyzerClassFQN = null; // Analyzer class, if NULL, use default Analyzer

View File

@@ -51,7 +51,6 @@ import org.apache.lucene.index.FieldInvertState;
import org.apache.lucene.search.Collector;
import org.apache.lucene.search.IndexSearcher;
import org.apache.lucene.search.Query;
import org.apache.lucene.search.Searcher;
import org.apache.lucene.search.Scorer;
import org.apache.lucene.search.Similarity;
import org.apache.lucene.store.RAMDirectory; // for javadocs
@@ -421,7 +420,7 @@ public class MemoryIndex implements Serializable {
if (query == null)
throw new IllegalArgumentException("query must not be null");
Searcher searcher = createSearcher();
IndexSearcher searcher = createSearcher();
try {
final float[] scores = new float[1]; // inits to 0.0f (no match)
searcher.search(query, new Collector() {
@@ -738,7 +737,7 @@
*/
private final class MemoryIndexReader extends IndexReader {
private Searcher searcher; // needed to find searcher.getSimilarity()
private IndexSearcher searcher; // needed to find searcher.getSimilarity()
private MemoryIndexReader() {
super(); // avoid as much superclass baggage as possible
@@ -1135,7 +1134,7 @@
return Similarity.getDefault();
}
private void setSearcher(Searcher searcher) {
private void setSearcher(IndexSearcher searcher) {
this.searcher = searcher;
}

View File

@@ -23,7 +23,7 @@ import org.apache.lucene.index.IndexReader;
import org.apache.lucene.search.BooleanQuery;
import org.apache.lucene.search.DefaultSimilarity;
import org.apache.lucene.search.Query;
import org.apache.lucene.search.Searcher;
import org.apache.lucene.search.IndexSearcher;
import org.apache.lucene.search.Similarity;
/**
* The BoostingQuery class can be used to effectively demote results that match a given query.
@@ -58,7 +58,7 @@ public class BoostingQuery extends Query {
BooleanQuery result = new BooleanQuery() {
@Override
public Similarity getSimilarity(Searcher searcher) {
public Similarity getSimilarity(IndexSearcher searcher) {
return new DefaultSimilarity() {
@Override
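As the class javadoc above says, BoostingQuery demotes (rather than excludes) documents matching a second query; a brief hypothetical usage sketch, assuming an open IndexSearcher named searcher and an illustrative "body" field:

// match: the hits we want; context: hits also matching it stay in the
// result set but have their score multiplied by the boost (0.01f here).
Query match = new TermQuery(new Term("body", "apache"));
Query context = new TermQuery(new Term("body", "deprecated"));
Query demoted = new BoostingQuery(match, context, 0.01f);
TopDocs hits = searcher.search(demoted, null, 10);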

View File

@@ -360,7 +360,7 @@ public class FuzzyLikeThisQuery extends Query
this.ignoreTF=ignoreTF;
}
@Override
public Similarity getSimilarity(Searcher searcher)
public Similarity getSimilarity(IndexSearcher searcher)
{
Similarity result = super.getSimilarity(searcher);
result = new SimilarityDelegator(result) {

View File

@@ -29,11 +29,10 @@ import org.apache.lucene.search.BooleanClause;
import org.apache.lucene.search.BooleanQuery;
import org.apache.lucene.search.CachingWrapperFilter;
import org.apache.lucene.search.Filter;
import org.apache.lucene.search.IndexSearcher;
import org.apache.lucene.search.MatchAllDocsQuery;
import org.apache.lucene.search.Query;
import org.apache.lucene.search.QueryWrapperFilter;
import org.apache.lucene.search.Searcher;
import org.apache.lucene.search.IndexSearcher;
import org.apache.lucene.search.TermQuery;
import org.apache.lucene.search.TermRangeFilter;
import org.apache.lucene.search.TopDocs;
@@ -195,7 +194,7 @@ public class ChainedFilterTest extends LuceneTestCase {
IndexReader reader = writer.getReader();
writer.close();
Searcher searcher = new IndexSearcher(reader);
IndexSearcher searcher = new IndexSearcher(reader);
Query query = new TermQuery(new Term("none", "none"));

View File

@@ -27,10 +27,8 @@ import org.apache.lucene.index.IndexWriter;
import org.apache.lucene.index.Term;
import org.apache.lucene.index.IndexWriterConfig.OpenMode;
import org.apache.lucene.search.IndexSearcher;
import org.apache.lucene.search.MultiSearcher;
import org.apache.lucene.search.spans.SpanFirstQuery;
import org.apache.lucene.search.spans.SpanMultiTermQueryWrapper;
import org.apache.lucene.search.spans.SpanNearQuery;
import org.apache.lucene.search.spans.SpanQuery;
import org.apache.lucene.store.Directory;
import org.apache.lucene.store.LockObtainFailedException;
@@ -85,33 +83,6 @@ public class TestSpanRegexQuery extends LuceneTestCase {
directory.close();
}
public void testSpanRegexBug() throws CorruptIndexException, IOException {
createRAMDirectories();
SpanQuery srq = new SpanMultiTermQueryWrapper<RegexQuery>(new RegexQuery(new Term("field", "a.*")));
SpanQuery stq = new SpanMultiTermQueryWrapper<RegexQuery>(new RegexQuery(new Term("field", "b.*")));
SpanNearQuery query = new SpanNearQuery(new SpanQuery[] { srq, stq }, 6,
true);
// 1. Search the same store which works
IndexSearcher[] arrSearcher = new IndexSearcher[2];
arrSearcher[0] = new IndexSearcher(indexStoreA, true);
arrSearcher[1] = new IndexSearcher(indexStoreB, true);
MultiSearcher searcher = new MultiSearcher(arrSearcher);
int numHits = searcher.search(query, null, 1000).totalHits;
arrSearcher[0].close();
arrSearcher[1].close();
// Will fail here
// We expect 2 but only one matched
// The rewriter function only writes it once, on the first IndexSearcher
// So it's using term: a1 b1 to search on the second IndexSearcher
// As a result, it won't match the document in the second IndexSearcher
assertEquals(2, numHits);
indexStoreA.close();
indexStoreB.close();
}
private void createRAMDirectories() throws CorruptIndexException,
LockObtainFailedException, IOException {
// creating a document to store
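The removed test guarded against a MultiTermQuery rewriting only against the first sub-searcher; that case cannot arise once both sub-indexes sit under a single reader. A hedged sketch of the post-removal equivalent, assuming indexStoreA/indexStoreB from createRAMDirectories() and the same SpanNearQuery named query:

IndexReader readerA = IndexReader.open(indexStoreA, true);
IndexReader readerB = IndexReader.open(indexStoreB, true);
MultiReader multiReader = new MultiReader(readerA, readerB);
IndexSearcher searcher = new IndexSearcher(multiReader);
// The span query rewrites once against the combined reader, so both
// sub-indexes contribute matches.
int numHits = searcher.search(query, null, 1000).totalHits;
assertEquals(2, numHits);
searcher.close();
multiReader.close(); // also closes readerA/readerB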

View File

@@ -21,7 +21,6 @@ import java.io.IOException;
import org.apache.lucene.index.IndexReader;
import org.apache.lucene.search.IndexSearcher;
import org.apache.lucene.search.Searcher;
import org.apache.lucene.search.Collector;
import org.apache.lucene.search.Scorer;
import org.apache.lucene.search.Query;
@@ -122,7 +121,7 @@ public class BooleanQueryTst {
/* if (verbose) System.out.println("Lucene: " + query.toString()); */
TestCollector tc = new TestCollector();
Searcher searcher = new IndexSearcher(dBase.getDb(), true);
IndexSearcher searcher = new IndexSearcher(dBase.getDb(), true);
try {
searcher.search(query, tc);
} finally {

View File

@@ -1,34 +0,0 @@
<?xml version="1.0"?>
<!--
Licensed to the Apache Software Foundation (ASF) under one or more
contributor license agreements. See the NOTICE file distributed with
this work for additional information regarding copyright ownership.
The ASF licenses this file to You under the Apache License, Version 2.0
the "License"); you may not use this file except in compliance with
the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
-->
<project name="remote" default="default">
<description>
Remote Searchable based on RMI
</description>
<import file="../contrib-build.xml"/>
<!-- Overrides common.compile-core to add rmic -->
<target name="compile-core" depends="common.compile-core" description="Compiles core classes, including rmic">
<rmic classname="org.apache.lucene.search.RemoteSearchable" base="${build.dir}/classes/java" stubversion="1.2">
<classpath refid="classpath" />
</rmic>
</target>
</project>

View File

@@ -1,36 +0,0 @@
<project xmlns="http://maven.apache.org/POM/4.0.0"
xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/maven-v4_0_0.xsd">
<!--
Licensed to the Apache Software Foundation (ASF) under one
or more contributor license agreements. See the NOTICE file
distributed with this work for additional information
regarding copyright ownership. The ASF licenses this file
to you under the Apache License, Version 2.0 (the
"License"); you may not use this file except in compliance
with the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing,
software distributed under the License is distributed on an
"AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
KIND, either express or implied. See the License for the
specific language governing permissions and limitations
under the License.
-->
<modelVersion>4.0.0</modelVersion>
<parent>
<groupId>org.apache.lucene</groupId>
<artifactId>lucene-contrib</artifactId>
<version>@version@</version>
</parent>
<groupId>org.apache.lucene</groupId>
<artifactId>lucene-remote</artifactId>
<name>Lucene Remote</name>
<version>@version@</version>
<description>Remote Searchable based on RMI</description>
<packaging>jar</packaging>
</project>

View File

@@ -1,44 +0,0 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.lucene.search;
import java.rmi.Remote;
/**
* Marker interface to enable subclasses of {@link org.apache.lucene.search.Searchable} to be used via
* Java RMI. Classes implementing this interface can be used as an RMI
* "remote object".
* <p>
* {@link RMIRemoteSearchable} extends {@link org.apache.lucene.search.Searchable} and can transparently
* be used as such.
* <p>
* Example usage:
*
* <pre>
* RMIRemoteSearchable remoteObject = ...;
* String remoteObjectName = ...;
* Naming.rebind (remoteObjectName, remoteObject);
* Searchable luceneSearchable = (Searchable) Naming.lookup (remoteObjectName);
* </pre>
*
* </p>
* </p>
*/
public interface RMIRemoteSearchable extends Searchable, Remote {
}

View File

@@ -1,57 +0,0 @@
package org.apache.lucene.search;
/**
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
import java.io.IOException;
import org.apache.lucene.index.IndexReader;
/**
* Provides caching of {@link org.apache.lucene.search.Filter}s themselves on the remote end of an RMI connection.
* The cache is keyed on Filter's hashCode(), so if it sees the same filter twice
* it will reuse the original version.
* <p/>
* NOTE: This does NOT cache the Filter bits, but rather the Filter itself.
* Thus, this works hand-in-hand with {@link org.apache.lucene.search.CachingWrapperFilter} to keep both
* the Filter cache and the Filter bits on the remote end, close to the searcher.
* <p/>
* Usage:
* <p/>
* To cache a result you must do something like
* RemoteCachingWrapperFilter f = new RemoteCachingWrapperFilter(new CachingWrapperFilter(myFilter));
* <p/>
*/
public class RemoteCachingWrapperFilter extends Filter {
protected Filter filter;
public RemoteCachingWrapperFilter(Filter filter) {
this.filter = filter;
}
/**
* Uses the {@link org.apache.lucene.search.FilterManager} to keep the cache for a filter on the
* searcher side of a remote connection.
* @param reader the index reader for the Filter
* @return the DocIdSet
*/
@Override
public DocIdSet getDocIdSet(IndexReader reader) throws IOException {
Filter cachedFilter = FilterManager.getInstance().getFilter(filter);
return cachedFilter.getDocIdSet(reader);
}
}

View File

@@ -1,119 +0,0 @@
package org.apache.lucene.search;
/**
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
import org.apache.lucene.document.Document;
import org.apache.lucene.document.FieldSelector;
import org.apache.lucene.index.Term;
import org.apache.lucene.index.CorruptIndexException;
import org.apache.lucene.store.FSDirectory;
import java.io.IOException;
import java.io.File;
import java.rmi.Naming;
import java.rmi.RMISecurityManager;
import java.rmi.RemoteException;
import java.rmi.server.UnicastRemoteObject;
/**
* A remote searchable implementation.
*/
public class RemoteSearchable
extends UnicastRemoteObject
implements RMIRemoteSearchable {
private Searchable local;
/** Constructs and exports a remote searcher. */
public RemoteSearchable(Searchable local) throws RemoteException {
super();
this.local = local;
}
public void search(Weight weight, Filter filter, Collector results)
throws IOException {
local.search(weight, filter, results);
}
public void close() throws IOException {
local.close();
}
public int docFreq(Term term) throws IOException {
return local.docFreq(term);
}
public int[] docFreqs(Term[] terms) throws IOException {
return local.docFreqs(terms);
}
public int maxDoc() throws IOException {
return local.maxDoc();
}
public TopDocs search(Weight weight, Filter filter, int n) throws IOException {
return local.search(weight, filter, n);
}
public TopFieldDocs search(Weight weight, Filter filter, int n, Sort sort)
throws IOException {
return local.search (weight, filter, n, sort);
}
public Document doc(int i) throws CorruptIndexException, IOException {
return local.doc(i);
}
public Document doc(int i, FieldSelector fieldSelector) throws CorruptIndexException, IOException {
return local.doc(i, fieldSelector);
}
public Query rewrite(Query original) throws IOException {
return local.rewrite(original);
}
public Explanation explain(Weight weight, int doc) throws IOException {
return local.explain(weight, doc);
}
/** Exports a searcher for the index in args[0] named
* "//localhost/Searchable". */
public static void main(String args[]) throws Exception {
String indexName = null;
if (args != null && args.length == 1)
indexName = args[0];
if (indexName == null) {
System.out.println("Usage: org.apache.lucene.search.RemoteSearchable <index>");
return;
}
// create and install a security manager
if (System.getSecurityManager() == null) {
System.setSecurityManager(new RMISecurityManager());
}
Searchable local = new IndexSearcher(FSDirectory.open(new File(indexName)), true);
RemoteSearchable impl = new RemoteSearchable(local);
// bind the implementation to "Searchable"
Naming.rebind("//localhost/Searchable", impl);
}
}

View File

@@ -1,26 +0,0 @@
<!--
Licensed to the Apache Software Foundation (ASF) under one or more
contributor license agreements. See the NOTICE file distributed with
this work for additional information regarding copyright ownership.
The ASF licenses this file to You under the Apache License, Version 2.0
(the "License"); you may not use this file except in compliance with
the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
-->
<html>
<head>
<title>
remote
</title>
</head>
<body>
remote
</body>
</html>

View File

@@ -1,59 +0,0 @@
package org.apache.lucene.search;
/**
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
import java.io.IOException;
import junit.framework.Assert;
import org.apache.lucene.index.IndexReader;
/**
* A unit test helper class to help with RemoteCachingWrapperFilter testing and
* to assert that it is working correctly.
*/
public class RemoteCachingWrapperFilterHelper extends RemoteCachingWrapperFilter {
private boolean shouldHaveCache;
public RemoteCachingWrapperFilterHelper(Filter filter, boolean shouldHaveCache) {
super(filter);
this.shouldHaveCache = shouldHaveCache;
}
public void shouldHaveCache(boolean shouldHaveCache) {
this.shouldHaveCache = shouldHaveCache;
}
@Override
public DocIdSet getDocIdSet(IndexReader reader) throws IOException {
Filter cachedFilter = FilterManager.getInstance().getFilter(filter);
Assert.assertNotNull("Filter should not be null", cachedFilter);
if (!shouldHaveCache) {
Assert.assertSame("First time filter should be the same ", filter, cachedFilter);
} else {
Assert.assertNotSame("We should have a cached version of the filter", filter, cachedFilter);
}
if (filter instanceof CachingWrapperFilterHelper) {
((CachingWrapperFilterHelper)cachedFilter).setShouldHaveCache(shouldHaveCache);
}
return cachedFilter.getDocIdSet(reader);
}
}

View File

@@ -1,79 +0,0 @@
package org.apache.lucene.search;
/**
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
import java.io.IOException;
import java.net.MalformedURLException;
import java.net.ServerSocket;
import java.net.Socket;
import java.rmi.Naming;
import java.rmi.NotBoundException;
import java.rmi.RemoteException;
import java.rmi.registry.LocateRegistry;
import java.rmi.server.RMIClientSocketFactory;
import java.rmi.server.RMIServerSocketFactory;
import org.apache.lucene.util.LuceneTestCase;
import org.junit.AfterClass;
/**
* Base class for remote tests.
* <p>
* Call {@link #startServer(Searchable)} in an {@code @BeforeClass} annotated method
* to start the server.
* Call {@link #lookupRemote} to get a RemoteSearchable.
*/
public abstract class RemoteTestCase extends LuceneTestCase {
private static int port;
public static void startServer(Searchable searchable) throws Exception {
// publish it
// use our own factories for testing, so we can bind to an ephemeral port.
RMIClientSocketFactory clientFactory = new RMIClientSocketFactory() {
public Socket createSocket(String host, int port) throws IOException {
return new Socket(host, port);
}};
class TestRMIServerSocketFactory implements RMIServerSocketFactory {
ServerSocket socket;
public ServerSocket createServerSocket(int port) throws IOException {
return (socket = new ServerSocket(port));
}
};
TestRMIServerSocketFactory serverFactory = new TestRMIServerSocketFactory();
LocateRegistry.createRegistry(0, clientFactory, serverFactory);
RemoteSearchable impl = new RemoteSearchable(searchable);
port = serverFactory.socket.getLocalPort();
Naming.rebind("//localhost:" + port + "/Searchable", impl);
}
@AfterClass
public static void stopServer() {
try {
Naming.unbind("//localhost:" + port + "/Searchable");
} catch (RemoteException e) {
} catch (MalformedURLException e) {
} catch (NotBoundException e) {
}
}
public static Searchable lookupRemote() throws Exception {
return (Searchable)Naming.lookup("//localhost:" + port + "/Searchable");
}
}

View File

@@ -1,129 +0,0 @@
package org.apache.lucene.search;
/**
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
import java.util.Map;
import org.apache.lucene.analysis.MockAnalyzer;
import org.apache.lucene.document.Document;
import org.apache.lucene.document.Field;
import org.apache.lucene.index.IndexWriter;
import org.apache.lucene.index.Term;
import org.apache.lucene.search.FilterManager.FilterItem;
import org.apache.lucene.store.Directory;
import org.junit.AfterClass;
import org.junit.Before;
import org.junit.BeforeClass;
import org.junit.Test;
/**
* Tests that the index is cached on the searcher side of things.
*/
public class TestRemoteCachingWrapperFilter extends RemoteTestCase {
private static Directory indexStore;
private static Searchable local;
@BeforeClass
public static void beforeClass() throws Exception {
// construct an index
indexStore = newDirectory();
IndexWriter writer = new IndexWriter(indexStore, newIndexWriterConfig(
TEST_VERSION_CURRENT, new MockAnalyzer()));
Document doc = new Document();
doc.add(newField("test", "test text", Field.Store.YES, Field.Index.ANALYZED));
doc.add(newField("type", "A", Field.Store.YES, Field.Index.ANALYZED));
doc.add(newField("other", "other test text", Field.Store.YES, Field.Index.ANALYZED));
writer.addDocument(doc);
//Need a second document to search for
doc = new Document();
doc.add(newField("test", "test text", Field.Store.YES, Field.Index.ANALYZED));
doc.add(newField("type", "B", Field.Store.YES, Field.Index.ANALYZED));
doc.add(newField("other", "other test text", Field.Store.YES, Field.Index.ANALYZED));
writer.addDocument(doc);
writer.optimize();
writer.close();
local = new IndexSearcher(indexStore, true);
startServer(local);
}
@Before
public void setUp () throws Exception {
super.setUp();
// to support test iteration > 1
Map<Integer, FilterItem> cache = FilterManager.getInstance().cache;
synchronized(cache){
cache.clear();
}
}
@AfterClass
public static void afterClass() throws Exception {
local.close();
indexStore.close();
indexStore = null;
}
private static void search(Query query, Filter filter, int hitNumber, String typeValue) throws Exception {
Searchable[] searchables = { lookupRemote() };
Searcher searcher = new MultiSearcher(searchables);
ScoreDoc[] result = searcher.search(query,filter, 1000).scoreDocs;
assertEquals(1, result.length);
Document document = searcher.doc(result[hitNumber].doc);
assertTrue("document is null and it shouldn't be", document != null);
assertEquals(typeValue, document.get("type"));
assertTrue("document.getFields() Size: " + document.getFields().size() + " is not: " + 3, document.getFields().size() == 3);
}
@Test
public void testTermRemoteFilter() throws Exception {
CachingWrapperFilterHelper cwfh = new CachingWrapperFilterHelper(new QueryWrapperFilter(new TermQuery(new Term("type", "a"))));
// This is what we are fixing - if one uses a CachingWrapperFilter(Helper) it will never
// cache the filter on the remote site
cwfh.setShouldHaveCache(false);
search(new TermQuery(new Term("test", "test")), cwfh, 0, "A");
cwfh.setShouldHaveCache(false);
search(new TermQuery(new Term("test", "test")), cwfh, 0, "A");
// This is how we fix caching - we wrap a Filter in the RemoteCachingWrapperFilter(Handler - for testing)
// to cache the Filter on the searcher (remote) side
RemoteCachingWrapperFilterHelper rcwfh = new RemoteCachingWrapperFilterHelper(cwfh, false);
search(new TermQuery(new Term("test", "test")), rcwfh, 0, "A");
// 2nd time we do the search, we should be using the cached Filter
rcwfh.shouldHaveCache(true);
search(new TermQuery(new Term("test", "test")), rcwfh, 0, "A");
// assert that we get the same cached Filter, even if we create a new instance of RemoteCachingWrapperFilter(Helper)
// this should pass because the Filter parameters are the same, and the cache uses Filter's hashCode() as cache keys,
// and Filters' hashCode() builds on Filter parameters, not the Filter instance itself
rcwfh = new RemoteCachingWrapperFilterHelper(new QueryWrapperFilter(new TermQuery(new Term("type", "a"))), false);
rcwfh.shouldHaveCache(false);
search(new TermQuery(new Term("test", "test")), rcwfh, 0, "A");
rcwfh = new RemoteCachingWrapperFilterHelper(new QueryWrapperFilter(new TermQuery(new Term("type", "a"))), false);
rcwfh.shouldHaveCache(true);
search(new TermQuery(new Term("test", "test")), rcwfh, 0, "A");
// assert that we get a non-cached version of the Filter because this is a new Query (type:b)
rcwfh = new RemoteCachingWrapperFilterHelper(new QueryWrapperFilter(new TermQuery(new Term("type", "b"))), false);
rcwfh.shouldHaveCache(false);
search(new TermQuery(new Term("type", "b")), rcwfh, 0, "B");
}
}

View File

@@ -1,128 +0,0 @@
package org.apache.lucene.search;
/**
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
import org.apache.lucene.analysis.MockAnalyzer;
import org.apache.lucene.document.*;
import org.apache.lucene.index.IndexWriter;
import org.apache.lucene.index.Term;
import org.apache.lucene.store.Directory;
import org.junit.AfterClass;
import org.junit.BeforeClass;
import org.junit.Test;
import java.util.Collections;
import java.util.Set;
import java.util.HashSet;
public class TestRemoteSearchable extends RemoteTestCase {
private static Directory indexStore;
private static Searchable local;
@BeforeClass
public static void beforeClass() throws Exception {
// construct an index
indexStore = newDirectory();
IndexWriter writer = new IndexWriter(indexStore, newIndexWriterConfig(
TEST_VERSION_CURRENT, new MockAnalyzer()));
Document doc = new Document();
doc.add(newField("test", "test text", Field.Store.YES, Field.Index.ANALYZED));
doc.add(newField("other", "other test text", Field.Store.YES, Field.Index.ANALYZED));
writer.addDocument(doc);
writer.optimize();
writer.close();
local = new IndexSearcher(indexStore, true);
startServer(local);
}
@AfterClass
public static void afterClass() throws Exception {
local.close();
indexStore.close();
indexStore = null;
}
private static void search(Query query) throws Exception {
// try to search the published index
Searchable[] searchables = { lookupRemote() };
Searcher searcher = new MultiSearcher(searchables);
ScoreDoc[] result = searcher.search(query, null, 1000).scoreDocs;
assertEquals(1, result.length);
Document document = searcher.doc(result[0].doc);
assertTrue("document is null and it shouldn't be", document != null);
assertEquals("test text", document.get("test"));
assertTrue("document.getFields() Size: " + document.getFields().size() + " is not: " + 2, document.getFields().size() == 2);
Set<String> ftl = new HashSet<String>();
ftl.add("other");
FieldSelector fs = new SetBasedFieldSelector(ftl, Collections.<String>emptySet());
document = searcher.doc(0, fs);
assertTrue("document is null and it shouldn't be", document != null);
assertTrue("document.getFields() Size: " + document.getFields().size() + " is not: " + 1, document.getFields().size() == 1);
fs = new MapFieldSelector("other");
document = searcher.doc(0, fs);
assertTrue("document is null and it shouldn't be", document != null);
assertTrue("document.getFields() Size: " + document.getFields().size() + " is not: " + 1, document.getFields().size() == 1);
}
@Test
public void testTermQuery() throws Exception {
search(new TermQuery(new Term("test", "test")));
}
@Test
public void testBooleanQuery() throws Exception {
BooleanQuery query = new BooleanQuery();
query.add(new TermQuery(new Term("test", "test")), BooleanClause.Occur.MUST);
search(query);
}
@Test
public void testPhraseQuery() throws Exception {
PhraseQuery query = new PhraseQuery();
query.add(new Term("test", "test"));
query.add(new Term("test", "text"));
search(query);
}
// Tests bug fix at http://nagoya.apache.org/bugzilla/show_bug.cgi?id=20290
@Test
public void testQueryFilter() throws Exception {
// try to search the published index
Searchable[] searchables = { lookupRemote() };
Searcher searcher = new MultiSearcher(searchables);
ScoreDoc[] hits = searcher.search(
new TermQuery(new Term("test", "text")),
new QueryWrapperFilter(new TermQuery(new Term("test", "test"))), 1000).scoreDocs;
assertEquals(1, hits.length);
ScoreDoc[] nohits = searcher.search(
new TermQuery(new Term("test", "text")),
new QueryWrapperFilter(new TermQuery(new Term("test", "non-existent-term"))), 1000).scoreDocs;
assertEquals(0, nohits.length);
}
@Test
public void testConstantScoreQuery() throws Exception {
// try to search the published index
Searchable[] searchables = { lookupRemote() };
Searcher searcher = new MultiSearcher(searchables);
ScoreDoc[] hits = searcher.search(
new ConstantScoreQuery(new TermQuery(new Term("test", "test"))), null, 1000).scoreDocs;
assertEquals(1, hits.length);
}
}

View File

@@ -1,425 +0,0 @@
package org.apache.lucene.search;
/**
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
import java.io.IOException;
import java.util.HashMap;
import java.util.Iterator;
import java.util.Locale;
import java.util.Random;
import org.apache.lucene.analysis.MockAnalyzer;
import org.apache.lucene.document.Document;
import org.apache.lucene.document.Field;
import org.apache.lucene.index.IndexReader;
import org.apache.lucene.index.IndexWriter;
import org.apache.lucene.index.Term;
import org.apache.lucene.store.Directory;
import org.apache.lucene.util.BytesRef;
import org.junit.AfterClass;
import org.junit.Before;
import org.junit.BeforeClass;
import org.junit.Test;
/**
* Unit tests for remote sorting code.
* Note: This is a modified copy of {@link TestSort} without duplicated test
* methods and therefore unused members and methods.
*/
public class TestRemoteSort extends RemoteTestCase {
private static IndexSearcher full;
private static Directory indexStore;
private Query queryX;
private Query queryY;
private Query queryA;
private Query queryF;
private Sort sort;
// document data:
// the tracer field is used to determine which document was hit
// the contents field is used to search and sort by relevance
// the int field to sort by int
// the float field to sort by float
// the string field to sort by string
// the i18n field includes accented characters for testing locale-specific sorting
private static final String[][] data = new String[][] {
// tracer contents int float string custom i18n long double, 'short', byte, 'custom parser encoding'
{ "A", "x a", "5", "4f", "c", "A-3", "p\u00EAche", "10", "-4.0", "3", "126", "J"},//A, x
{ "B", "y a", "5", "3.4028235E38", "i", "B-10", "HAT", "1000000000", "40.0", "24", "1", "I"},//B, y
{ "C", "x a b c", "2147483647", "1.0", "j", "A-2", "p\u00E9ch\u00E9", "99999999", "40.00002343", "125", "15", "H"},//C, x
{ "D", "y a b c", "-1", "0.0f", "a", "C-0", "HUT", String.valueOf(Long.MAX_VALUE), String.valueOf(Double.MIN_VALUE), String.valueOf(Short.MIN_VALUE), String.valueOf(Byte.MIN_VALUE), "G"},//D, y
{ "E", "x a b c d", "5", "2f", "h", "B-8", "peach", String.valueOf(Long.MIN_VALUE), String.valueOf(Double.MAX_VALUE), String.valueOf(Short.MAX_VALUE), String.valueOf(Byte.MAX_VALUE), "F"},//E,x
{ "F", "y a b c d", "2", "3.14159f", "g", "B-1", "H\u00C5T", "-44", "343.034435444", "-3", "0", "E"},//F,y
{ "G", "x a b c d", "3", "-1.0", "f", "C-100", "sin", "323254543543", "4.043544", "5", "100", "D"},//G,x
{ "H", "y a b c d", "0", "1.4E-45", "e", "C-88", "H\u00D8T", "1023423423005","4.043545", "10", "-50", "C"},//H,y
{ "I", "x a b c d e f", "-2147483648", "1.0e+0", "d", "A-10", "s\u00EDn", "332422459999", "4.043546", "-340", "51", "B"},//I,x
{ "J", "y a b c d e f", "4", ".5", "b", "C-7", "HOT", "34334543543", "4.0000220343", "300", "2", "A"},//J,y
{ "W", "g", "1", null, null, null, null, null, null, null, null, null},
{ "X", "g", "1", "0.1", null, null, null, null, null, null, null, null},
{ "Y", "g", "1", "0.2", null, null, null, null, null, null, null, null},
{ "Z", "f g", null, null, null, null, null, null, null, null, null, null}
};
// create an index of all the documents, or just the x, or just the y documents
@BeforeClass
public static void beforeClass() throws Exception {
indexStore = newDirectory();
IndexWriter writer = new IndexWriter(
indexStore,
newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer()).
setMaxBufferedDocs(2).
setMergePolicy(newLogMergePolicy(1000))
);
for (int i=0; i<data.length; ++i) {
Document doc = new Document();
doc.add (new Field ("tracer", data[i][0], Field.Store.YES, Field.Index.NO));
doc.add (new Field ("contents", data[i][1], Field.Store.NO, Field.Index.ANALYZED));
if (data[i][2] != null) doc.add (new Field ("int", data[i][2], Field.Store.NO, Field.Index.NOT_ANALYZED));
if (data[i][3] != null) doc.add (new Field ("float", data[i][3], Field.Store.NO, Field.Index.NOT_ANALYZED));
if (data[i][4] != null) doc.add (new Field ("string", data[i][4], Field.Store.NO, Field.Index.NOT_ANALYZED));
if (data[i][5] != null) doc.add (new Field ("custom", data[i][5], Field.Store.NO, Field.Index.NOT_ANALYZED));
if (data[i][6] != null) doc.add (new Field ("i18n", data[i][6], Field.Store.NO, Field.Index.NOT_ANALYZED));
if (data[i][7] != null) doc.add (new Field ("long", data[i][7], Field.Store.NO, Field.Index.NOT_ANALYZED));
if (data[i][8] != null) doc.add (new Field ("double", data[i][8], Field.Store.NO, Field.Index.NOT_ANALYZED));
if (data[i][9] != null) doc.add (new Field ("short", data[i][9], Field.Store.NO, Field.Index.NOT_ANALYZED));
if (data[i][10] != null) doc.add (new Field ("byte", data[i][10], Field.Store.NO, Field.Index.NOT_ANALYZED));
if (data[i][11] != null) doc.add (new Field ("parser", data[i][11], Field.Store.NO, Field.Index.NOT_ANALYZED));
doc.setBoost(2); // produce some scores above 1.0
writer.addDocument (doc);
}
//writer.optimize ();
writer.close ();
full = new IndexSearcher (indexStore, false);
full.setDefaultFieldSortScoring(true, true);
startServer(full);
}
@AfterClass
public static void afterClass() throws Exception {
full.close();
full = null;
indexStore.close();
indexStore = null;
}
public String getRandomNumberString(int num, int low, int high) {
StringBuilder sb = new StringBuilder();
for (int i = 0; i < num; i++) {
sb.append(getRandomNumber(low, high));
}
return sb.toString();
}
public String getRandomCharString(int num) {
return getRandomCharString(num, 48, 122);
}
public String getRandomCharString(int num, int start, int end) {
StringBuilder sb = new StringBuilder();
for (int i = 0; i < num; i++) {
sb.append(new Character((char) getRandomNumber(start, end)));
}
return sb.toString();
}
Random r;
public int getRandomNumber(final int low, final int high) {
int randInt = (Math.abs(r.nextInt()) % (high - low)) + low;
return randInt;
}
@Override
@Before
public void setUp() throws Exception {
super.setUp();
queryX = new TermQuery (new Term ("contents", "x"));
queryY = new TermQuery (new Term ("contents", "y"));
queryA = new TermQuery (new Term ("contents", "a"));
queryF = new TermQuery (new Term ("contents", "f"));
sort = new Sort();
}
static class MyFieldComparator extends FieldComparator {
int[] docValues;
int[] slotValues;
int bottomValue;
MyFieldComparator(int numHits) {
slotValues = new int[numHits];
}
@Override
public void copy(int slot, int doc) {
slotValues[slot] = docValues[doc];
}
@Override
public int compare(int slot1, int slot2) {
return slotValues[slot1] - slotValues[slot2];
}
@Override
public int compareBottom(int doc) {
return bottomValue - docValues[doc];
}
@Override
public void setBottom(int bottom) {
bottomValue = slotValues[bottom];
}
@Override
public FieldComparator setNextReader(IndexReader reader, int docBase) throws IOException {
docValues = FieldCache.DEFAULT.getInts(reader, "parser", new FieldCache.IntParser() {
public final int parseInt(BytesRef termRef) {
return (termRef.utf8ToString().charAt(0)-'A') * 123456;
}
});
return this;
}
@Override
public Comparable<?> value(int slot) {
return Integer.valueOf(slotValues[slot]);
}
}
static class MyFieldComparatorSource extends FieldComparatorSource {
@Override
public FieldComparator newComparator(String fieldname, int numHits, int sortPos, boolean reversed) {
return new MyFieldComparator(numHits);
}
}
// test a variety of sorts using a remote searcher
@Test
public void testRemoteSort() throws Exception {
Searchable searcher = lookupRemote();
MultiSearcher multi = new MultiSearcher (searcher);
runMultiSorts(multi, true); // this runs on the full index
}
// test custom search when remote
/* rewrite with new API
public void testRemoteCustomSort() throws Exception {
Searchable searcher = getRemote();
MultiSearcher multi = new MultiSearcher (new Searchable[] { searcher });
sort.setSort (new SortField ("custom", SampleComparable.getComparatorSource()));
assertMatches (multi, queryX, sort, "CAIEG");
sort.setSort (new SortField ("custom", SampleComparable.getComparatorSource(), true));
assertMatches (multi, queryY, sort, "HJDBF");
assertSaneFieldCaches(getName() + " ComparatorSource");
FieldCache.DEFAULT.purgeAllCaches();
SortComparator custom = SampleComparable.getComparator();
sort.setSort (new SortField ("custom", custom));
assertMatches (multi, queryX, sort, "CAIEG");
sort.setSort (new SortField ("custom", custom, true));
assertMatches (multi, queryY, sort, "HJDBF");
assertSaneFieldCaches(getName() + " Comparator");
FieldCache.DEFAULT.purgeAllCaches();
}*/
// test that the relevancy scores are the same even if
// hits are sorted
@Test
public void testNormalizedScores() throws Exception {
// capture relevancy scores
HashMap<String,Float> scoresX = getScores (full.search (queryX, null, 1000).scoreDocs, full);
HashMap<String,Float> scoresY = getScores (full.search (queryY, null, 1000).scoreDocs, full);
HashMap<String,Float> scoresA = getScores (full.search (queryA, null, 1000).scoreDocs, full);
// we'll test searching locally, remote and multi
MultiSearcher remote = new MultiSearcher (lookupRemote());
// change sorting and make sure relevancy stays the same
sort = new Sort();
assertSameValues (scoresX, getScores (remote.search (queryX, null, 1000, sort).scoreDocs, remote));
assertSameValues (scoresY, getScores (remote.search (queryY, null, 1000, sort).scoreDocs, remote));
assertSameValues (scoresA, getScores (remote.search (queryA, null, 1000, sort).scoreDocs, remote));
sort.setSort(SortField.FIELD_DOC);
assertSameValues (scoresX, getScores (remote.search (queryX, null, 1000, sort).scoreDocs, remote));
assertSameValues (scoresY, getScores (remote.search (queryY, null, 1000, sort).scoreDocs, remote));
assertSameValues (scoresA, getScores (remote.search (queryA, null, 1000, sort).scoreDocs, remote));
sort.setSort (new SortField("int", SortField.INT));
assertSameValues (scoresX, getScores (remote.search (queryX, null, 1000, sort).scoreDocs, remote));
assertSameValues (scoresY, getScores (remote.search (queryY, null, 1000, sort).scoreDocs, remote));
assertSameValues (scoresA, getScores (remote.search (queryA, null, 1000, sort).scoreDocs, remote));
sort.setSort (new SortField("float", SortField.FLOAT));
assertSameValues (scoresX, getScores (remote.search (queryX, null, 1000, sort).scoreDocs, remote));
assertSameValues (scoresY, getScores (remote.search (queryY, null, 1000, sort).scoreDocs, remote));
assertSameValues (scoresA, getScores (remote.search (queryA, null, 1000, sort).scoreDocs, remote));
sort.setSort (new SortField("string", SortField.STRING));
assertSameValues (scoresX, getScores (remote.search (queryX, null, 1000, sort).scoreDocs, remote));
assertSameValues (scoresY, getScores (remote.search (queryY, null, 1000, sort).scoreDocs, remote));
assertSameValues (scoresA, getScores (remote.search (queryA, null, 1000, sort).scoreDocs, remote));
sort.setSort (new SortField("int", SortField.INT), new SortField("float", SortField.FLOAT));
assertSameValues (scoresX, getScores (remote.search (queryX, null, 1000, sort).scoreDocs, remote));
assertSameValues (scoresY, getScores (remote.search (queryY, null, 1000, sort).scoreDocs, remote));
assertSameValues (scoresA, getScores (remote.search (queryA, null, 1000, sort).scoreDocs, remote));
sort.setSort (new SortField ("int", SortField.INT, true), new SortField (null, SortField.DOC, true) );
assertSameValues (scoresX, getScores (remote.search (queryX, null, 1000, sort).scoreDocs, remote));
assertSameValues (scoresY, getScores (remote.search (queryY, null, 1000, sort).scoreDocs, remote));
assertSameValues (scoresA, getScores (remote.search (queryA, null, 1000, sort).scoreDocs, remote));
sort.setSort (new SortField("float", SortField.FLOAT));
assertSameValues (scoresX, getScores (remote.search (queryX, null, 1000, sort).scoreDocs, remote));
assertSameValues (scoresY, getScores (remote.search (queryY, null, 1000, sort).scoreDocs, remote));
assertSameValues (scoresA, getScores (remote.search (queryA, null, 1000, sort).scoreDocs, remote));
}
// runs a variety of sorts useful for multisearchers
private void runMultiSorts(Searcher multi, boolean isFull) throws Exception {
sort.setSort(SortField.FIELD_DOC);
String expected = isFull ? "ABCDEFGHIJ" : "ACEGIBDFHJ";
assertMatches(multi, queryA, sort, expected);
sort.setSort(new SortField ("int", SortField.INT));
expected = isFull ? "IDHFGJABEC" : "IDHFGJAEBC";
assertMatches(multi, queryA, sort, expected);
sort.setSort(new SortField ("int", SortField.INT), SortField.FIELD_DOC);
expected = isFull ? "IDHFGJABEC" : "IDHFGJAEBC";
assertMatches(multi, queryA, sort, expected);
sort.setSort(new SortField ("int", SortField.INT));
expected = isFull ? "IDHFGJABEC" : "IDHFGJAEBC";
assertMatches(multi, queryA, sort, expected);
sort.setSort(new SortField ("float", SortField.FLOAT), SortField.FIELD_DOC);
assertMatches(multi, queryA, sort, "GDHJCIEFAB");
sort.setSort(new SortField("float", SortField.FLOAT));
assertMatches(multi, queryA, sort, "GDHJCIEFAB");
sort.setSort(new SortField("string", SortField.STRING));
assertMatches(multi, queryA, sort, "DJAIHGFEBC");
sort.setSort(new SortField ("int", SortField.INT, true));
expected = isFull ? "CABEJGFHDI" : "CAEBJGFHDI";
assertMatches(multi, queryA, sort, expected);
sort.setSort(new SortField ("float", SortField.FLOAT, true));
assertMatches(multi, queryA, sort, "BAFECIJHDG");
sort.setSort(new SortField ("string", SortField.STRING, true));
assertMatches(multi, queryA, sort, "CBEFGHIAJD");
sort.setSort(new SortField ("int", SortField.INT), new SortField ("float", SortField.FLOAT));
assertMatches(multi, queryA, sort, "IDHFGJEABC");
sort.setSort(new SortField ("float", SortField.FLOAT), new SortField ("string", SortField.STRING));
assertMatches(multi, queryA, sort, "GDHJICEFAB");
sort.setSort(new SortField ("int", SortField.INT));
assertMatches(multi, queryF, sort, "IZJ");
sort.setSort(new SortField ("int", SortField.INT, true));
assertMatches(multi, queryF, sort, "JZI");
sort.setSort(new SortField ("float", SortField.FLOAT));
assertMatches(multi, queryF, sort, "ZJI");
sort.setSort(new SortField ("string", SortField.STRING));
assertMatches(multi, queryF, sort, "ZJI");
sort.setSort(new SortField ("string", SortField.STRING, true));
assertMatches(multi, queryF, sort, "IJZ");
// up to this point, all of the searches should have "sane"
// FieldCache behavior, and should have reused the cache in several cases
assertSaneFieldCaches(getName() + " Basics");
// next we'll check an alternate Locale for string, so purge first
FieldCache.DEFAULT.purgeAllCaches();
sort.setSort(new SortField ("string", Locale.US) );
assertMatches(multi, queryA, sort, "DJAIHGFEBC");
sort.setSort(new SortField ("string", Locale.US, true));
assertMatches(multi, queryA, sort, "CBEFGHIAJD");
assertSaneFieldCaches(getName() + " Locale.US");
FieldCache.DEFAULT.purgeAllCaches();
}
// make sure the documents returned by the search match the expected list
private void assertMatches(Searcher searcher, Query query, Sort sort,
String expectedResult) throws IOException {
//ScoreDoc[] result = searcher.search (query, null, 1000, sort).scoreDocs;
TopDocs hits = searcher.search (query, null, expectedResult.length(), sort);
ScoreDoc[] result = hits.scoreDocs;
assertEquals(hits.totalHits, expectedResult.length());
StringBuilder buff = new StringBuilder(10);
int n = result.length;
for (int i=0; i<n; ++i) {
Document doc = searcher.doc(result[i].doc);
String[] v = doc.getValues("tracer");
for (int j=0; j<v.length; ++j) {
buff.append (v[j]);
}
}
assertEquals (expectedResult, buff.toString());
}
private HashMap<String, Float> getScores (ScoreDoc[] hits, Searcher searcher)
throws IOException {
HashMap<String, Float> scoreMap = new HashMap<String, Float>();
int n = hits.length;
for (int i=0; i<n; ++i) {
Document doc = searcher.doc(hits[i].doc);
String[] v = doc.getValues("tracer");
assertEquals (v.length, 1);
scoreMap.put (v[0], Float.valueOf(hits[i].score));
}
return scoreMap;
}
// make sure all the values in the maps match
private void assertSameValues (HashMap<?, ?> m1, HashMap<?, ?> m2) {
int n = m1.size();
int m = m2.size();
assertEquals (n, m);
Iterator<?> iter = m1.keySet().iterator();
while (iter.hasNext()) {
Object key = iter.next();
Object o1 = m1.get(key);
Object o2 = m2.get(key);
if (o1 instanceof Float) {
assertEquals(((Float)o1).floatValue(), ((Float)o2).floatValue(), 1e-6);
} else {
assertEquals (m1.get(key), m2.get(key));
}
}
}
}

View File

@@ -252,7 +252,7 @@ public class TestCartesian extends LuceneTestCase {
// Perform the search, using the term query, the serial chain filter, and the
// distance sort
TopDocs hits = searcher.search(customScore.createWeight(searcher),null, 1000, sort);
TopDocs hits = searcher.search(customScore,null, 1000, sort);
int results = hits.totalHits;
ScoreDoc[] scoreDocs = hits.scoreDocs;
@ -348,7 +348,7 @@ public class TestCartesian extends LuceneTestCase {
// Perform the search, using the term query, the serial chain filter, and the
// distance sort
TopDocs hits = searcher.search(customScore.createWeight(searcher),null, 1000, sort);
TopDocs hits = searcher.search(customScore,null, 1000, sort);
int results = hits.totalHits;
ScoreDoc[] scoreDocs = hits.scoreDocs;
@ -444,7 +444,7 @@ public class TestCartesian extends LuceneTestCase {
// Perform the search, using the term query, the serial chain filter, and the
// distance sort
TopDocs hits = searcher.search(customScore.createWeight(searcher),null, 1000, sort);
TopDocs hits = searcher.search(customScore,null, 1000, sort);
int results = hits.totalHits;
ScoreDoc[] scoreDocs = hits.scoreDocs;
@ -539,7 +539,7 @@ public class TestCartesian extends LuceneTestCase {
// Perform the search, using the term query, the serial chain filter, and the
// distance sort
TopDocs hits = searcher.search(customScore.createWeight(searcher),dq.getFilter(), 1000); //,sort);
TopDocs hits = searcher.search(customScore,dq.getFilter(), 1000); //,sort);
int results = hits.totalHits;
ScoreDoc[] scoreDocs = hits.scoreDocs;
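Aside: the four hunks in this file are the same mechanical change. The Weight-taking search overloads become protected in IndexSearcher, so callers pass the Query and the searcher builds the Weight internally; a before/after sketch using the test's customScore query:

    // before: the caller constructed the Weight explicitly
    TopDocs before = searcher.search(customScore.createWeight(searcher), null, 1000, sort);
    // after: hand over the Query; the protected Weight overload runs internally
    TopDocs after = searcher.search(customScore, null, 1000, sort);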

View File

@ -39,7 +39,6 @@ import org.apache.lucene.search.Collector;
import org.apache.lucene.search.IndexSearcher;
import org.apache.lucene.search.Query;
import org.apache.lucene.search.Scorer;
import org.apache.lucene.search.Searcher;
import org.apache.lucene.search.TermQuery;
import org.apache.lucene.store.FSDirectory;
import org.apache.lucene.util.Version;
@ -104,7 +103,7 @@ public final class SynExpand {
* @return the expanded Query
*/
public static Query expand( String query,
Searcher syns,
IndexSearcher syns,
Analyzer a,
String f,
final float boost)

View File

@ -39,7 +39,6 @@ import org.apache.lucene.search.IndexSearcher;
import org.apache.lucene.search.Query;
import org.apache.lucene.search.ScoreDoc;
import org.apache.lucene.search.Scorer;
import org.apache.lucene.search.Searcher;
import org.apache.lucene.search.TermQuery;
import org.apache.lucene.store.FSDirectory;
@ -114,7 +113,7 @@ public class SynLookup {
* @param boost
*/
public static Query expand( String query,
Searcher syns,
IndexSearcher syns,
Analyzer a,
final String field,
final float boost)

View File

@ -26,13 +26,12 @@ import org.apache.lucene.search.BooleanClause;
import org.apache.lucene.search.BooleanQuery;
import org.apache.lucene.search.IndexSearcher;
import org.apache.lucene.search.Query;
import org.apache.lucene.search.Searcher;
import org.apache.lucene.search.TermQuery;
import org.apache.lucene.store.Directory;
import org.apache.lucene.util.LuceneTestCase;
public class TestWordnet extends LuceneTestCase {
private Searcher searcher;
private IndexSearcher searcher;
private Directory dir;
String storePathName = new File(TEMP_DIR,"testLuceneWordnet").getAbsolutePath();

View File

@ -19,7 +19,6 @@ package org.apache.lucene.document;
import java.util.*; // for javadoc
import org.apache.lucene.search.ScoreDoc; // for javadoc
import org.apache.lucene.search.Searcher; // for javadoc
import org.apache.lucene.index.IndexReader; // for javadoc
/** Documents are the unit of indexing and search.
@ -32,8 +31,7 @@ import org.apache.lucene.index.IndexReader; // for javadoc
*
* <p>Note that fields which are <i>not</i> {@link Fieldable#isStored() stored} are
* <i>not</i> available in documents retrieved from the index, e.g. with {@link
* ScoreDoc#doc}, {@link Searcher#doc(int)} or {@link
* IndexReader#document(int)}.
* ScoreDoc#doc} or {@link IndexReader#document(int)}.
*/
public final class Document implements java.io.Serializable {

View File

@ -338,10 +338,18 @@ public abstract class LogMergePolicy extends MergePolicy {
int maxNumSegments, Set<SegmentInfo> segmentsToOptimize) throws IOException {
assert maxNumSegments > 0;
if (verbose()) {
message("findMergesForOptimize: maxNumSegs=" + maxNumSegments + " segsToOptimize= "+ segmentsToOptimize);
}
// If the segments are already optimized (e.g. there's only 1 segment), or
// there are <maxNumSegments, all optimized, nothing to do.
if (isOptimized(infos, maxNumSegments, segmentsToOptimize)) return null;
if (isOptimized(infos, maxNumSegments, segmentsToOptimize)) {
if (verbose()) {
message("already optimized; skip");
}
return null;
}
// Find the newest (rightmost) segment that needs to
// be optimized (other segments may have been flushed
@ -355,10 +363,20 @@ public abstract class LogMergePolicy extends MergePolicy {
}
}
if (last == 0) return null;
if (last == 0) {
if (verbose()) {
message("last == 0; skip");
}
return null;
}
// There is only one segment already, and it is optimized
if (maxNumSegments == 1 && last == 1 && isOptimized(infos.info(0))) return null;
if (maxNumSegments == 1 && last == 1 && isOptimized(infos.info(0))) {
if (verbose()) {
message("already 1 seg; skip");
}
return null;
}
// Check if there are any segments above the threshold
boolean anyTooLarge = false;

View File

@ -89,7 +89,7 @@ public class BooleanQuery extends Query implements Iterable<BooleanClause> {
// Implement coord disabling.
// Inherit javadoc.
@Override
public Similarity getSimilarity(Searcher searcher) {
public Similarity getSimilarity(IndexSearcher searcher) {
Similarity result = super.getSimilarity(searcher);
if (disableCoord) { // disable coord as requested
result = new SimilarityDelegator(result) {
@ -179,7 +179,7 @@ public class BooleanQuery extends Query implements Iterable<BooleanClause> {
protected ArrayList<Weight> weights;
protected int maxCoord; // num optional + num required
public BooleanWeight(Searcher searcher)
public BooleanWeight(IndexSearcher searcher)
throws IOException {
this.similarity = getSimilarity(searcher);
weights = new ArrayList<Weight>(clauses.size());
@ -362,7 +362,7 @@ public class BooleanQuery extends Query implements Iterable<BooleanClause> {
}
@Override
public Weight createWeight(Searcher searcher) throws IOException {
public Weight createWeight(IndexSearcher searcher) throws IOException {
return new BooleanWeight(searcher);
}
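For context, a brief sketch of the coord-disabling path above (field and terms are made up): constructing the query with disableCoord=true makes getSimilarity(IndexSearcher) wrap the Similarity in a delegator whose coord() is constant, so overlap between SHOULD clauses no longer boosts scores:

    BooleanQuery bq = new BooleanQuery(true);  // true = disable coord
    bq.add(new TermQuery(new Term("body", "lucene")), BooleanClause.Occur.SHOULD);
    bq.add(new TermQuery(new Term("body", "search")), BooleanClause.Occur.SHOULD);
    TopDocs docs = searcher.search(bq, null, 10);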

View File

@ -100,7 +100,7 @@ public class ConstantScoreQuery extends Query {
private float queryNorm;
private float queryWeight;
public ConstantWeight(Searcher searcher) throws IOException {
public ConstantWeight(IndexSearcher searcher) throws IOException {
this.similarity = getSimilarity(searcher);
this.innerWeight = (query == null) ? null : query.createWeight(searcher);
}
@ -256,7 +256,7 @@ public class ConstantScoreQuery extends Query {
}
@Override
public Weight createWeight(Searcher searcher) throws IOException {
public Weight createWeight(IndexSearcher searcher) throws IOException {
return new ConstantScoreQuery.ConstantWeight(searcher);
}

View File

@ -101,7 +101,7 @@ public class DisjunctionMaxQuery extends Query implements Iterable<Query> {
protected ArrayList<Weight> weights = new ArrayList<Weight>(); // The Weight's for our subqueries, in 1-1 correspondence with disjuncts
/* Construct the Weight for this Query searched by searcher. Recursively construct subquery weights. */
public DisjunctionMaxWeight(Searcher searcher) throws IOException {
public DisjunctionMaxWeight(IndexSearcher searcher) throws IOException {
this.similarity = searcher.getSimilarity();
for (Query disjunctQuery : disjuncts) {
weights.add(disjunctQuery.createWeight(searcher));
@ -180,7 +180,7 @@ public class DisjunctionMaxQuery extends Query implements Iterable<Query> {
/* Create the Weight used to score us */
@Override
public Weight createWeight(Searcher searcher) throws IOException {
public Weight createWeight(IndexSearcher searcher) throws IOException {
return new DisjunctionMaxWeight(searcher);
}

View File

@ -59,7 +59,7 @@ extends Query {
* This is accomplished by overriding the Scorer returned by the Weight.
*/
@Override
public Weight createWeight(final Searcher searcher) throws IOException {
public Weight createWeight(final IndexSearcher searcher) throws IOException {
final Weight weight = query.createWeight (searcher);
final Similarity similarity = query.getSimilarity(searcher);
return new Weight() {

View File

@ -19,7 +19,17 @@ package org.apache.lucene.search;
import java.io.IOException;
import java.util.ArrayList;
import java.util.Iterator;
import java.util.List;
import java.util.NoSuchElementException;
import java.util.concurrent.Callable;
import java.util.concurrent.CompletionService;
import java.util.concurrent.ExecutionException;
import java.util.concurrent.Executor;
import java.util.concurrent.ExecutorCompletionService;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.locks.Lock;
import java.util.concurrent.locks.ReentrantLock;
import org.apache.lucene.document.Document;
import org.apache.lucene.document.FieldSelector;
@ -28,6 +38,7 @@ import org.apache.lucene.index.IndexReader;
import org.apache.lucene.index.Term;
import org.apache.lucene.store.Directory;
import org.apache.lucene.util.ReaderUtil;
import org.apache.lucene.util.ThreadInterruptedException;
/** Implements search over a single IndexReader.
*
@ -44,14 +55,19 @@ import org.apache.lucene.util.ReaderUtil;
* synchronize on the <code>IndexSearcher</code> instance;
* use your own (non-Lucene) objects instead.</p>
*/
public class IndexSearcher extends Searcher {
public class IndexSearcher {
IndexReader reader;
private boolean closeReader;
// NOTE: these members might change in incompatible ways
// in the next release
protected IndexReader[] subReaders;
protected int[] docStarts;
protected final IndexReader[] subReaders;
protected final IndexSearcher[] subSearchers;
protected final int[] docStarts;
private final ExecutorService executor;
/** The Similarity implementation used by this searcher. */
private Similarity similarity = Similarity.getDefault();
/** Creates a searcher searching the index in the named
* directory, with readOnly=true
@ -60,7 +76,7 @@ public class IndexSearcher extends Searcher {
* @throws IOException if there is a low-level IO error
*/
public IndexSearcher(Directory path) throws CorruptIndexException, IOException {
this(IndexReader.open(path, true), true);
this(IndexReader.open(path, true), true, null);
}
/** Creates a searcher searching the index in the named
@ -75,12 +91,27 @@ public class IndexSearcher extends Searcher {
* @throws IOException if there is a low-level IO error
*/
public IndexSearcher(Directory path, boolean readOnly) throws CorruptIndexException, IOException {
this(IndexReader.open(path, readOnly), true);
this(IndexReader.open(path, readOnly), true, null);
}
/** Creates a searcher searching the provided index. */
public IndexSearcher(IndexReader r) {
this(r, false);
this(r, false, null);
}
/** Runs searches for each segment separately, using the
* provided ExecutorService. IndexSearcher will not
* shutdown/awaitTermination this ExecutorService on
* close; you must do so, eventually, on your own. NOTE:
* if you are using {@link NIOFSDirectory}, do not use
* the shutdownNow method of ExecutorService as this uses
* Thread.interrupt under-the-hood which can silently
* close file descriptors (see <a
* href="https://issues.apache.org/jira/browse/LUCENE-2239">LUCENE-2239</a>).
*
* @lucene.experimental */
public IndexSearcher(IndexReader r, ExecutorService executor) {
this(r, false, executor);
}
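// A hedged usage sketch for this constructor (pool size and the reader are
// assumptions, not part of this change). The caller owns the executor's
// lifecycle; IndexSearcher only submits per-segment tasks to it:
//
//   ExecutorService pool = Executors.newFixedThreadPool(
//       Runtime.getRuntime().availableProcessors());
//   IndexSearcher searcher = new IndexSearcher(reader, pool);
//   ... run searches ...
//   searcher.close();  // does not shut down the pool
//   pool.shutdown();   // caller's job; avoid shutdownNow() with NIOFSDirectory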
/** Expert: directly specify the reader, subReaders and
@ -91,21 +122,58 @@ public class IndexSearcher extends Searcher {
this.reader = reader;
this.subReaders = subReaders;
this.docStarts = docStarts;
subSearchers = new IndexSearcher[subReaders.length];
for(int i=0;i<subReaders.length;i++) {
subSearchers[i] = new IndexSearcher(subReaders[i]);
}
closeReader = false;
executor = null;
}
private IndexSearcher(IndexReader r, boolean closeReader) {
/** Expert: directly specify the reader, subReaders and
* their docID starts, and an ExecutorService. In this
* case, each segment will be separately searched using the
* ExecutorService. IndexSearcher will not
* shutdown/awaitTermination this ExecutorService on
* close; you must do so, eventually, on your own. NOTE:
* if you are using {@link NIOFSDirectory}, do not use
* the shutdownNow method of ExecutorService as this uses
* Thread.interrupt under-the-hood which can silently
* close file descriptors (see <a
* href="https://issues.apache.org/jira/browse/LUCENE-2239">LUCENE-2239</a>).
*
* @lucene.experimental */
public IndexSearcher(IndexReader reader, IndexReader[] subReaders, int[] docStarts, ExecutorService executor) {
this.reader = reader;
this.subReaders = subReaders;
this.docStarts = docStarts;
subSearchers = new IndexSearcher[subReaders.length];
for(int i=0;i<subReaders.length;i++) {
subSearchers[i] = new IndexSearcher(subReaders[i]);
}
closeReader = false;
this.executor = executor;
}
private IndexSearcher(IndexReader r, boolean closeReader, ExecutorService executor) {
reader = r;
this.executor = executor;
this.closeReader = closeReader;
List<IndexReader> subReadersList = new ArrayList<IndexReader>();
gatherSubReaders(subReadersList, reader);
subReaders = subReadersList.toArray(new IndexReader[subReadersList.size()]);
docStarts = new int[subReaders.length];
subSearchers = new IndexSearcher[subReaders.length];
int maxDoc = 0;
for (int i = 0; i < subReaders.length; i++) {
docStarts[i] = maxDoc;
maxDoc += subReaders[i].maxDoc();
if (subReaders[i] == r) {
subSearchers[i] = this;
} else {
subSearchers[i] = new IndexSearcher(subReaders[i]);
}
}
}
@ -118,59 +186,219 @@ public class IndexSearcher extends Searcher {
return reader;
}
/** Returns the atomic subReaders used by this searcher. */
public IndexReader[] getSubReaders() {
return subReaders;
}
/** Expert: Returns one greater than the largest possible document number.
*
* @see org.apache.lucene.index.IndexReader#maxDoc()
*/
public int maxDoc() {
return reader.maxDoc();
}
/** Returns total docFreq for this term. */
public int docFreq(final Term term) throws IOException {
if (executor == null) {
return reader.docFreq(term);
} else {
final ExecutionHelper<Integer> runner = new ExecutionHelper<Integer>(executor);
for(int i = 0; i < subReaders.length; i++) {
final IndexSearcher searchable = subSearchers[i];
runner.submit(new Callable<Integer>() {
public Integer call() throws IOException {
return Integer.valueOf(searchable.docFreq(term));
}
});
}
int docFreq = 0;
for (Integer num : runner) {
docFreq += num.intValue();
}
return docFreq;
}
}
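// Illustrative sanity check (not in the commit): with or without an
// executor, docFreq over the top-level reader equals the sum over the
// atomic sub-readers, which is what the parallel path above computes:
//
//   int sum = 0;
//   for (IndexReader sub : searcher.getSubReaders()) {
//     sum += sub.docFreq(term);
//   }
//   assert sum == searcher.docFreq(term);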
/* Sugar for .getIndexReader().document(docID) */
public Document doc(int docID) throws CorruptIndexException, IOException {
return reader.document(docID);
}
/* Sugar for .getIndexReader().document(docID, fieldSelector) */
public Document doc(int docID, FieldSelector fieldSelector) throws CorruptIndexException, IOException {
return reader.document(docID, fieldSelector);
}
/** Expert: Set the Similarity implementation used by this Searcher.
*
* @see Similarity#setDefault(Similarity)
*/
public void setSimilarity(Similarity similarity) {
this.similarity = similarity;
}
public Similarity getSimilarity() {
return similarity;
}
/**
* Note that the underlying IndexReader is not closed if
* IndexSearcher was constructed with IndexSearcher(IndexReader r).
* If the IndexReader was supplied implicitly by specifying a directory, then
* the IndexReader gets closed.
* the IndexReader is closed.
*/
@Override
public void close() throws IOException {
if(closeReader)
if (closeReader) {
reader.close();
}
// inherit javadoc
@Override
public int docFreq(Term term) throws IOException {
return reader.docFreq(term);
}
// inherit javadoc
@Override
public Document doc(int i) throws CorruptIndexException, IOException {
return reader.document(i);
/** Finds the top <code>n</code>
* hits for <code>query</code>.
*
* @throws BooleanQuery.TooManyClauses
*/
public TopDocs search(Query query, int n)
throws IOException {
return search(query, null, n);
}
// inherit javadoc
@Override
public Document doc(int i, FieldSelector fieldSelector) throws CorruptIndexException, IOException {
return reader.document(i, fieldSelector);
/** Finds the top <code>n</code>
* hits for <code>query</code>, applying <code>filter</code> if non-null.
*
* @throws BooleanQuery.TooManyClauses
*/
public TopDocs search(Query query, Filter filter, int n)
throws IOException {
return search(createWeight(query), filter, n);
}
// inherit javadoc
@Override
public int maxDoc() throws IOException {
return reader.maxDoc();
/** Lower-level search API.
*
* <p>{@link Collector#collect(int)} is called for every matching
* document.
*
* <p>Applications should only use this if they need <i>all</i> of the
* matching documents. The high-level search API ({@link
* IndexSearcher#search(Query, Filter, int)}) is usually more efficient, as it skips
* non-high-scoring hits.
*
* @param query to match documents
* @param filter if non-null, used to permit documents to be collected.
* @param results to receive hits
* @throws BooleanQuery.TooManyClauses
*/
public void search(Query query, Filter filter, Collector results)
throws IOException {
search(createWeight(query), filter, results);
}
// inherit javadoc
@Override
public TopDocs search(Weight weight, Filter filter, int nDocs) throws IOException {
/** Lower-level search API.
*
* <p>{@link Collector#collect(int)} is called for every matching document.
*
* <p>Applications should only use this if they need <i>all</i> of the
* matching documents. The high-level search API ({@link
* IndexSearcher#search(Query, int)}) is usually more efficient, as it skips
* non-high-scoring hits.
* <p>Note: The <code>score</code> passed to this method is a raw score.
* In other words, the score will not necessarily be a float whose value is
* between 0 and 1.
* @throws BooleanQuery.TooManyClauses
*/
public void search(Query query, Collector results)
throws IOException {
search(createWeight(query), null, results);
}
/** Search implementation with arbitrary sorting. Finds
* the top <code>n</code> hits for <code>query</code>, applying
* <code>filter</code> if non-null, and sorting the hits by the criteria in
* <code>sort</code>.
*
* <p>NOTE: this does not compute scores by default; use
* {@link IndexSearcher#setDefaultFieldSortScoring} to
* enable scoring.
*
* @throws BooleanQuery.TooManyClauses
*/
public TopFieldDocs search(Query query, Filter filter, int n,
Sort sort) throws IOException {
return search(createWeight(query), filter, n, sort);
}
/**
* Search implementation with arbitrary sorting and no filter.
* @param query The query to search for
* @param n Return only the top n results
* @param sort The {@link org.apache.lucene.search.Sort} object
* @return The top docs, sorted according to the supplied {@link org.apache.lucene.search.Sort} instance
* @throws IOException
*/
public TopFieldDocs search(Query query, int n,
Sort sort) throws IOException {
return search(createWeight(query), null, n, sort);
}
/** Expert: Low-level search implementation. Finds the top <code>n</code>
* hits for <code>query</code>, applying <code>filter</code> if non-null.
*
* <p>Applications should usually call {@link IndexSearcher#search(Query,int)} or
* {@link IndexSearcher#search(Query,Filter,int)} instead.
* @throws BooleanQuery.TooManyClauses
*/
protected TopDocs search(Weight weight, Filter filter, int nDocs) throws IOException {
if (executor == null) {
// single thread
int limit = reader.maxDoc();
if (limit == 0) {
limit = 1;
}
nDocs = Math.min(nDocs, limit);
TopScoreDocCollector collector = TopScoreDocCollector.create(nDocs, !weight.scoresDocsOutOfOrder());
search(weight, filter, collector);
return collector.topDocs();
} else {
final HitQueue hq = new HitQueue(nDocs, false);
final Lock lock = new ReentrantLock();
final ExecutionHelper<TopDocs> runner = new ExecutionHelper<TopDocs>(executor);
for (int i = 0; i < subReaders.length; i++) { // search each sub
runner.submit(
new MultiSearcherCallableNoSort(lock, subSearchers[i], weight, filter, nDocs, hq, i, docStarts));
}
@Override
public TopFieldDocs search(Weight weight, Filter filter,
int totalHits = 0;
float maxScore = Float.NEGATIVE_INFINITY;
for (final TopDocs topDocs : runner) {
totalHits += topDocs.totalHits;
maxScore = Math.max(maxScore, topDocs.getMaxScore());
}
final ScoreDoc[] scoreDocs = new ScoreDoc[hq.size()];
for (int i = hq.size() - 1; i >= 0; i--) // put docs in array
scoreDocs[i] = hq.pop();
return new TopDocs(totalHits, scoreDocs, maxScore);
}
}
/** Expert: Low-level search implementation with arbitrary sorting. Finds
* the top <code>n</code> hits for <code>query</code>, applying
* <code>filter</code> if non-null, and sorting the hits by the criteria in
* <code>sort</code>.
*
* <p>Applications should usually call {@link
* IndexSearcher#search(Query,Filter,int,Sort)} instead.
*
* @throws BooleanQuery.TooManyClauses
*/
protected TopFieldDocs search(Weight weight, Filter filter,
final int nDocs, Sort sort) throws IOException {
return search(weight, filter, nDocs, sort, true);
}
@ -186,10 +414,14 @@ public class IndexSearcher extends Searcher {
* then pass that to {@link #search(Weight, Filter,
* Collector)}.</p>
*/
public TopFieldDocs search(Weight weight, Filter filter, int nDocs,
protected TopFieldDocs search(Weight weight, Filter filter, int nDocs,
Sort sort, boolean fillFields)
throws IOException {
if (sort == null) throw new NullPointerException();
if (executor == null) {
// single thread
int limit = reader.maxDoc();
if (limit == 0) {
limit = 1;
@ -200,12 +432,56 @@ public class IndexSearcher extends Searcher {
fillFields, fieldSortDoTrackScores, fieldSortDoMaxScore, !weight.scoresDocsOutOfOrder());
search(weight, filter, collector);
return (TopFieldDocs) collector.topDocs();
} else {
// TODO: make this respect fillFields
final FieldDocSortedHitQueue hq = new FieldDocSortedHitQueue(nDocs);
final Lock lock = new ReentrantLock();
final ExecutionHelper<TopFieldDocs> runner = new ExecutionHelper<TopFieldDocs>(executor);
for (int i = 0; i < subReaders.length; i++) { // search each sub
runner.submit(
new MultiSearcherCallableWithSort(lock, subSearchers[i], weight, filter, nDocs, hq, sort, i, docStarts));
}
int totalHits = 0;
float maxScore = Float.NEGATIVE_INFINITY;
for (final TopFieldDocs topFieldDocs : runner) {
totalHits += topFieldDocs.totalHits;
maxScore = Math.max(maxScore, topFieldDocs.getMaxScore());
}
final ScoreDoc[] scoreDocs = new ScoreDoc[hq.size()];
for (int i = hq.size() - 1; i >= 0; i--) // put docs in array
scoreDocs[i] = hq.pop();
return new TopFieldDocs(totalHits, scoreDocs, hq.getFields(), maxScore);
}
}
@Override
public void search(Weight weight, Filter filter, Collector collector)
/**
* Lower-level search API.
*
* <p>
* {@link Collector#collect(int)} is called for every document.
*
* <p>
* Applications should only use this if they need <i>all</i> of the matching
* documents. The high-level search API ({@link IndexSearcher#search(Query,int)}) is
* usually more efficient, as it skips non-high-scoring hits.
*
* @param weight
* to match documents
* @param filter
* if non-null, used to permit documents to be collected.
* @param collector
* to receive hits
* @throws BooleanQuery.TooManyClauses
*/
protected void search(Weight weight, Filter filter, Collector collector)
throws IOException {
// TODO: should we make this
// threaded...? the Collector could be sync'd?
// always use single thread:
if (filter == null) {
for (int i = 0; i < subReaders.length; i++) { // search each subreader
collector.setNextReader(subReaders[i], docStarts[i]);
@ -268,7 +544,9 @@ public class IndexSearcher extends Searcher {
}
}
@Override
/** Expert: called to re-write queries into primitive queries.
* @throws BooleanQuery.TooManyClauses
*/
public Query rewrite(Query original) throws IOException {
Query query = original;
for (Query rewrittenQuery = query.rewrite(reader); rewrittenQuery != query;
@ -278,8 +556,30 @@ public class IndexSearcher extends Searcher {
return query;
}
@Override
public Explanation explain(Weight weight, int doc) throws IOException {
/** Returns an Explanation that describes how <code>doc</code> scored against
* <code>query</code>.
*
* <p>This is intended to be used in developing Similarity implementations,
* and, for good performance, should not be displayed with every hit.
* Computing an explanation is as expensive as executing the query over the
* entire index.
*/
public Explanation explain(Query query, int doc) throws IOException {
return explain(createWeight(query), doc);
}
/** Expert: low-level implementation method.
* Returns an Explanation that describes how <code>doc</code> scored against
* <code>weight</code>.
*
* <p>This is intended to be used in developing Similarity implementations,
* and, for good performance, should not be displayed with every hit.
* Computing an explanation is as expensive as executing the query over the
* entire index.
* <p>Applications should call {@link IndexSearcher#explain(Query, int)}.
* @throws BooleanQuery.TooManyClauses
*/
protected Explanation explain(Weight weight, int doc) throws IOException {
int n = ReaderUtil.subIndex(doc, docStarts);
int deBasedDoc = doc - docStarts[n];
@ -305,4 +605,175 @@ public class IndexSearcher extends Searcher {
fieldSortDoTrackScores = doTrackScores;
fieldSortDoMaxScore = doMaxScore;
}
/**
* Creates a {@link Weight} for <code>query</code>.
* @return the new weight
*/
protected Weight createWeight(Query query) throws IOException {
return query.weight(this);
}
/**
* A {@link Callable} that searches a single sub-searcher
*/
private static final class MultiSearcherCallableNoSort implements Callable<TopDocs> {
private final Lock lock;
private final IndexSearcher searchable;
private final Weight weight;
private final Filter filter;
private final int nDocs;
private final int i;
private final HitQueue hq;
private final int[] starts;
public MultiSearcherCallableNoSort(Lock lock, IndexSearcher searchable, Weight weight,
Filter filter, int nDocs, HitQueue hq, int i, int[] starts) {
this.lock = lock;
this.searchable = searchable;
this.weight = weight;
this.filter = filter;
this.nDocs = nDocs;
this.hq = hq;
this.i = i;
this.starts = starts;
}
public TopDocs call() throws IOException {
final TopDocs docs = searchable.search (weight, filter, nDocs);
final ScoreDoc[] scoreDocs = docs.scoreDocs;
for (int j = 0; j < scoreDocs.length; j++) { // merge scoreDocs into hq
final ScoreDoc scoreDoc = scoreDocs[j];
scoreDoc.doc += starts[i]; // convert doc
//it would be so nice if we had a thread-safe insert
lock.lock();
try {
if (scoreDoc == hq.insertWithOverflow(scoreDoc))
break;
} finally {
lock.unlock();
}
}
return docs;
}
}
/**
* A {@link Callable} that searches a single sub-searcher, collecting sorted hits
*/
private static final class MultiSearcherCallableWithSort implements Callable<TopFieldDocs> {
private final Lock lock;
private final IndexSearcher searchable;
private final Weight weight;
private final Filter filter;
private final int nDocs;
private final int i;
private final FieldDocSortedHitQueue hq;
private final int[] starts;
private final Sort sort;
public MultiSearcherCallableWithSort(Lock lock, IndexSearcher searchable, Weight weight,
Filter filter, int nDocs, FieldDocSortedHitQueue hq, Sort sort, int i, int[] starts) {
this.lock = lock;
this.searchable = searchable;
this.weight = weight;
this.filter = filter;
this.nDocs = nDocs;
this.hq = hq;
this.i = i;
this.starts = starts;
this.sort = sort;
}
public TopFieldDocs call() throws IOException {
final TopFieldDocs docs = searchable.search (weight, filter, nDocs, sort);
// If one of the Sort fields is FIELD_DOC, need to fix its values, so that
// it will break ties by doc Id properly. Otherwise, it will compare to
// 'relative' doc Ids, that belong to two different searchables.
for (int j = 0; j < docs.fields.length; j++) {
if (docs.fields[j].getType() == SortField.DOC) {
// iterate over the score docs and change their fields value
for (int j2 = 0; j2 < docs.scoreDocs.length; j2++) {
FieldDoc fd = (FieldDoc) docs.scoreDocs[j2];
fd.fields[j] = Integer.valueOf(((Integer) fd.fields[j]).intValue() + starts[i]);
}
break;
}
}
lock.lock();
try {
hq.setFields(docs.fields);
} finally {
lock.unlock();
}
final ScoreDoc[] scoreDocs = docs.scoreDocs;
for (int j = 0; j < scoreDocs.length; j++) { // merge scoreDocs into hq
final FieldDoc fieldDoc = (FieldDoc) scoreDocs[j];
fieldDoc.doc += starts[i]; // convert doc
//it would be so nice if we had a thread-safe insert
lock.lock();
try {
if (fieldDoc == hq.insertWithOverflow(fieldDoc))
break;
} finally {
lock.unlock();
}
}
return docs;
}
}
/**
* A helper class that wraps a {@link CompletionService} and provides an
* iterable interface to the completed {@link Callable} instances.
*
* @param <T>
* the type of the {@link Callable} return value
*/
private static final class ExecutionHelper<T> implements Iterator<T>, Iterable<T> {
private final CompletionService<T> service;
private int numTasks;
ExecutionHelper(final Executor executor) {
this.service = new ExecutorCompletionService<T>(executor);
}
public boolean hasNext() {
return numTasks > 0;
}
public void submit(Callable<T> task) {
this.service.submit(task);
++numTasks;
}
public T next() {
if(!this.hasNext())
throw new NoSuchElementException();
try {
return service.take().get();
} catch (InterruptedException e) {
throw new ThreadInterruptedException(e);
} catch (ExecutionException e) {
throw new RuntimeException(e);
} finally {
--numTasks;
}
}
public void remove() {
throw new UnsupportedOperationException();
}
public Iterator<T> iterator() {
// use the shortcut here - this is only used in a private context
return this;
}
}
}
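Taken together, a minimal end-to-end sketch of the collapsed API (the index path, field names, and pool size below are assumptions, not part of this commit; the usual org.apache.lucene.index/search/store imports are implied). The Query-based methods are now the public surface; the Weight-based overloads above are protected plumbing:

    ExecutorService pool = Executors.newFixedThreadPool(4);
    IndexReader reader = IndexReader.open(FSDirectory.open(new File("index")), true);
    IndexSearcher searcher = new IndexSearcher(reader, pool);
    try {
      Query q = new TermQuery(new Term("contents", "lucene"));
      TopDocs top = searcher.search(q, null, 10);                  // top hits
      TopFieldDocs byDate = searcher.search(q, null, 10,
          new Sort(new SortField("date", SortField.STRING)));      // sorted
      Explanation why = searcher.explain(q, top.scoreDocs[0].doc); // debugging
      System.out.println(top.totalHits + " hits; " + why.toString());
    } finally {
      searcher.close();  // the reader was passed in explicitly, so it stays open
      reader.close();
      pool.shutdown();   // the searcher never shuts the pool down
    }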

View File

@ -95,7 +95,7 @@ public class MatchAllDocsQuery extends Query {
private float queryWeight;
private float queryNorm;
public MatchAllDocsWeight(Searcher searcher) {
public MatchAllDocsWeight(IndexSearcher searcher) {
this.similarity = searcher.getSimilarity();
}
@ -147,7 +147,7 @@ public class MatchAllDocsQuery extends Query {
}
@Override
public Weight createWeight(Searcher searcher) {
public Weight createWeight(IndexSearcher searcher) {
return new MatchAllDocsWeight(searcher);
}

View File

@ -134,7 +134,7 @@ public class MultiPhraseQuery extends Query {
private float queryNorm;
private float queryWeight;
public MultiPhraseWeight(Searcher searcher)
public MultiPhraseWeight(IndexSearcher searcher)
throws IOException {
this.similarity = getSimilarity(searcher);
@ -324,7 +324,7 @@ public class MultiPhraseQuery extends Query {
}
@Override
public Weight createWeight(Searcher searcher) throws IOException {
public Weight createWeight(IndexSearcher searcher) throws IOException {
return new MultiPhraseWeight(searcher);
}

View File

@ -1,461 +0,0 @@
package org.apache.lucene.search;
/**
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
import org.apache.lucene.document.Document;
import org.apache.lucene.document.FieldSelector;
import org.apache.lucene.index.CorruptIndexException;
import org.apache.lucene.index.Term;
import org.apache.lucene.index.IndexReader;
import org.apache.lucene.util.ReaderUtil;
import org.apache.lucene.util.DummyConcurrentLock;
import java.io.IOException;
import java.util.HashMap;
import java.util.HashSet;
import java.util.Map;
import java.util.Set;
import java.util.concurrent.Callable;
import java.util.concurrent.locks.Lock;
/** Implements search over a set of <code>Searchables</code>.
*
* <p>Applications usually need only call the inherited {@link #search(Query,int)}
* or {@link #search(Query,Filter,int)} methods.
*/
public class MultiSearcher extends Searcher {
/**
* Document frequency cache acting as a dummy Searcher. This class is not a
* full-fledged Searcher; it only supports the methods necessary to
* initialize Weights.
*/
private static class CachedDfSource extends Searcher {
private final Map<Term,Integer> dfMap; // Map from Terms to corresponding doc freqs
private final int maxDoc; // document count
public CachedDfSource(Map<Term,Integer> dfMap, int maxDoc, Similarity similarity) {
this.dfMap = dfMap;
this.maxDoc = maxDoc;
setSimilarity(similarity);
}
@Override
public int docFreq(Term term) {
int df;
try {
df = dfMap.get(term).intValue();
} catch (NullPointerException e) {
throw new IllegalArgumentException("df for term " + term.text()
+ " not available");
}
return df;
}
@Override
public int[] docFreqs(Term[] terms) {
final int[] result = new int[terms.length];
for (int i = 0; i < terms.length; i++) {
result[i] = docFreq(terms[i]);
}
return result;
}
@Override
public int maxDoc() {
return maxDoc;
}
@Override
public Query rewrite(Query query) {
// this is a bit of a hack. We know that a query which
// creates a Weight based on this Dummy-Searcher is
// always already rewritten (see preparedWeight()).
// Therefore we just return the unmodified query here
return query;
}
@Override
public void close() {
throw new UnsupportedOperationException();
}
@Override
public Document doc(int i) {
throw new UnsupportedOperationException();
}
@Override
public Document doc(int i, FieldSelector fieldSelector) {
throw new UnsupportedOperationException();
}
@Override
public Explanation explain(Weight weight,int doc) {
throw new UnsupportedOperationException();
}
@Override
public void search(Weight weight, Filter filter, Collector results) {
throw new UnsupportedOperationException();
}
@Override
public TopDocs search(Weight weight,Filter filter,int n) {
throw new UnsupportedOperationException();
}
@Override
public TopFieldDocs search(Weight weight,Filter filter,int n,Sort sort) {
throw new UnsupportedOperationException();
}
}
private Searchable[] searchables;
private int[] starts;
private int maxDoc = 0;
/** Creates a searcher which searches <i>searchers</i>. */
public MultiSearcher(Searchable... searchables) throws IOException {
this.searchables = searchables;
starts = new int[searchables.length + 1]; // build starts array
for (int i = 0; i < searchables.length; i++) {
starts[i] = maxDoc;
maxDoc += searchables[i].maxDoc(); // compute maxDocs
}
starts[searchables.length] = maxDoc;
}
/** Return the array of {@link Searchable}s this searches. */
public Searchable[] getSearchables() {
return searchables;
}
protected int[] getStarts() {
return starts;
}
// inherit javadoc
@Override
public void close() throws IOException {
for (int i = 0; i < searchables.length; i++)
searchables[i].close();
}
@Override
public int docFreq(Term term) throws IOException {
int docFreq = 0;
for (int i = 0; i < searchables.length; i++)
docFreq += searchables[i].docFreq(term);
return docFreq;
}
// inherit javadoc
@Override
public Document doc(int n) throws CorruptIndexException, IOException {
int i = subSearcher(n); // find searcher index
return searchables[i].doc(n - starts[i]); // dispatch to searcher
}
// inherit javadoc
@Override
public Document doc(int n, FieldSelector fieldSelector) throws CorruptIndexException, IOException {
int i = subSearcher(n); // find searcher index
return searchables[i].doc(n - starts[i], fieldSelector); // dispatch to searcher
}
/** Returns index of the searcher for document <code>n</code> in the array
* used to construct this searcher. */
public int subSearcher(int n) { // find searcher for doc n:
return ReaderUtil.subIndex(n, starts);
}
/** Returns the document number of document <code>n</code> within its
* sub-index. */
public int subDoc(int n) {
return n - starts[subSearcher(n)];
}
@Override
public int maxDoc() throws IOException {
return maxDoc;
}
@Override
public TopDocs search(Weight weight, Filter filter, int nDocs)
throws IOException {
nDocs = Math.min(nDocs, maxDoc());
final HitQueue hq = new HitQueue(nDocs, false);
int totalHits = 0;
for (int i = 0; i < searchables.length; i++) { // search each searcher
final TopDocs docs = new MultiSearcherCallableNoSort(DummyConcurrentLock.INSTANCE,
searchables[i], weight, filter, nDocs, hq, i, starts).call();
totalHits += docs.totalHits; // update totalHits
}
final ScoreDoc[] scoreDocs = new ScoreDoc[hq.size()];
for (int i = hq.size()-1; i >= 0; i--) // put docs in array
scoreDocs[i] = hq.pop();
float maxScore = (totalHits==0) ? Float.NEGATIVE_INFINITY : scoreDocs[0].score;
return new TopDocs(totalHits, scoreDocs, maxScore);
}
@Override
public TopFieldDocs search (Weight weight, Filter filter, int n, Sort sort) throws IOException {
n = Math.min(n, maxDoc());
FieldDocSortedHitQueue hq = new FieldDocSortedHitQueue(n);
int totalHits = 0;
float maxScore=Float.NEGATIVE_INFINITY;
for (int i = 0; i < searchables.length; i++) { // search each searcher
final TopFieldDocs docs = new MultiSearcherCallableWithSort(DummyConcurrentLock.INSTANCE,
searchables[i], weight, filter, n, hq, sort, i, starts).call();
totalHits += docs.totalHits; // update totalHits
maxScore = Math.max(maxScore, docs.getMaxScore());
}
final ScoreDoc[] scoreDocs = new ScoreDoc[hq.size()];
for (int i = hq.size() - 1; i >= 0; i--) // put docs in array
scoreDocs[i] = hq.pop();
return new TopFieldDocs (totalHits, scoreDocs, hq.getFields(), maxScore);
}
// inherit javadoc
@Override
public void search(Weight weight, Filter filter, final Collector collector)
throws IOException {
for (int i = 0; i < searchables.length; i++) {
final int start = starts[i];
final Collector hc = new Collector() {
@Override
public void setScorer(Scorer scorer) throws IOException {
collector.setScorer(scorer);
}
@Override
public void collect(int doc) throws IOException {
collector.collect(doc);
}
@Override
public void setNextReader(IndexReader reader, int docBase) throws IOException {
collector.setNextReader(reader, start + docBase);
}
@Override
public boolean acceptsDocsOutOfOrder() {
return collector.acceptsDocsOutOfOrder();
}
};
searchables[i].search(weight, filter, hc);
}
}
@Override
public Query rewrite(Query original) throws IOException {
final Query[] queries = new Query[searchables.length];
for (int i = 0; i < searchables.length; i++) {
queries[i] = searchables[i].rewrite(original);
}
return queries[0].combine(queries);
}
@Override
public Explanation explain(Weight weight, int doc) throws IOException {
final int i = subSearcher(doc); // find searcher index
return searchables[i].explain(weight, doc - starts[i]); // dispatch to searcher
}
/**
* Create weight in multiple index scenario.
*
* Distributed query processing is done in the following steps:
* 1. rewrite query
* 2. extract necessary terms
* 3. collect dfs for these terms from the Searchables
* 4. create query weight using aggregate dfs.
* 5. distribute that weight to Searchables
* 6. merge results
*
* Steps 1-4 are done here, 5+6 in the search() methods
*
* @return rewritten queries
*/
@Override
protected Weight createWeight(Query original) throws IOException {
// step 1
final Query rewrittenQuery = rewrite(original);
// step 2
final Set<Term> terms = new HashSet<Term>();
rewrittenQuery.extractTerms(terms);
// step3
final Map<Term,Integer> dfMap = createDocFrequencyMap(terms);
// step4
final int numDocs = maxDoc();
final CachedDfSource cacheSim = new CachedDfSource(dfMap, numDocs, getSimilarity());
return rewrittenQuery.weight(cacheSim);
}
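// Condensed, illustrative sketch of steps 1-4 for two searchables s1 and s2
// (names are hypothetical; CachedDfSource is the private class above):
//
//   Query rewritten = rewrite(original);                        // step 1
//   Set<Term> terms = new HashSet<Term>();
//   rewritten.extractTerms(terms);                              // step 2
//   Map<Term,Integer> dfMap = new HashMap<Term,Integer>();
//   for (Term t : terms) {                                      // step 3
//     dfMap.put(t, Integer.valueOf(s1.docFreq(t) + s2.docFreq(t)));
//   }
//   Searcher dfSource = new CachedDfSource(dfMap, maxDoc(), getSimilarity());
//   Weight w = rewritten.weight(dfSource);                      // step 4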
/**
* Collects the document frequency for the given terms form all searchables
* @param terms term set used to collect the document frequency form all
* searchables
* @return a map with a term as the key and the terms aggregated document
* frequency as a value
* @throws IOException if a searchable throws an {@link IOException}
*/
Map<Term, Integer> createDocFrequencyMap(final Set<Term> terms) throws IOException {
final Term[] allTermsArray = terms.toArray(new Term[terms.size()]);
final int[] aggregatedDfs = new int[allTermsArray.length];
for (Searchable searchable : searchables) {
final int[] dfs = searchable.docFreqs(allTermsArray);
for(int j=0; j<aggregatedDfs.length; j++){
aggregatedDfs[j] += dfs[j];
}
}
final HashMap<Term,Integer> dfMap = new HashMap<Term,Integer>();
for(int i=0; i<allTermsArray.length; i++) {
dfMap.put(allTermsArray[i], Integer.valueOf(aggregatedDfs[i]));
}
return dfMap;
}
/**
* A {@link Callable} that searches a single searchable
*/
static final class MultiSearcherCallableNoSort implements Callable<TopDocs> {
private final Lock lock;
private final Searchable searchable;
private final Weight weight;
private final Filter filter;
private final int nDocs;
private final int i;
private final HitQueue hq;
private final int[] starts;
public MultiSearcherCallableNoSort(Lock lock, Searchable searchable, Weight weight,
Filter filter, int nDocs, HitQueue hq, int i, int[] starts) {
this.lock = lock;
this.searchable = searchable;
this.weight = weight;
this.filter = filter;
this.nDocs = nDocs;
this.hq = hq;
this.i = i;
this.starts = starts;
}
public TopDocs call() throws IOException {
final TopDocs docs = searchable.search (weight, filter, nDocs);
final ScoreDoc[] scoreDocs = docs.scoreDocs;
for (int j = 0; j < scoreDocs.length; j++) { // merge scoreDocs into hq
final ScoreDoc scoreDoc = scoreDocs[j];
scoreDoc.doc += starts[i]; // convert doc
//it would be so nice if we had a thread-safe insert
lock.lock();
try {
if (scoreDoc == hq.insertWithOverflow(scoreDoc))
break;
} finally {
lock.unlock();
}
}
return docs;
}
}
/**
* A {@link Callable} that searches a single searchable, collecting sorted hits
*/
static final class MultiSearcherCallableWithSort implements Callable<TopFieldDocs> {
private final Lock lock;
private final Searchable searchable;
private final Weight weight;
private final Filter filter;
private final int nDocs;
private final int i;
private final FieldDocSortedHitQueue hq;
private final int[] starts;
private final Sort sort;
public MultiSearcherCallableWithSort(Lock lock, Searchable searchable, Weight weight,
Filter filter, int nDocs, FieldDocSortedHitQueue hq, Sort sort, int i, int[] starts) {
this.lock = lock;
this.searchable = searchable;
this.weight = weight;
this.filter = filter;
this.nDocs = nDocs;
this.hq = hq;
this.i = i;
this.starts = starts;
this.sort = sort;
}
public TopFieldDocs call() throws IOException {
final TopFieldDocs docs = searchable.search (weight, filter, nDocs, sort);
// If one of the Sort fields is FIELD_DOC, need to fix its values, so that
// it will break ties by doc Id properly. Otherwise, it will compare to
// 'relative' doc Ids, that belong to two different searchables.
for (int j = 0; j < docs.fields.length; j++) {
if (docs.fields[j].getType() == SortField.DOC) {
// iterate over the score docs and change their fields value
for (int j2 = 0; j2 < docs.scoreDocs.length; j2++) {
FieldDoc fd = (FieldDoc) docs.scoreDocs[j2];
fd.fields[j] = Integer.valueOf(((Integer) fd.fields[j]).intValue() + starts[i]);
}
break;
}
}
lock.lock();
try {
hq.setFields(docs.fields);
} finally {
lock.unlock();
}
final ScoreDoc[] scoreDocs = docs.scoreDocs;
for (int j = 0; j < scoreDocs.length; j++) { // merge scoreDocs into hq
final FieldDoc fieldDoc = (FieldDoc) scoreDocs[j];
fieldDoc.doc += starts[i]; // convert doc
//it would be so nice if we had a thread-safe insert
lock.lock();
try {
if (fieldDoc == hq.insertWithOverflow(fieldDoc))
break;
} finally {
lock.unlock();
}
}
return docs;
}
}
}

View File

@ -1,290 +0,0 @@
package org.apache.lucene.search;
/**
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
import java.io.IOException;
import java.util.HashMap;
import java.util.Iterator;
import java.util.NoSuchElementException;
import java.util.Set;
import java.util.concurrent.Callable;
import java.util.concurrent.CompletionService;
import java.util.concurrent.ExecutionException;
import java.util.concurrent.Executor;
import java.util.concurrent.ExecutorCompletionService;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.locks.Lock;
import java.util.concurrent.locks.ReentrantLock;
import org.apache.lucene.index.IndexReader;
import org.apache.lucene.index.Term;
import org.apache.lucene.util.NamedThreadFactory;
import org.apache.lucene.util.ThreadInterruptedException;
/** Implements parallel search over a set of <code>Searchables</code>.
*
* <p>Applications usually need only call the inherited {@link #search(Query,int)}
* or {@link #search(Query,Filter,int)} methods.
*/
public class ParallelMultiSearcher extends MultiSearcher {
private final ExecutorService executor;
private final Searchable[] searchables;
private final int[] starts;
/** Creates a {@link Searchable} which searches <i>searchables</i> with the default
* executor service (a cached thread pool). */
public ParallelMultiSearcher(Searchable... searchables) throws IOException {
this(Executors.newCachedThreadPool(new NamedThreadFactory(ParallelMultiSearcher.class.getSimpleName())), searchables);
}
/**
* Creates a {@link Searchable} which searches <i>searchables</i> with the specified ExecutorService.
*/
public ParallelMultiSearcher(ExecutorService executor, Searchable... searchables) throws IOException {
super(searchables);
this.searchables = searchables;
this.starts = getStarts();
this.executor = executor;
}
/**
* Executes each {@link Searchable}'s docFreq() in its own thread and waits for each search to complete and merge
* the results back together.
*/
@Override
public int docFreq(final Term term) throws IOException {
final ExecutionHelper<Integer> runner = new ExecutionHelper<Integer>(executor);
for(int i = 0; i < searchables.length; i++) {
final Searchable searchable = searchables[i];
runner.submit(new Callable<Integer>() {
public Integer call() throws IOException {
return Integer.valueOf(searchable.docFreq(term));
}
});
}
int docFreq = 0;
for (Integer num : runner) {
docFreq += num.intValue();
}
return docFreq;
}
/**
* A search implementation which executes each
* {@link Searchable} in its own thread and waits for each search to complete and merge
* the results back together.
*/
@Override
public TopDocs search(Weight weight, Filter filter, int nDocs) throws IOException {
final HitQueue hq = new HitQueue(nDocs, false);
final Lock lock = new ReentrantLock();
final ExecutionHelper<TopDocs> runner = new ExecutionHelper<TopDocs>(executor);
for (int i = 0; i < searchables.length; i++) { // search each searchable
runner.submit(
new MultiSearcherCallableNoSort(lock, searchables[i], weight, filter, nDocs, hq, i, starts));
}
int totalHits = 0;
float maxScore = Float.NEGATIVE_INFINITY;
for (final TopDocs topDocs : runner) {
totalHits += topDocs.totalHits;
maxScore = Math.max(maxScore, topDocs.getMaxScore());
}
final ScoreDoc[] scoreDocs = new ScoreDoc[hq.size()];
for (int i = hq.size() - 1; i >= 0; i--) // put docs in array
scoreDocs[i] = hq.pop();
return new TopDocs(totalHits, scoreDocs, maxScore);
}
/**
* A search implementation allowing sorting which spawns a new thread for each
* Searchable, waits for each search to complete and merges
* the results back together.
*/
@Override
public TopFieldDocs search(Weight weight, Filter filter, int nDocs, Sort sort) throws IOException {
if (sort == null) throw new NullPointerException();
final FieldDocSortedHitQueue hq = new FieldDocSortedHitQueue(nDocs);
final Lock lock = new ReentrantLock();
final ExecutionHelper<TopFieldDocs> runner = new ExecutionHelper<TopFieldDocs>(executor);
for (int i = 0; i < searchables.length; i++) { // search each searchable
runner.submit(
new MultiSearcherCallableWithSort(lock, searchables[i], weight, filter, nDocs, hq, sort, i, starts));
}
int totalHits = 0;
float maxScore = Float.NEGATIVE_INFINITY;
for (final TopFieldDocs topFieldDocs : runner) {
totalHits += topFieldDocs.totalHits;
maxScore = Math.max(maxScore, topFieldDocs.getMaxScore());
}
final ScoreDoc[] scoreDocs = new ScoreDoc[hq.size()];
for (int i = hq.size() - 1; i >= 0; i--) // put docs in array
scoreDocs[i] = hq.pop();
return new TopFieldDocs(totalHits, scoreDocs, hq.getFields(), maxScore);
}
/** Lower-level search API.
*
* <p>{@link Collector#collect(int)} is called for every matching document.
*
* <p>Applications should only use this if they need <i>all</i> of the
* matching documents. The high-level search API ({@link
* Searcher#search(Query,int)}) is usually more efficient, as it skips
* non-high-scoring hits.
*
* <p>This method cannot be parallelized, because {@link Collector}
* supports no concurrent access.
*
* @param weight to match documents
* @param filter if non-null, a bitset used to eliminate some documents
* @param collector to receive hits
*/
@Override
public void search(final Weight weight, final Filter filter, final Collector collector)
throws IOException {
for (int i = 0; i < searchables.length; i++) {
final int start = starts[i];
final Collector hc = new Collector() {
@Override
public void setScorer(final Scorer scorer) throws IOException {
collector.setScorer(scorer);
}
@Override
public void collect(final int doc) throws IOException {
collector.collect(doc);
}
@Override
public void setNextReader(final IndexReader reader, final int docBase) throws IOException {
collector.setNextReader(reader, start + docBase);
}
@Override
public boolean acceptsDocsOutOfOrder() {
return collector.acceptsDocsOutOfOrder();
}
};
searchables[i].search(weight, filter, hc);
}
}
@Override
public void close() throws IOException {
executor.shutdown();
super.close();
}
@Override
HashMap<Term, Integer> createDocFrequencyMap(Set<Term> terms) throws IOException {
final Term[] allTermsArray = terms.toArray(new Term[terms.size()]);
final int[] aggregatedDocFreqs = new int[terms.size()];
final ExecutionHelper<int[]> runner = new ExecutionHelper<int[]>(executor);
for (Searchable searchable : searchables) {
runner.submit(
new DocumentFrequencyCallable(searchable, allTermsArray));
}
final int docFreqLen = aggregatedDocFreqs.length;
for (final int[] docFreqs : runner) {
for(int i=0; i < docFreqLen; i++){
aggregatedDocFreqs[i] += docFreqs[i];
}
}
final HashMap<Term,Integer> dfMap = new HashMap<Term,Integer>();
for(int i=0; i<allTermsArray.length; i++) {
dfMap.put(allTermsArray[i], Integer.valueOf(aggregatedDocFreqs[i]));
}
return dfMap;
}
/**
* A {@link Callable} to retrieve the document frequencies for a Term array
*/
private static final class DocumentFrequencyCallable implements Callable<int[]> {
private final Searchable searchable;
private final Term[] terms;
public DocumentFrequencyCallable(Searchable searchable, Term[] terms) {
this.searchable = searchable;
this.terms = terms;
}
public int[] call() throws Exception {
return searchable.docFreqs(terms);
}
}
/**
* A helper class that wraps a {@link CompletionService} and provides an
* iterable interface to the completed {@link Callable} instances.
*
* @param <T>
* the type of the {@link Callable} return value
*/
private static final class ExecutionHelper<T> implements Iterator<T>, Iterable<T> {
private final CompletionService<T> service;
private int numTasks;
ExecutionHelper(final Executor executor) {
this.service = new ExecutorCompletionService<T>(executor);
}
public boolean hasNext() {
return numTasks > 0;
}
public void submit(Callable<T> task) {
this.service.submit(task);
++numTasks;
}
public T next() {
if(!this.hasNext())
throw new NoSuchElementException();
try {
return service.take().get();
} catch (InterruptedException e) {
throw new ThreadInterruptedException(e);
} catch (ExecutionException e) {
throw new RuntimeException(e);
} finally {
--numTasks;
}
}
public void remove() {
throw new UnsupportedOperationException();
}
public Iterator<T> iterator() {
// use the shortcut here - this is only used in a private context
return this;
}
}
}

View File

@ -143,7 +143,7 @@ public class PhraseQuery extends Query {
private float queryWeight;
private IDFExplanation idfExp;
public PhraseWeight(Searcher searcher)
public PhraseWeight(IndexSearcher searcher)
throws IOException {
this.similarity = getSimilarity(searcher);
@ -311,7 +311,7 @@ public class PhraseQuery extends Query {
}
@Override
public Weight createWeight(Searcher searcher) throws IOException {
public Weight createWeight(IndexSearcher searcher) throws IOException {
if (terms.size() == 1) { // optimize one-term case
Term term = terms.get(0);
Query termQuery = new TermQuery(term);

View File

@ -19,8 +19,6 @@ package org.apache.lucene.search;
import java.io.IOException;
import java.util.HashSet;
import java.util.Set;
import org.apache.lucene.index.IndexReader;
@ -89,14 +87,14 @@ public abstract class Query implements java.io.Serializable, Cloneable {
* <p>
* Only implemented by primitive queries, which re-write to themselves.
*/
public Weight createWeight(Searcher searcher) throws IOException {
public Weight createWeight(IndexSearcher searcher) throws IOException {
throw new UnsupportedOperationException();
}
/**
* Expert: Constructs and initializes a Weight for a top-level query.
*/
public Weight weight(Searcher searcher) throws IOException {
public Weight weight(IndexSearcher searcher) throws IOException {
Query query = searcher.rewrite(this);
Weight weight = query.createWeight(searcher);
float sum = weight.sumOfSquaredWeights();
@ -116,52 +114,6 @@ public abstract class Query implements java.io.Serializable, Cloneable {
return this;
}
/** Expert: called when re-writing queries under MultiSearcher.
*
* Create a single query suitable for use by all subsearchers (in 1-1
* correspondence with queries). This is an optimization of the OR of
* all queries. We handle the common optimization cases of equal
* queries and overlapping clauses of boolean OR queries (as generated
* by MultiTermQuery.rewrite()).
* Be careful overriding this method as queries[0] determines which
* method will be called and is not necessarily of the same type as
* the other queries.
*/
public Query combine(Query[] queries) {
HashSet<Query> uniques = new HashSet<Query>();
for (int i = 0; i < queries.length; i++) {
Query query = queries[i];
BooleanClause[] clauses = null;
// check if we can split the query into clauses
boolean splittable = (query instanceof BooleanQuery);
if(splittable){
BooleanQuery bq = (BooleanQuery) query;
splittable = bq.isCoordDisabled();
clauses = bq.getClauses();
for (int j = 0; splittable && j < clauses.length; j++) {
splittable = (clauses[j].getOccur() == BooleanClause.Occur.SHOULD);
}
}
if(splittable){
for (int j = 0; j < clauses.length; j++) {
uniques.add(clauses[j].getQuery());
}
} else {
uniques.add(query);
}
}
// optimization: if we have just one query, just return it
if(uniques.size() == 1){
return uniques.iterator().next();
}
BooleanQuery result = new BooleanQuery(true);
for (final Query query : uniques)
result.add(query, BooleanClause.Occur.SHOULD);
return result;
}
/**
* Expert: adds all terms occurring in this query to the terms set. Only
* works if this query is in its {@link #rewrite rewritten} form.
@ -174,35 +126,11 @@ public abstract class Query implements java.io.Serializable, Cloneable {
}
/** Expert: merges the clauses of a set of BooleanQuery's into a single
* BooleanQuery.
*
*<p>A utility for use by {@link #combine(Query[])} implementations.
*/
public static Query mergeBooleanQueries(BooleanQuery... queries) {
HashSet<BooleanClause> allClauses = new HashSet<BooleanClause>();
for (BooleanQuery booleanQuery : queries) {
for (BooleanClause clause : booleanQuery) {
allClauses.add(clause);
}
}
boolean coordDisabled =
queries.length==0? false : queries[0].isCoordDisabled();
BooleanQuery result = new BooleanQuery(coordDisabled);
for(BooleanClause clause2 : allClauses) {
result.add(clause2);
}
return result;
}
/** Expert: Returns the Similarity implementation to be used for this query.
* Subclasses may override this method to specify their own Similarity
* implementation, perhaps one that delegates through that of the Searcher.
* By default the Searcher's Similarity implementation is returned.*/
public Similarity getSimilarity(Searcher searcher) {
public Similarity getSimilarity(IndexSearcher searcher) {
return searcher.getSimilarity();
}
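
The visible API change in Query is the Weight factory: createWeight and weight now take a concrete IndexSearcher rather than the old Searcher supertype. A minimal sketch of the new call path (illustrative only, not part of this patch; assumes an already-open reader):

import java.io.IOException;
import org.apache.lucene.index.IndexReader;
import org.apache.lucene.index.Term;
import org.apache.lucene.search.IndexSearcher;
import org.apache.lucene.search.TermQuery;
import org.apache.lucene.search.Weight;

class WeightExample {
  // Build a normalized top-level Weight; the searcher is always an
  // IndexSearcher now, so no instanceof dispatch is needed.
  static Weight topLevelWeight(IndexReader reader) throws IOException {
    IndexSearcher searcher = new IndexSearcher(reader);
    return new TermQuery(new Term("body", "lucene")).weight(searcher);
  }
}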

View File

@ -1,165 +0,0 @@
package org.apache.lucene.search;
/**
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
import java.io.IOException;
import java.io.Closeable;
import org.apache.lucene.document.Document;
import org.apache.lucene.document.FieldSelector;
import org.apache.lucene.index.CorruptIndexException;
import org.apache.lucene.index.Term;
/**
* The interface for search implementations.
*
* <p>
* Searchable is the abstract network protocol for searching. Implementations
* provide search over a single index, over multiple indices, and over indices
* on remote servers.
*
* <p>
* Queries, filters and sort criteria are designed to be compact so that they
* may be efficiently passed to a remote index, with only the top-scoring hits
* being returned, rather than every matching hit.
*
* <b>NOTE:</b> this interface is kept public for convenience. Since it is not
* expected to be implemented directly, it may be changed unexpectedly between
* releases.
*/
public interface Searchable extends Closeable {
/**
* Lower-level search API.
*
* <p>
* {@link Collector#collect(int)} is called for every document. <br>
* Collector-based access to remote indexes is discouraged.
*
* <p>
* Applications should only use this if they need <i>all</i> of the matching
* documents. The high-level search API ({@link Searcher#search(Query,int)}) is
* usually more efficient, as it skips non-high-scoring hits.
*
* @param weight
* to match documents
* @param filter
* if non-null, used to permit documents to be collected.
* @param collector
* to receive hits
* @throws BooleanQuery.TooManyClauses
*/
void search(Weight weight, Filter filter, Collector collector) throws IOException;
/** Frees resources associated with this Searchable.
* Be careful not to call this method while you are still using objects
* that reference this Searchable.
*/
void close() throws IOException;
/** Expert: Returns the number of documents containing <code>term</code>.
*
* @see org.apache.lucene.index.IndexReader#docFreq(Term)
*/
int docFreq(Term term) throws IOException;
/** Expert: For each term in the terms array, calculates the number of
* documents containing <code>term</code>. Returns an array with these
* document frequencies. Used to minimize number of remote calls.
*/
int[] docFreqs(Term[] terms) throws IOException;
/** Expert: Returns one greater than the largest possible document number.
*
* @see org.apache.lucene.index.IndexReader#maxDoc()
*/
int maxDoc() throws IOException;
/** Expert: Low-level search implementation. Finds the top <code>n</code>
* hits for <code>query</code>, applying <code>filter</code> if non-null.
*
* <p>Applications should usually call {@link Searcher#search(Query,int)} or
* {@link Searcher#search(Query,Filter,int)} instead.
* @throws BooleanQuery.TooManyClauses
*/
TopDocs search(Weight weight, Filter filter, int n) throws IOException;
/**
* Returns the stored fields of document <code>i</code>.
*
* @see org.apache.lucene.index.IndexReader#document(int)
* @throws CorruptIndexException if the index is corrupt
* @throws IOException if there is a low-level IO error
*/
Document doc(int i) throws CorruptIndexException, IOException;
/**
* Get the {@link org.apache.lucene.document.Document} at the <code>n</code><sup>th</sup> position. The {@link org.apache.lucene.document.FieldSelector}
* may be used to determine what {@link org.apache.lucene.document.Field}s to load and how they should be loaded.
*
* <b>NOTE:</b> If the underlying Reader (more specifically, the underlying <code>FieldsReader</code>) is closed before the lazy {@link org.apache.lucene.document.Field} is
* loaded, an exception may be thrown. If you want the value of a lazy {@link org.apache.lucene.document.Field} to be available after closing, you must
* explicitly load it or fetch the Document again with a new loader.
*
*
* @param n Get the document at the <code>n</code><sup>th</sup> position
* @param fieldSelector The {@link org.apache.lucene.document.FieldSelector} to use to determine what Fields should be loaded on the Document. May be null, in which case all Fields will be loaded.
* @return The stored fields of the {@link org.apache.lucene.document.Document} at the nth position
* @throws CorruptIndexException if the index is corrupt
* @throws IOException if there is a low-level IO error
*
* @see org.apache.lucene.index.IndexReader#document(int, FieldSelector)
* @see org.apache.lucene.document.Fieldable
* @see org.apache.lucene.document.FieldSelector
* @see org.apache.lucene.document.SetBasedFieldSelector
* @see org.apache.lucene.document.LoadFirstFieldSelector
*/
Document doc(int n, FieldSelector fieldSelector) throws CorruptIndexException, IOException;
/** Expert: called to re-write queries into primitive queries.
* @throws BooleanQuery.TooManyClauses
*/
Query rewrite(Query query) throws IOException;
/** Expert: low-level implementation method.
* Returns an Explanation that describes how <code>doc</code> scored against
* <code>weight</code>.
*
* <p>This is intended to be used in developing Similarity implementations,
* and, for good performance, should not be displayed with every hit.
* Computing an explanation is as expensive as executing the query over the
* entire index.
* <p>Applications should call {@link Searcher#explain(Query, int)}.
* @throws BooleanQuery.TooManyClauses
*/
Explanation explain(Weight weight, int doc) throws IOException;
/** Expert: Low-level search implementation with arbitrary sorting. Finds
* the top <code>n</code> hits for <code>query</code>, applying
* <code>filter</code> if non-null, and sorting the hits by the criteria in
* <code>sort</code>.
*
* <p>Applications should usually call {@link
* Searcher#search(Query,Filter,int,Sort)} instead.
*
* @throws BooleanQuery.TooManyClauses
*/
TopFieldDocs search(Weight weight, Filter filter, int n, Sort sort)
throws IOException;
}
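
With Searchable removed, code written against the interface migrates by taking IndexSearcher directly; the doc, docFreq, and search methods it declared all live on IndexSearcher. A hedged before/after sketch (the helper name is made up for illustration):

import java.io.IOException;
import org.apache.lucene.document.Document;
import org.apache.lucene.search.IndexSearcher;
import org.apache.lucene.search.Query;
import org.apache.lucene.search.ScoreDoc;

class SearchableMigration {
  // Before: static void printFirstHit(Searchable s, Query q) { ... }
  static void printFirstHit(IndexSearcher searcher, Query query) throws IOException {
    ScoreDoc[] hits = searcher.search(query, null, 1).scoreDocs;
    if (hits.length > 0) {
      // doc(int) is available on IndexSearcher, as it was on Searchable
      Document d = searcher.doc(hits[0].doc);
      System.out.println(d);
    }
  }
}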

View File

@ -1,183 +0,0 @@
package org.apache.lucene.search;
/**
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
import java.io.IOException;
import org.apache.lucene.document.Document;
import org.apache.lucene.index.CorruptIndexException;
import org.apache.lucene.index.Term;
import org.apache.lucene.document.FieldSelector;
/**
* An abstract base class for search implementations. Implements the main search
* methods.
*
* <p>
* Note that you can only access hits from a Searcher as long as it is not yet
* closed; otherwise an IOException will be thrown.
*/
public abstract class Searcher implements Searchable {
/** Search implementation with arbitrary sorting. Finds
* the top <code>n</code> hits for <code>query</code>, applying
* <code>filter</code> if non-null, and sorting the hits by the criteria in
* <code>sort</code>.
*
* <p>NOTE: this does not compute scores by default; use
* {@link IndexSearcher#setDefaultFieldSortScoring} to
* enable scoring.
*
* @throws BooleanQuery.TooManyClauses
*/
public TopFieldDocs search(Query query, Filter filter, int n,
Sort sort) throws IOException {
return search(createWeight(query), filter, n, sort);
}
/**
* Search implementation with arbitrary sorting and no filter.
* @param query The query to search for
* @param n Return only the top n results
* @param sort The {@link org.apache.lucene.search.Sort} object
* @return The top docs, sorted according to the supplied {@link org.apache.lucene.search.Sort} instance
* @throws IOException
*/
public TopFieldDocs search(Query query, int n,
Sort sort) throws IOException {
return search(createWeight(query), null, n, sort);
}
/** Lower-level search API.
*
* <p>{@link Collector#collect(int)} is called for every matching document.
*
* <p>Applications should only use this if they need <i>all</i> of the
* matching documents. The high-level search API ({@link
* Searcher#search(Query, int)}) is usually more efficient, as it skips
* non-high-scoring hits.
* <p>Note: The <code>score</code> passed to this method is a raw score.
* In other words, the score will not necessarily be a float whose value is
* between 0 and 1.
* @throws BooleanQuery.TooManyClauses
*/
public void search(Query query, Collector results)
throws IOException {
search(createWeight(query), null, results);
}
/** Lower-level search API.
*
* <p>{@link Collector#collect(int)} is called for every matching
* document.
* <br>Collector-based access to remote indexes is discouraged.
*
* <p>Applications should only use this if they need <i>all</i> of the
* matching documents. The high-level search API ({@link
* Searcher#search(Query, Filter, int)}) is usually more efficient, as it skips
* non-high-scoring hits.
*
* @param query to match documents
* @param filter if non-null, used to permit documents to be collected.
* @param results to receive hits
* @throws BooleanQuery.TooManyClauses
*/
public void search(Query query, Filter filter, Collector results)
throws IOException {
search(createWeight(query), filter, results);
}
/** Finds the top <code>n</code>
* hits for <code>query</code>, applying <code>filter</code> if non-null.
*
* @throws BooleanQuery.TooManyClauses
*/
public TopDocs search(Query query, Filter filter, int n)
throws IOException {
return search(createWeight(query), filter, n);
}
/** Finds the top <code>n</code>
* hits for <code>query</code>.
*
* @throws BooleanQuery.TooManyClauses
*/
public TopDocs search(Query query, int n)
throws IOException {
return search(query, null, n);
}
/** Returns an Explanation that describes how <code>doc</code> scored against
* <code>query</code>.
*
* <p>This is intended to be used in developing Similarity implementations,
* and, for good performance, should not be displayed with every hit.
* Computing an explanation is as expensive as executing the query over the
* entire index.
*/
public Explanation explain(Query query, int doc) throws IOException {
return explain(createWeight(query), doc);
}
/** The Similarity implementation used by this searcher. */
private Similarity similarity = Similarity.getDefault();
/** Expert: Set the Similarity implementation used by this Searcher.
*
* @see Similarity#setDefault(Similarity)
*/
public void setSimilarity(Similarity similarity) {
this.similarity = similarity;
}
/** Expert: Return the Similarity implementation used by this Searcher.
*
* <p>This defaults to the current value of {@link Similarity#getDefault()}.
*/
public Similarity getSimilarity() {
return this.similarity;
}
/**
* creates a weight for <code>query</code>
* @return new weight
*/
protected Weight createWeight(Query query) throws IOException {
return query.weight(this);
}
// inherit javadoc
public int[] docFreqs(Term[] terms) throws IOException {
int[] result = new int[terms.length];
for (int i = 0; i < terms.length; i++) {
result[i] = docFreq(terms[i]);
}
return result;
}
abstract public void search(Weight weight, Filter filter, Collector results) throws IOException;
abstract public void close() throws IOException;
abstract public int docFreq(Term term) throws IOException;
abstract public int maxDoc() throws IOException;
abstract public TopDocs search(Weight weight, Filter filter, int n) throws IOException;
abstract public Document doc(int i) throws CorruptIndexException, IOException;
abstract public Document doc(int docid, FieldSelector fieldSelector) throws CorruptIndexException, IOException;
abstract public Query rewrite(Query query) throws IOException;
abstract public Explanation explain(Weight weight, int doc) throws IOException;
abstract public TopFieldDocs search(Weight weight, Filter filter, int n, Sort sort) throws IOException;
/* End patch for GCJ bug #15411. */
}
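
Searcher's Similarity accessors survive the collapse: setSimilarity and getSimilarity are available on IndexSearcher (the TestOmitTf hunk later in this patch relies on this). A small sketch of installing a custom Similarity, assuming a DefaultSimilarity subclass:

import org.apache.lucene.index.IndexReader;
import org.apache.lucene.search.DefaultSimilarity;
import org.apache.lucene.search.IndexSearcher;

class SimilaritySetup {
  static IndexSearcher openWithFlatTf(IndexReader reader) {
    IndexSearcher searcher = new IndexSearcher(reader);
    // Per-searcher Similarity, exactly as on the removed Searcher base class.
    searcher.setSimilarity(new DefaultSimilarity() {
      @Override
      public float tf(float freq) { return freq > 0 ? 1.0f : 0.0f; }
    });
    return searcher;
  }
}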

View File

@ -722,7 +722,7 @@ public abstract class Similarity implements Serializable {
and an explanation for the term.
* @throws IOException
*/
public IDFExplanation idfExplain(final Term term, final Searcher searcher, int docFreq) throws IOException {
public IDFExplanation idfExplain(final Term term, final IndexSearcher searcher, int docFreq) throws IOException {
final int df = docFreq;
final int max = searcher.maxDoc();
final float idf = idf(df, max);
@ -743,7 +743,7 @@ public abstract class Similarity implements Serializable {
* #idfExplain(Term,Searcher,int)} by passing
* <code>searcher.docFreq(term)</code> as the docFreq.
*/
public IDFExplanation idfExplain(final Term term, final Searcher searcher) throws IOException {
public IDFExplanation idfExplain(final Term term, final IndexSearcher searcher) throws IOException {
return idfExplain(term, searcher, searcher.docFreq(term));
}
@ -761,7 +761,7 @@ public abstract class Similarity implements Serializable {
* for each term.
* @throws IOException
*/
public IDFExplanation idfExplain(Collection<Term> terms, Searcher searcher) throws IOException {
public IDFExplanation idfExplain(Collection<Term> terms, IndexSearcher searcher) throws IOException {
final int max = searcher.maxDoc();
float idf = 0.0f;
final StringBuilder exp = new StringBuilder();
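
Similarity subclasses that override idfExplain only swap the parameter type; the anonymous IDFExplanation pattern is unchanged. A minimal sketch mirroring the TestOmitTf override later in this patch (the constant values are illustrative):

import java.io.IOException;
import java.util.Collection;
import org.apache.lucene.index.Term;
import org.apache.lucene.search.DefaultSimilarity;
import org.apache.lucene.search.Explanation.IDFExplanation;
import org.apache.lucene.search.IndexSearcher;

class FlatIdfSimilarity extends DefaultSimilarity {
  @Override
  public IDFExplanation idfExplain(Collection<Term> terms, IndexSearcher searcher)
      throws IOException {
    return new IDFExplanation() {
      @Override public float getIdf() { return 1.0f; }          // constant idf
      @Override public String explain() { return "flat idf"; }  // for Explanation output
    };
  }
}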

View File

@ -41,7 +41,7 @@ public class TermQuery extends Query {
private float queryWeight;
private IDFExplanation idfExp;
public TermWeight(Searcher searcher)
public TermWeight(IndexSearcher searcher)
throws IOException {
this.similarity = getSimilarity(searcher);
if (docFreq != -1) {
@ -180,7 +180,7 @@ public class TermQuery extends Query {
public Term getTerm() { return term; }
@Override
public Weight createWeight(Searcher searcher) throws IOException {
public Weight createWeight(IndexSearcher searcher) throws IOException {
return new TermWeight(searcher);
}

View File

@ -28,7 +28,7 @@ import org.apache.lucene.search.Explanation;
import org.apache.lucene.search.Query;
import org.apache.lucene.search.Weight;
import org.apache.lucene.search.Scorer;
import org.apache.lucene.search.Searcher;
import org.apache.lucene.search.IndexSearcher;
import org.apache.lucene.search.Similarity;
import org.apache.lucene.util.ToStringUtils;
@ -187,7 +187,7 @@ public class CustomScoreQuery extends Query {
Weight[] valSrcWeights;
boolean qStrict;
public CustomWeight(Searcher searcher) throws IOException {
public CustomWeight(IndexSearcher searcher) throws IOException {
this.similarity = getSimilarity(searcher);
this.subQueryWeight = subQuery.weight(searcher);
this.valSrcWeights = new Weight[valSrcQueries.length];
@ -350,7 +350,7 @@ public class CustomScoreQuery extends Query {
}
@Override
public Weight createWeight(Searcher searcher) throws IOException {
public Weight createWeight(IndexSearcher searcher) throws IOException {
return new CustomWeight(searcher);
}

View File

@ -68,7 +68,7 @@ public class ValueSourceQuery extends Query {
float queryNorm;
float queryWeight;
public ValueSourceWeight(Searcher searcher) {
public ValueSourceWeight(IndexSearcher searcher) {
this.similarity = getSimilarity(searcher);
}
@ -173,7 +173,7 @@ public class ValueSourceQuery extends Query {
}
@Override
public Weight createWeight(Searcher searcher) {
public Weight createWeight(IndexSearcher searcher) {
return new ValueSourceQuery.ValueSourceWeight(searcher);
}

View File

@ -20,7 +20,7 @@ package org.apache.lucene.search.payloads;
import org.apache.lucene.index.IndexReader;
import org.apache.lucene.search.Explanation;
import org.apache.lucene.search.Scorer;
import org.apache.lucene.search.Searcher;
import org.apache.lucene.search.IndexSearcher;
import org.apache.lucene.search.Similarity;
import org.apache.lucene.search.Weight;
import org.apache.lucene.search.spans.NearSpansOrdered;
@ -66,7 +66,7 @@ public class PayloadNearQuery extends SpanNearQuery {
}
@Override
public Weight createWeight(Searcher searcher) throws IOException {
public Weight createWeight(IndexSearcher searcher) throws IOException {
return new PayloadNearSpanWeight(this, searcher);
}
@ -137,7 +137,7 @@ public class PayloadNearQuery extends SpanNearQuery {
}
public class PayloadNearSpanWeight extends SpanWeight {
public PayloadNearSpanWeight(SpanQuery query, Searcher searcher)
public PayloadNearSpanWeight(SpanQuery query, IndexSearcher searcher)
throws IOException {
super(query, searcher);
}

View File

@ -20,7 +20,7 @@ package org.apache.lucene.search.payloads;
import org.apache.lucene.index.Term;
import org.apache.lucene.index.DocsAndPositionsEnum;
import org.apache.lucene.index.IndexReader;
import org.apache.lucene.search.Searcher;
import org.apache.lucene.search.IndexSearcher;
import org.apache.lucene.search.Scorer;
import org.apache.lucene.search.Weight;
import org.apache.lucene.search.Similarity;
@ -62,13 +62,13 @@ public class PayloadTermQuery extends SpanTermQuery {
}
@Override
public Weight createWeight(Searcher searcher) throws IOException {
public Weight createWeight(IndexSearcher searcher) throws IOException {
return new PayloadTermWeight(this, searcher);
}
protected class PayloadTermWeight extends SpanWeight {
public PayloadTermWeight(PayloadTermQuery query, Searcher searcher)
public PayloadTermWeight(PayloadTermQuery query, IndexSearcher searcher)
throws IOException {
super(query, searcher);
}

View File

@ -24,7 +24,7 @@ import org.apache.lucene.index.IndexReader;
import org.apache.lucene.index.Term;
import org.apache.lucene.search.Query;
import org.apache.lucene.search.Weight;
import org.apache.lucene.search.Searcher;
import org.apache.lucene.search.IndexSearcher;
import org.apache.lucene.search.Similarity;
import org.apache.lucene.util.ToStringUtils;
@ -102,12 +102,12 @@ public class FieldMaskingSpanQuery extends SpanQuery {
}
@Override
public Weight createWeight(Searcher searcher) throws IOException {
public Weight createWeight(IndexSearcher searcher) throws IOException {
return maskedQuery.createWeight(searcher);
}
@Override
public Similarity getSimilarity(Searcher searcher) {
public Similarity getSimilarity(IndexSearcher searcher) {
return maskedQuery.getSimilarity(searcher);
}

View File

@ -21,7 +21,7 @@ import java.io.IOException;
import org.apache.lucene.index.IndexReader;
import org.apache.lucene.search.Query;
import org.apache.lucene.search.Searcher;
import org.apache.lucene.search.IndexSearcher;
import org.apache.lucene.search.Weight;
/** Base class for span-based queries. */
@ -34,7 +34,7 @@ public abstract class SpanQuery extends Query {
public abstract String getField();
@Override
public Weight createWeight(Searcher searcher) throws IOException {
public Weight createWeight(IndexSearcher searcher) throws IOException {
return new SpanWeight(this, searcher);
}

View File

@ -40,7 +40,7 @@ public class SpanWeight extends Weight {
protected SpanQuery query;
private IDFExplanation idfExp;
public SpanWeight(SpanQuery query, Searcher searcher)
public SpanWeight(SpanQuery query, IndexSearcher searcher)
throws IOException {
this.similarity = query.getSimilarity(searcher);
this.query = query;
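
The span package follows the same mechanical substitution: SpanWeight is constructed with the IndexSearcher threaded through SpanQuery.createWeight. A tiny usage sketch (field and term are illustrative):

import java.io.IOException;
import org.apache.lucene.index.Term;
import org.apache.lucene.search.IndexSearcher;
import org.apache.lucene.search.Weight;
import org.apache.lucene.search.spans.SpanQuery;
import org.apache.lucene.search.spans.SpanTermQuery;

class SpanWeightUsage {
  static Weight spanWeight(IndexSearcher searcher) throws IOException {
    SpanQuery q = new SpanTermQuery(new Term("body", "lucene"));
    return q.createWeight(searcher);  // returns a SpanWeight under the hood
  }
}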

View File

@ -94,7 +94,7 @@ public class TestSearch extends LuceneTestCase {
}
writer.close();
Searcher searcher = new IndexSearcher(directory, true);
IndexSearcher searcher = new IndexSearcher(directory, true);
String[] queries = {
"a b",

View File

@ -102,7 +102,7 @@ public class TestSearchForDuplicates extends LuceneTestCase {
writer.close();
// try a search without OR
Searcher searcher = new IndexSearcher(directory, true);
IndexSearcher searcher = new IndexSearcher(directory, true);
QueryParser parser = new QueryParser(TEST_VERSION_CURRENT, PRIORITY_FIELD, analyzer);
@ -133,7 +133,7 @@ public class TestSearchForDuplicates extends LuceneTestCase {
}
private void printHits(PrintWriter out, ScoreDoc[] hits, Searcher searcher ) throws IOException {
private void printHits(PrintWriter out, ScoreDoc[] hits, IndexSearcher searcher) throws IOException {
out.println(hits.length + " total results\n");
for (int i = 0 ; i < hits.length; i++) {
if ( i < 10 || (i > 94 && i < 105) ) {
@ -143,7 +143,7 @@ public class TestSearchForDuplicates extends LuceneTestCase {
}
}
private void checkHits(ScoreDoc[] hits, int expectedCount, Searcher searcher) throws IOException {
private void checkHits(ScoreDoc[] hits, int expectedCount, IndexSearcher searcher) throws IOException {
assertEquals("total results", expectedCount, hits.length);
for (int i = 0 ; i < hits.length; i++) {
if ( i < 10 || (i > 94 && i < 105) ) {

View File

@ -6,7 +6,6 @@ import org.apache.lucene.index.Term;
import org.apache.lucene.search.IndexSearcher;
import org.apache.lucene.search.Query;
import org.apache.lucene.search.ScoreDoc;
import org.apache.lucene.search.Searcher;
import org.apache.lucene.search.TermQuery;
import org.apache.lucene.store.Directory;
import org.apache.lucene.util.LuceneTestCase;
@ -157,7 +156,7 @@ public class TestDocument extends LuceneTestCase {
writer.addDocument(makeDocumentWithFields());
IndexReader reader = writer.getReader();
Searcher searcher = new IndexSearcher(reader);
IndexSearcher searcher = new IndexSearcher(reader);
// search for something that does exists
Query query = new TermQuery(new Term("keyword", "test1"));
@ -239,7 +238,7 @@ public class TestDocument extends LuceneTestCase {
writer.addDocument(doc);
IndexReader reader = writer.getReader();
Searcher searcher = new IndexSearcher(reader);
IndexSearcher searcher = new IndexSearcher(reader);
Query query = new TermQuery(new Term("keyword", "test"));

View File

@ -360,7 +360,7 @@ public class TestBackwardsCompatibility extends LuceneTestCase {
// First document should be #21 since its norm was
// increased:
Document d = searcher.doc(hits[0].doc);
Document d = searcher.getIndexReader().document(hits[0].doc);
assertEquals("didn't get the right document first", "21", d.get("id"));
doTestHits(hits, 34, searcher.getIndexReader());
@ -408,7 +408,7 @@ public class TestBackwardsCompatibility extends LuceneTestCase {
// make sure searching sees right # hits
IndexSearcher searcher = new IndexSearcher(dir, true);
ScoreDoc[] hits = searcher.search(new TermQuery(new Term("content", "aaa")), null, 1000).scoreDocs;
Document d = searcher.doc(hits[0].doc);
Document d = searcher.getIndexReader().document(hits[0].doc);
assertEquals("wrong first document", "21", d.get("id"));
doTestHits(hits, 44, searcher.getIndexReader());
searcher.close();

View File

@ -27,7 +27,6 @@ import org.apache.lucene.document.Field;
import org.apache.lucene.search.IndexSearcher;
import org.apache.lucene.search.PhraseQuery;
import org.apache.lucene.search.ScoreDoc;
import org.apache.lucene.search.Searcher;
import org.apache.lucene.store.Directory;
import org.apache.lucene.store.IndexInput;
import org.apache.lucene.store.MockDirectoryWrapper;
@ -40,7 +39,7 @@ import org.apache.lucene.util.BytesRef;
*
*/
public class TestLazyProxSkipping extends LuceneTestCase {
private Searcher searcher;
private IndexSearcher searcher;
private int seeksCounter = 0;
private String field = "tokens";

View File

@ -25,6 +25,9 @@ import java.util.List;
import java.util.Set;
import java.util.concurrent.atomic.AtomicBoolean;
import java.util.concurrent.atomic.AtomicInteger;
import java.util.concurrent.Executors;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.TimeUnit;
import org.apache.lucene.analysis.MockAnalyzer;
import org.apache.lucene.document.Document;
@ -38,6 +41,7 @@ import org.apache.lucene.search.TermQuery;
import org.apache.lucene.search.TopDocs;
import org.apache.lucene.store.FSDirectory;
import org.apache.lucene.store.MockDirectoryWrapper;
import org.apache.lucene.util.NamedThreadFactory;
import org.apache.lucene.util.Bits;
import org.apache.lucene.util.BytesRef;
import org.apache.lucene.util.LineFileDocs;
@ -191,6 +195,8 @@ public class TestNRTThreads extends LuceneTestCase {
// silly starting guess:
final AtomicInteger totTermCount = new AtomicInteger(100);
final ExecutorService es = Executors.newCachedThreadPool(new NamedThreadFactory("NRT search threads"));
while(System.currentTimeMillis() < stopTime && !failed.get()) {
if (random.nextBoolean()) {
if (VERBOSE) {
@ -228,7 +234,7 @@ public class TestNRTThreads extends LuceneTestCase {
if (r.numDocs() > 0) {
final IndexSearcher s = new IndexSearcher(r);
final IndexSearcher s = new IndexSearcher(r, es);
// run search threads
final long searchStopTime = System.currentTimeMillis() + 500;
@ -302,6 +308,9 @@ public class TestNRTThreads extends LuceneTestCase {
}
}
es.shutdown();
es.awaitTermination(1, TimeUnit.SECONDS);
if (VERBOSE) {
System.out.println("TEST: all searching done [" + (System.currentTimeMillis()-t0) + " ms]");
}
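
The es wiring in this test is the replacement for ParallelMultiSearcher: pass an ExecutorService to the IndexSearcher constructor and sub-searches run concurrently. A minimal sketch, assuming an already-open reader; note that the caller still owns the pool and must shut it down, as the test does:

import java.io.IOException;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.TimeUnit;
import org.apache.lucene.index.IndexReader;
import org.apache.lucene.index.Term;
import org.apache.lucene.search.IndexSearcher;
import org.apache.lucene.search.TermQuery;
import org.apache.lucene.search.TopDocs;

class ParallelSearchSketch {
  static TopDocs parallelSearch(IndexReader reader) throws IOException, InterruptedException {
    ExecutorService es = Executors.newCachedThreadPool();
    try {
      IndexSearcher searcher = new IndexSearcher(reader, es);  // sub-searches use the pool
      return searcher.search(new TermQuery(new Term("content", "aaa")), 10);
    } finally {
      es.shutdown();                            // the searcher never shuts the pool down itself
      es.awaitTermination(1, TimeUnit.SECONDS);
    }
  }
}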

View File

@ -41,7 +41,7 @@ public class TestOmitTf extends LuceneTestCase {
@Override public float sloppyFreq(int distance) { return 2.0f; }
@Override public float idf(int docFreq, int numDocs) { return 1.0f; }
@Override public float coord(int overlap, int maxOverlap) { return 1.0f; }
@Override public IDFExplanation idfExplain(Collection<Term> terms, Searcher searcher) throws IOException {
@Override public IDFExplanation idfExplain(Collection<Term> terms, IndexSearcher searcher) throws IOException {
return new IDFExplanation() {
@Override
public float getIdf() {
@ -279,7 +279,7 @@ public class TestOmitTf extends LuceneTestCase {
/*
* Verify the index
*/
Searcher searcher = new IndexSearcher(dir, true);
IndexSearcher searcher = new IndexSearcher(dir, true);
searcher.setSimilarity(new SimpleSimilarity());
Term a = new Term("noTf", term);

View File

@ -42,7 +42,7 @@ public class CheckHits {
* (ie: Explanation value of 0.0f)
*/
public static void checkNoMatchExplanations(Query q, String defaultFieldName,
Searcher searcher, int[] results)
IndexSearcher searcher, int[] results)
throws IOException {
String d = q.toString(defaultFieldName);
@ -81,7 +81,7 @@ public class CheckHits {
* @see #checkHits
*/
public static void checkHitCollector(Random random, Query query, String defaultFieldName,
Searcher searcher, int[] results)
IndexSearcher searcher, int[] results)
throws IOException {
QueryUtils.check(random,query,searcher);
@ -97,20 +97,10 @@ public class CheckHits {
Assert.assertEquals("Simple: " + query.toString(defaultFieldName),
correct, actual);
for (int i = -1; i < 2; i++) {
actual.clear();
QueryUtils.wrapSearcher(random, searcher, i).search(query, c);
Assert.assertEquals("Wrap Searcher " + i + ": " +
query.toString(defaultFieldName),
correct, actual);
}
if ( ! ( searcher instanceof IndexSearcher ) ) return;
for (int i = -1; i < 2; i++) {
actual.clear();
QueryUtils.wrapUnderlyingReader
(random, (IndexSearcher)searcher, i).search(query, c);
(random, searcher, i).search(query, c);
Assert.assertEquals("Wrap Reader " + i + ": " +
query.toString(defaultFieldName),
correct, actual);
@ -157,7 +147,7 @@ public class CheckHits {
Random random,
Query query,
String defaultFieldName,
Searcher searcher,
IndexSearcher searcher,
int[] results)
throws IOException {
@ -284,7 +274,7 @@ public class CheckHits {
*/
public static void checkExplanations(Query query,
String defaultFieldName,
Searcher searcher) throws IOException {
IndexSearcher searcher) throws IOException {
checkExplanations(query, defaultFieldName, searcher, false);
}
@ -301,7 +291,7 @@ public class CheckHits {
*/
public static void checkExplanations(Query query,
String defaultFieldName,
Searcher searcher,
IndexSearcher searcher,
boolean deep) throws IOException {
searcher.search(query,
@ -455,7 +445,7 @@ public class CheckHits {
public static class ExplanationAsserter extends Collector {
Query q;
Searcher s;
IndexSearcher s;
String d;
boolean deep;
@ -463,10 +453,10 @@ public class CheckHits {
private int base = 0;
/** Constructs an instance which does shallow tests on the Explanation */
public ExplanationAsserter(Query q, String defaultFieldName, Searcher s) {
public ExplanationAsserter(Query q, String defaultFieldName, IndexSearcher s) {
this(q,defaultFieldName,s,false);
}
public ExplanationAsserter(Query q, String defaultFieldName, Searcher s, boolean deep) {
public ExplanationAsserter(Query q, String defaultFieldName, IndexSearcher s, boolean deep) {
this.q=q;
this.s=s;
this.d = q.toString(defaultFieldName);

View File

@ -19,11 +19,7 @@ package org.apache.lucene.search;
import java.io.IOException;
import org.apache.lucene.document.Document;
import org.apache.lucene.document.FieldSelector;
import org.apache.lucene.index.CorruptIndexException;
import org.apache.lucene.index.IndexReader;
import org.apache.lucene.index.Term;
import org.apache.lucene.util.BytesRef;
import org.apache.lucene.util.PriorityQueue;
@ -38,116 +34,6 @@ final class JustCompileSearch {
private static final String UNSUPPORTED_MSG = "unsupported: used for back-compat testing only !";
static final class JustCompileSearcher extends Searcher {
@Override
protected Weight createWeight(Query query) throws IOException {
throw new UnsupportedOperationException(UNSUPPORTED_MSG);
}
@Override
public void close() throws IOException {
throw new UnsupportedOperationException(UNSUPPORTED_MSG);
}
@Override
public Document doc(int i) throws CorruptIndexException, IOException {
throw new UnsupportedOperationException(UNSUPPORTED_MSG);
}
@Override
public int[] docFreqs(Term[] terms) throws IOException {
throw new UnsupportedOperationException(UNSUPPORTED_MSG);
}
@Override
public Explanation explain(Query query, int doc) throws IOException {
throw new UnsupportedOperationException(UNSUPPORTED_MSG);
}
@Override
public Similarity getSimilarity() {
throw new UnsupportedOperationException(UNSUPPORTED_MSG);
}
@Override
public void search(Query query, Collector results) throws IOException {
throw new UnsupportedOperationException(UNSUPPORTED_MSG);
}
@Override
public void search(Query query, Filter filter, Collector results)
throws IOException {
throw new UnsupportedOperationException(UNSUPPORTED_MSG);
}
@Override
public TopDocs search(Query query, Filter filter, int n) throws IOException {
throw new UnsupportedOperationException(UNSUPPORTED_MSG);
}
@Override
public TopFieldDocs search(Query query, Filter filter, int n, Sort sort)
throws IOException {
throw new UnsupportedOperationException(UNSUPPORTED_MSG);
}
@Override
public TopDocs search(Query query, int n) throws IOException {
throw new UnsupportedOperationException(UNSUPPORTED_MSG);
}
@Override
public void setSimilarity(Similarity similarity) {
throw new UnsupportedOperationException(UNSUPPORTED_MSG);
}
@Override
public int docFreq(Term term) throws IOException {
throw new UnsupportedOperationException(UNSUPPORTED_MSG);
}
@Override
public Explanation explain(Weight weight, int doc) throws IOException {
throw new UnsupportedOperationException(UNSUPPORTED_MSG);
}
@Override
public int maxDoc() throws IOException {
throw new UnsupportedOperationException(UNSUPPORTED_MSG);
}
@Override
public Query rewrite(Query query) throws IOException {
throw new UnsupportedOperationException(UNSUPPORTED_MSG);
}
@Override
public void search(Weight weight, Filter filter, Collector results)
throws IOException {
throw new UnsupportedOperationException(UNSUPPORTED_MSG);
}
@Override
public TopDocs search(Weight weight, Filter filter, int n)
throws IOException {
throw new UnsupportedOperationException(UNSUPPORTED_MSG);
}
@Override
public TopFieldDocs search(Weight weight, Filter filter, int n, Sort sort)
throws IOException {
throw new UnsupportedOperationException(UNSUPPORTED_MSG);
}
@Override
public Document doc(int n, FieldSelector fieldSelector)
throws CorruptIndexException, IOException {
throw new UnsupportedOperationException(UNSUPPORTED_MSG);
}
}
static final class JustCompileCollector extends Collector {
@Override

View File

@ -85,7 +85,7 @@ public class QueryUtils {
}
/** deep check that explanations of a query 'score' correctly */
public static void checkExplanations (final Query q, final Searcher s) throws IOException {
public static void checkExplanations (final Query q, final IndexSearcher s) throws IOException {
CheckHits.checkExplanations(q, null, s, true);
}
@ -100,27 +100,19 @@ public class QueryUtils {
* @see #checkSerialization
* @see #checkEqual
*/
public static void check(Random random, Query q1, Searcher s) {
public static void check(Random random, Query q1, IndexSearcher s) {
check(random, q1, s, true);
}
private static void check(Random random, Query q1, Searcher s, boolean wrap) {
private static void check(Random random, Query q1, IndexSearcher s, boolean wrap) {
try {
check(q1);
if (s!=null) {
if (s instanceof IndexSearcher) {
IndexSearcher is = (IndexSearcher)s;
checkFirstSkipTo(q1,is);
checkSkipTo(q1,is);
checkFirstSkipTo(q1,s);
checkSkipTo(q1,s);
if (wrap) {
check(random, q1, wrapUnderlyingReader(random, is, -1), false);
check(random, q1, wrapUnderlyingReader(random, is, 0), false);
check(random, q1, wrapUnderlyingReader(random, is, +1), false);
}
}
if (wrap) {
check(random,q1, wrapSearcher(random, s, -1), false);
check(random,q1, wrapSearcher(random, s, 0), false);
check(random,q1, wrapSearcher(random, s, +1), false);
check(random, q1, wrapUnderlyingReader(random, s, -1), false);
check(random, q1, wrapUnderlyingReader(random, s, 0), false);
check(random, q1, wrapUnderlyingReader(random, s, +1), false);
}
checkExplanations(q1,s);
checkSerialization(q1,s);
@ -166,39 +158,6 @@ public class QueryUtils {
out.setSimilarity(s.getSimilarity());
return out;
}
/**
* Given a Searcher, returns a new MultiSearcher wrapping the
* the original Searcher,
* as well as several "empty" IndexSearchers -- some of which will have
* deleted documents in them. This new MultiSearcher
* should behave exactly the same as the original Searcher.
* @param s the Searcher to wrap
* @param edge if negative, s will be the first sub; if 0, s will be in the middle; if positive, s will be the last sub
*/
public static MultiSearcher wrapSearcher(Random random, final Searcher s, final int edge)
throws IOException {
// we can't put deleted docs before the nested reader, because
// it will throw off the docIds
Searcher[] searchers = new Searcher[] {
edge < 0 ? s : new IndexSearcher(makeEmptyIndex(random, 0), true),
new MultiSearcher(new Searcher[] {
new IndexSearcher(makeEmptyIndex(random, edge < 0 ? 65 : 0), true),
new IndexSearcher(makeEmptyIndex(random, 0), true),
0 == edge ? s : new IndexSearcher(makeEmptyIndex(random, 0), true)
}),
new IndexSearcher(makeEmptyIndex(random, 0 < edge ? 0 : 3), true),
new IndexSearcher(makeEmptyIndex(random, 0), true),
new MultiSearcher(new Searcher[] {
new IndexSearcher(makeEmptyIndex(random, 0 < edge ? 0 : 5), true),
new IndexSearcher(makeEmptyIndex(random, 0), true),
0 < edge ? s : new IndexSearcher(makeEmptyIndex(random, 0), true)
})
};
MultiSearcher out = new MultiSearcher(searchers);
out.setSimilarity(s.getSimilarity());
return out;
}
private static Directory makeEmptyIndex(Random random, final int numDeletedDocs)
throws IOException {
@ -231,7 +190,7 @@ public class QueryUtils {
/** check that the query weight is serializable.
* @throws IOException if serialization check fail.
*/
private static void checkSerialization(Query q, Searcher s) throws IOException {
private static void checkSerialization(Query q, IndexSearcher s) throws IOException {
Weight w = q.weight(s);
try {
ByteArrayOutputStream bos = new ByteArrayOutputStream();

View File

@ -372,7 +372,7 @@ public class TestBooleanMinShouldMatch extends LuceneTestCase {
protected void printHits(String test, ScoreDoc[] h, Searcher searcher) throws Exception {
protected void printHits(String test, ScoreDoc[] h, IndexSearcher searcher) throws Exception {
System.err.println("------- " + test + " -------");

View File

@ -46,7 +46,7 @@ public class TestConstantScoreQuery extends LuceneTestCase {
QueryUtils.checkUnequal(q1, new TermQuery(new Term("a", "b")));
}
private void checkHits(Searcher searcher, Query q, final float expectedScore, final String scorerClassName, final String innerScorerClassName) throws IOException {
private void checkHits(IndexSearcher searcher, Query q, final float expectedScore, final String scorerClassName, final String innerScorerClassName) throws IOException {
final int[] count = new int[1];
searcher.search(q, new Collector() {
private Scorer scorer;

View File

@ -89,7 +89,7 @@ public class TestCustomSearcherSort extends LuceneTestCase implements Serializab
Sort custSort = new Sort(
new SortField("publicationDate_", SortField.STRING),
SortField.FIELD_SCORE);
Searcher searcher = new CustomSearcher(reader, 2);
IndexSearcher searcher = new CustomSearcher(reader, 2);
// search and check hits
matchHits(searcher, custSort);
}
@ -103,28 +103,13 @@ public class TestCustomSearcherSort extends LuceneTestCase implements Serializab
Sort custSort = new Sort(
new SortField("publicationDate_", SortField.STRING),
SortField.FIELD_SCORE);
Searcher searcher = new MultiSearcher(new Searcher[] {new CustomSearcher(
reader, 2)});
// search and check hits
matchHits(searcher, custSort);
}
/**
* Run the test using two CustomSearcher instances.
*/
public void testFieldSortMultiCustomSearcher() throws Exception {
// log("Run testFieldSortMultiCustomSearcher");
// define the sort criteria
Sort custSort = new Sort(
new SortField("publicationDate_", SortField.STRING),
SortField.FIELD_SCORE);
Searcher searcher = new MultiSearcher(new CustomSearcher(reader, 0), new CustomSearcher(reader, 2));
IndexSearcher searcher = new CustomSearcher(reader, 2);
// search and check hits
matchHits(searcher, custSort);
}
// make sure the documents returned by the search match the expected list
private void matchHits(Searcher searcher, Sort sort) throws IOException {
private void matchHits(IndexSearcher searcher, Sort sort) throws IOException {
// make a query without sorting first
ScoreDoc[] hitsByRank = searcher.search(query, null, Integer.MAX_VALUE).scoreDocs;
checkHits(hitsByRank, "Sort by rank: "); // check for duplicates

View File

@ -473,7 +473,7 @@ public class TestDisjunctionMaxQuery extends LuceneTestCase {
return q;
}
protected void printHits(String test, ScoreDoc[] h, Searcher searcher)
protected void printHits(String test, ScoreDoc[] h, IndexSearcher searcher)
throws Exception {
System.err.println("------- " + test + " -------");

View File

@ -1,454 +0,0 @@
package org.apache.lucene.search;
/**
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
import org.apache.lucene.util.LuceneTestCase;
import org.apache.lucene.analysis.MockAnalyzer;
import org.apache.lucene.document.Document;
import org.apache.lucene.document.Field;
import org.apache.lucene.document.SetBasedFieldSelector;
import org.apache.lucene.index.IndexReader;
import org.apache.lucene.index.IndexWriter;
import org.apache.lucene.index.IndexWriterConfig;
import org.apache.lucene.index.Term;
import org.apache.lucene.index.IndexWriterConfig.OpenMode;
import org.apache.lucene.queryParser.QueryParser;
import org.apache.lucene.store.Directory;
import java.io.IOException;
import java.util.Collections;
import java.util.HashSet;
import java.util.Map;
import java.util.Random;
import java.util.Set;
/**
* Tests {@link MultiSearcher} class.
*/
public class TestMultiSearcher extends LuceneTestCase
{
/**
* Returns a new instance of the concrete MultiSearcher class
* used in this test.
*/
protected MultiSearcher getMultiSearcherInstance(Searcher[] searchers) throws IOException {
return new MultiSearcher(searchers);
}
public void testEmptyIndex() throws Exception {
// creating two directories for indices
Directory indexStoreA = newDirectory();
Directory indexStoreB = newDirectory();
// creating a document to store
Document lDoc = new Document();
lDoc.add(newField("fulltext", "Once upon a time.....", Field.Store.YES, Field.Index.ANALYZED));
lDoc.add(newField("id", "doc1", Field.Store.YES, Field.Index.NOT_ANALYZED));
lDoc.add(newField("handle", "1", Field.Store.YES, Field.Index.NOT_ANALYZED));
// creating a document to store
Document lDoc2 = new Document();
lDoc2.add(newField("fulltext", "in a galaxy far far away.....",
Field.Store.YES, Field.Index.ANALYZED));
lDoc2.add(newField("id", "doc2", Field.Store.YES, Field.Index.NOT_ANALYZED));
lDoc2.add(newField("handle", "1", Field.Store.YES, Field.Index.NOT_ANALYZED));
// creating a document to store
Document lDoc3 = new Document();
lDoc3.add(newField("fulltext", "a bizarre bug manifested itself....",
Field.Store.YES, Field.Index.ANALYZED));
lDoc3.add(newField("id", "doc3", Field.Store.YES, Field.Index.NOT_ANALYZED));
lDoc3.add(newField("handle", "1", Field.Store.YES, Field.Index.NOT_ANALYZED));
// creating an index writer for the first index
IndexWriter writerA = new IndexWriter(indexStoreA, newIndexWriterConfig( TEST_VERSION_CURRENT, new MockAnalyzer()));
// creating an index writer for the second index, but writing nothing
IndexWriter writerB = new IndexWriter(indexStoreB, newIndexWriterConfig( TEST_VERSION_CURRENT, new MockAnalyzer()));
//--------------------------------------------------------------------
// scenario 1
//--------------------------------------------------------------------
// writing the documents to the first index
writerA.addDocument(lDoc);
writerA.addDocument(lDoc2);
writerA.addDocument(lDoc3);
writerA.optimize();
writerA.close();
// closing the second index
writerB.close();
// creating the query
QueryParser parser = new QueryParser(TEST_VERSION_CURRENT, "fulltext", new MockAnalyzer());
Query query = parser.parse("handle:1");
// building the searchables
Searcher[] searchers = new Searcher[2];
// VITAL STEP: adding the searcher for the empty index first, before the searcher for the populated index
searchers[0] = new IndexSearcher(indexStoreB, true);
searchers[1] = new IndexSearcher(indexStoreA, true);
// creating the multiSearcher
Searcher mSearcher = getMultiSearcherInstance(searchers);
// performing the search
ScoreDoc[] hits = mSearcher.search(query, null, 1000).scoreDocs;
assertEquals(3, hits.length);
// iterating over the hit documents
for (int i = 0; i < hits.length; i++) {
mSearcher.doc(hits[i].doc);
}
mSearcher.close();
//--------------------------------------------------------------------
// scenario 2
//--------------------------------------------------------------------
// adding one document to the empty index
writerB = new IndexWriter(indexStoreB, newIndexWriterConfig(
TEST_VERSION_CURRENT,
new MockAnalyzer())
.setOpenMode(OpenMode.APPEND));
writerB.addDocument(lDoc);
writerB.optimize();
writerB.close();
// building the searchables
Searcher[] searchers2 = new Searcher[2];
// VITAL STEP: adding the searcher for the empty index first, before the searcher for the populated index
searchers2[0] = new IndexSearcher(indexStoreB, true);
searchers2[1] = new IndexSearcher(indexStoreA, true);
// creating the multiSearcher
MultiSearcher mSearcher2 = getMultiSearcherInstance(searchers2);
// performing the same search
ScoreDoc[] hits2 = mSearcher2.search(query, null, 1000).scoreDocs;
assertEquals(4, hits2.length);
// iterating over the hit documents
for (int i = 0; i < hits2.length; i++) {
// no exception should happen at this point
mSearcher2.doc(hits2[i].doc);
}
// test the subSearcher() method:
Query subSearcherQuery = parser.parse("id:doc1");
hits2 = mSearcher2.search(subSearcherQuery, null, 1000).scoreDocs;
assertEquals(2, hits2.length);
assertEquals(0, mSearcher2.subSearcher(hits2[0].doc)); // hit from searchers2[0]
assertEquals(1, mSearcher2.subSearcher(hits2[1].doc)); // hit from searchers2[1]
subSearcherQuery = parser.parse("id:doc2");
hits2 = mSearcher2.search(subSearcherQuery, null, 1000).scoreDocs;
assertEquals(1, hits2.length);
assertEquals(1, mSearcher2.subSearcher(hits2[0].doc)); // hit from searchers2[1]
mSearcher2.close();
//--------------------------------------------------------------------
// scenario 3
//--------------------------------------------------------------------
// deleting the document just added, this will cause a different exception to take place
Term term = new Term("id", "doc1");
IndexReader readerB = IndexReader.open(indexStoreB, false);
readerB.deleteDocuments(term);
readerB.close();
// optimizing the index with the writer
writerB = new IndexWriter(indexStoreB, new IndexWriterConfig(
TEST_VERSION_CURRENT,
new MockAnalyzer())
.setOpenMode(OpenMode.APPEND));
writerB.optimize();
writerB.close();
// building the searchables
Searcher[] searchers3 = new Searcher[2];
searchers3[0] = new IndexSearcher(indexStoreB, true);
searchers3[1] = new IndexSearcher(indexStoreA, true);
// creating the multiSearcher
Searcher mSearcher3 = getMultiSearcherInstance(searchers3);
// performing the same search
ScoreDoc[] hits3 = mSearcher3.search(query, null, 1000).scoreDocs;
assertEquals(3, hits3.length);
// iterating over the hit documents
for (int i = 0; i < hits3.length; i++) {
mSearcher3.doc(hits3[i].doc);
}
mSearcher3.close();
indexStoreA.close();
indexStoreB.close();
}
private Document createDocument(String contents1, String contents2) {
Document document=new Document();
document.add(newField("contents", contents1, Field.Store.YES, Field.Index.NOT_ANALYZED));
document.add(newField("other", "other contents", Field.Store.YES, Field.Index.NOT_ANALYZED));
if (contents2!=null) {
document.add(newField("contents", contents2, Field.Store.YES, Field.Index.NOT_ANALYZED));
}
return document;
}
private void initIndex(Random random, Directory directory, int nDocs, boolean create, String contents2) throws IOException {
IndexWriter indexWriter=null;
try {
indexWriter = new IndexWriter(directory, LuceneTestCase.newIndexWriterConfig(random,
TEST_VERSION_CURRENT, new MockAnalyzer()).setOpenMode(
create ? OpenMode.CREATE : OpenMode.APPEND));
for (int i=0; i<nDocs; i++) {
indexWriter.addDocument(createDocument("doc" + i, contents2));
}
} finally {
if (indexWriter!=null) {
indexWriter.close();
}
}
}
public void testFieldSelector() throws Exception {
Directory ramDirectory1, ramDirectory2;
IndexSearcher indexSearcher1, indexSearcher2;
ramDirectory1 = newDirectory();
ramDirectory2 = newDirectory();
Query query = new TermQuery(new Term("contents", "doc0"));
// Now put the documents in a different index
initIndex(random, ramDirectory1, 10, true, null); // documents with a single token "doc0", "doc1", etc...
initIndex(random, ramDirectory2, 10, true, "x"); // documents with two tokens "doc0" and "x", "doc1" and x, etc...
indexSearcher1 = new IndexSearcher(ramDirectory1, true);
indexSearcher2 = new IndexSearcher(ramDirectory2, true);
MultiSearcher searcher = getMultiSearcherInstance(new Searcher[]{indexSearcher1, indexSearcher2});
assertTrue("searcher is null and it shouldn't be", searcher != null);
ScoreDoc[] hits = searcher.search(query, null, 1000).scoreDocs;
assertTrue("hits is null and it shouldn't be", hits != null);
assertTrue(hits.length + " does not equal: " + 2, hits.length == 2);
Document document = searcher.doc(hits[0].doc);
assertTrue("document is null and it shouldn't be", document != null);
assertTrue("document.getFields() Size: " + document.getFields().size() + " is not: " + 2, document.getFields().size() == 2);
//Should be one document from each directory
//they both have two fields, contents and other
Set<String> ftl = new HashSet<String>();
ftl.add("other");
SetBasedFieldSelector fs = new SetBasedFieldSelector(ftl, Collections. <String> emptySet());
document = searcher.doc(hits[0].doc, fs);
assertTrue("document is null and it shouldn't be", document != null);
assertTrue("document.getFields() Size: " + document.getFields().size() + " is not: " + 1, document.getFields().size() == 1);
String value = document.get("contents");
assertTrue("value is not null and it should be", value == null);
value = document.get("other");
assertTrue("value is null and it shouldn't be", value != null);
ftl.clear();
ftl.add("contents");
fs = new SetBasedFieldSelector(ftl, Collections. <String> emptySet());
document = searcher.doc(hits[1].doc, fs);
value = document.get("contents");
assertTrue("value is null and it shouldn't be", value != null);
value = document.get("other");
assertTrue("value is not null and it should be", value == null);
indexSearcher1.close();
indexSearcher2.close();
ramDirectory1.close();
ramDirectory2.close();
searcher.close();
}
/* uncomment this when the highest score is always normalized to 1.0, even when it was < 1.0
public void testNormalization1() throws IOException {
testNormalization(1, "Using 1 document per index:");
}
*/
public void testNormalization10() throws IOException {
testNormalization(10, "Using 10 documents per index:");
}
private void testNormalization(int nDocs, String message) throws IOException {
Query query=new TermQuery(new Term("contents", "doc0"));
Directory ramDirectory1;
IndexSearcher indexSearcher1;
ScoreDoc[] hits;
ramDirectory1=newDirectory();
// First put the documents in the same index
initIndex(random, ramDirectory1, nDocs, true, null); // documents with a single token "doc0", "doc1", etc...
initIndex(random, ramDirectory1, nDocs, false, "x"); // documents with two tokens "doc0" and "x", "doc1" and x, etc...
indexSearcher1=new IndexSearcher(ramDirectory1, true);
indexSearcher1.setDefaultFieldSortScoring(true, true);
hits=indexSearcher1.search(query, null, 1000).scoreDocs;
assertEquals(message, 2, hits.length);
// Store the scores for use later
float[] scores={ hits[0].score, hits[1].score };
assertTrue(message, scores[0] > scores[1]);
indexSearcher1.close();
ramDirectory1.close();
hits=null;
Directory ramDirectory2;
IndexSearcher indexSearcher2;
ramDirectory1=newDirectory();
ramDirectory2=newDirectory();
// Now put the documents in a different index
initIndex(random, ramDirectory1, nDocs, true, null); // documents with a single token "doc0", "doc1", etc...
initIndex(random, ramDirectory2, nDocs, true, "x"); // documents with two tokens "doc0" and "x", "doc1" and x, etc...
indexSearcher1=new IndexSearcher(ramDirectory1, true);
indexSearcher1.setDefaultFieldSortScoring(true, true);
indexSearcher2=new IndexSearcher(ramDirectory2, true);
indexSearcher2.setDefaultFieldSortScoring(true, true);
Searcher searcher=getMultiSearcherInstance(new Searcher[] { indexSearcher1, indexSearcher2 });
hits=searcher.search(query, null, 1000).scoreDocs;
assertEquals(message, 2, hits.length);
// The scores should be the same (within reason)
assertEquals(message, scores[0], hits[0].score, 1e-6); // This will be a document from ramDirectory1
assertEquals(message, scores[1], hits[1].score, 1e-6); // This will be a document from ramDirectory2
// Adding a Sort.RELEVANCE object should not change anything
hits=searcher.search(query, null, 1000, Sort.RELEVANCE).scoreDocs;
assertEquals(message, 2, hits.length);
assertEquals(message, scores[0], hits[0].score, 1e-6); // This will be a document from ramDirectory1
assertEquals(message, scores[1], hits[1].score, 1e-6); // This will be a document from ramDirectory2
searcher.close();
ramDirectory1.close();
ramDirectory2.close();
}
/**
* test that custom similarity is in effect when using MultiSearcher (LUCENE-789).
* @throws IOException
*/
public void testCustomSimilarity () throws IOException {
Directory dir = newDirectory();
initIndex(random, dir, 10, true, "x"); // documents with two tokens "doc0" and "x", "doc1" and x, etc...
IndexSearcher srchr = new IndexSearcher(dir, true);
MultiSearcher msrchr = getMultiSearcherInstance(new Searcher[]{srchr});
Similarity customSimilarity = new DefaultSimilarity() {
// override all
@Override
public float idf(int docFreq, int numDocs) { return 100.0f; }
@Override
public float coord(int overlap, int maxOverlap) { return 1.0f; }
@Override
public float lengthNorm(String fieldName, int numTokens) { return 1.0f; }
@Override
public float queryNorm(float sumOfSquaredWeights) { return 1.0f; }
@Override
public float sloppyFreq(int distance) { return 1.0f; }
@Override
public float tf(float freq) { return 1.0f; }
};
srchr.setSimilarity(customSimilarity);
msrchr.setSimilarity(customSimilarity);
Query query=new TermQuery(new Term("contents", "doc0"));
// Get a score from IndexSearcher
TopDocs topDocs = srchr.search(query, null, 1);
float score1 = topDocs.getMaxScore();
// Get the score from MultiSearcher
topDocs = msrchr.search(query, null, 1);
float scoreN = topDocs.getMaxScore();
// The scores from the IndexSearcher and Multisearcher should be the same
// if the same similarity is used.
assertEquals("MultiSearcher score must be equal to single searcher score!", score1, scoreN, 1e-6);
msrchr.close();
srchr.close();
dir.close();
}
public void testDocFreq() throws IOException{
Directory dir1 = newDirectory();
Directory dir2 = newDirectory();
initIndex(random, dir1, 10, true, "x"); // documents with two tokens "doc0" and "x", "doc1" and x, etc...
initIndex(random, dir2, 5, true, "x"); // documents with two tokens "doc0" and "x", "doc1" and x, etc...
IndexSearcher searcher1 = new IndexSearcher(dir1, true);
IndexSearcher searcher2 = new IndexSearcher(dir2, true);
MultiSearcher multiSearcher = getMultiSearcherInstance(new Searcher[]{searcher1, searcher2});
assertEquals(15, multiSearcher.docFreq(new Term("contents","x")));
multiSearcher.close();
searcher1.close();
searcher2.close();
dir1.close();
dir2.close();
}
public void testCreateDocFrequencyMap() throws IOException{
Directory dir1 = newDirectory();
Directory dir2 = newDirectory();
Term template = new Term("contents") ;
String[] contents = {"a", "b", "c"};
HashSet<Term> termsSet = new HashSet<Term>();
for (int i = 0; i < contents.length; i++) {
initIndex(random, dir1, i+10, i==0, contents[i]);
initIndex(random, dir2, i+5, i==0, contents[i]);
termsSet.add(template.createTerm(contents[i]));
}
IndexSearcher searcher1 = new IndexSearcher(dir1, true);
IndexSearcher searcher2 = new IndexSearcher(dir2, true);
MultiSearcher multiSearcher = getMultiSearcherInstance(new Searcher[]{searcher1, searcher2});
Map<Term,Integer> docFrequencyMap = multiSearcher.createDocFrequencyMap(termsSet);
assertEquals(3, docFrequencyMap.size());
for (int i = 0; i < contents.length; i++) {
assertEquals(Integer.valueOf((i*2) +15), docFrequencyMap.get(template.createTerm(contents[i])));
}
multiSearcher.close();
searcher1.close();
searcher2.close();
dir1.close();
dir2.close();
}
}
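
Since MultiSearcher is gone, the tests above have no direct replacement; the equivalent setup is a single IndexSearcher over a MultiReader, which also sidesteps the subSearcher docID bookkeeping these tests exercised. A hedged migration sketch (not from this patch):

import java.io.IOException;
import org.apache.lucene.index.IndexReader;
import org.apache.lucene.index.MultiReader;
import org.apache.lucene.search.IndexSearcher;
import org.apache.lucene.store.Directory;

class MultiIndexSearch {
  static IndexSearcher openOverBoth(Directory dirA, Directory dirB) throws IOException {
    IndexReader readerA = IndexReader.open(dirA, true);  // read-only
    IndexReader readerB = IndexReader.open(dirB, true);
    // MultiReader concatenates the docID spaces, so scores and doc lookups
    // behave as if the two indexes were one.
    return new IndexSearcher(new MultiReader(readerA, readerB));
  }
}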

View File

@ -1,173 +0,0 @@
package org.apache.lucene.search;
/**
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
import org.apache.lucene.util.LuceneTestCase;
import org.apache.lucene.analysis.MockAnalyzer;
import org.apache.lucene.document.Document;
import org.apache.lucene.document.Field;
import org.apache.lucene.index.IndexWriter;
import org.apache.lucene.queryParser.ParseException;
import org.apache.lucene.queryParser.QueryParser;
import org.apache.lucene.store.Directory;
import java.io.IOException;
/**
* Tests {@link MultiSearcher} ranking, i.e. makes sure this bug is fixed:
* http://issues.apache.org/bugzilla/show_bug.cgi?id=31841
*
*/
public class TestMultiSearcherRanking extends LuceneTestCase {
private final String FIELD_NAME = "body";
private Searcher multiSearcher;
private Searcher singleSearcher;
public void testOneTermQuery() throws IOException, ParseException {
checkQuery("three");
}
public void testTwoTermQuery() throws IOException, ParseException {
checkQuery("three foo");
}
public void testPrefixQuery() throws IOException, ParseException {
checkQuery("multi*");
}
public void testFuzzyQuery() throws IOException, ParseException {
checkQuery("multiThree~");
}
public void testRangeQuery() throws IOException, ParseException {
checkQuery("{multiA TO multiP}");
}
public void testMultiPhraseQuery() throws IOException, ParseException {
checkQuery("\"blueberry pi*\"");
}
public void testNoMatchQuery() throws IOException, ParseException {
checkQuery("+three +nomatch");
}
/*
public void testTermRepeatedQuery() throws IOException, ParseException {
// TODO: this corner case yields different results.
checkQuery("multi* multi* foo");
}
*/
/**
* checks if a query yields the same result when executed on
* a single IndexSearcher containing all documents and on a
* MultiSearcher aggregating sub-searchers
* @param queryStr the query to check.
* @throws IOException
* @throws ParseException
*/
private void checkQuery(String queryStr) throws IOException, ParseException {
// check result hit ranking
if(VERBOSE) System.out.println("Query: " + queryStr);
QueryParser queryParser = new QueryParser(TEST_VERSION_CURRENT, FIELD_NAME, new MockAnalyzer());
Query query = queryParser.parse(queryStr);
ScoreDoc[] multiSearcherHits = multiSearcher.search(query, null, 1000).scoreDocs;
ScoreDoc[] singleSearcherHits = singleSearcher.search(query, null, 1000).scoreDocs;
assertEquals(multiSearcherHits.length, singleSearcherHits.length);
for (int i = 0; i < multiSearcherHits.length; i++) {
Document docMulti = multiSearcher.doc(multiSearcherHits[i].doc);
Document docSingle = singleSearcher.doc(singleSearcherHits[i].doc);
if(VERBOSE) System.out.println("Multi: " + docMulti.get(FIELD_NAME) + " score="
+ multiSearcherHits[i].score);
if(VERBOSE) System.out.println("Single: " + docSingle.get(FIELD_NAME) + " score="
+ singleSearcherHits[i].score);
assertEquals(multiSearcherHits[i].score, singleSearcherHits[i].score,
0.001f);
assertEquals(docMulti.get(FIELD_NAME), docSingle.get(FIELD_NAME));
}
if(VERBOSE) System.out.println();
}
/**
* initializes multiSearcher and singleSearcher with the same document set
*/
@Override
public void setUp() throws Exception {
super.setUp();
// create MultiSearcher from two separate searchers
d1 = newDirectory();
IndexWriter iw1 = new IndexWriter(d1, newIndexWriterConfig( TEST_VERSION_CURRENT, new MockAnalyzer()));
addCollection1(iw1);
iw1.close();
d2 = newDirectory();
IndexWriter iw2 = new IndexWriter(d2, newIndexWriterConfig( TEST_VERSION_CURRENT, new MockAnalyzer()));
addCollection2(iw2);
iw2.close();
Searchable[] s = new Searchable[2];
s[0] = new IndexSearcher(d1, true);
s[1] = new IndexSearcher(d2, true);
multiSearcher = new MultiSearcher(s);
// create IndexSearcher which contains all documents
d = newDirectory();
IndexWriter iw = new IndexWriter(d, newIndexWriterConfig( TEST_VERSION_CURRENT, new MockAnalyzer()));
addCollection1(iw);
addCollection2(iw);
iw.close();
singleSearcher = new IndexSearcher(d, true);
}
Directory d1, d2, d;
@Override
public void tearDown() throws Exception {
multiSearcher.close();
singleSearcher.close();
d1.close();
d2.close();
d.close();
super.tearDown();
}
private void addCollection1(IndexWriter iw) throws IOException {
add("one blah three", iw);
add("one foo three multiOne", iw);
add("one foobar three multiThree", iw);
add("blueberry pie", iw);
add("blueberry strudel", iw);
add("blueberry pizza", iw);
}
private void addCollection2(IndexWriter iw) throws IOException {
add("two blah three", iw);
add("two foo xxx multiTwo", iw);
add("two foobar xxx multiThreee", iw);
add("blueberry chewing gum", iw);
add("bluebird pizza", iw);
add("bluebird foobar pizza", iw);
add("piccadilly circus", iw);
}
private void add(String value, IndexWriter iw) throws IOException {
Document d = new Document();
d.add(newField(FIELD_NAME, value, Field.Store.YES, Field.Index.ANALYZED));
iw.addDocument(d);
}
}
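With MultiSearcher removed by this commit, the aggregated half of a comparison like the one above would instead be built at the reader level. A minimal sketch, reusing the d1/d2 directories from the deleted test; all other names are hypothetical:

IndexReader aggReader = new MultiReader(new IndexReader[] {
    IndexReader.open(d1, true), IndexReader.open(d2, true)});
IndexSearcher aggregated = new IndexSearcher(aggReader);
// ... run the same queries as against singleSearcher ...
aggregated.close();
aggReader.close(); // closing the MultiReader also closes its sub-readers by default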

View File

@@ -59,7 +59,7 @@ public class TestMultiValuedNumericRangeQuery extends LuceneTestCase {
IndexReader reader = writer.getReader();
writer.close();
Searcher searcher=new IndexSearcher(reader);
IndexSearcher searcher=new IndexSearcher(reader);
num = 50 * RANDOM_MULTIPLIER;
for (int i = 0; i < num; i++) {
int lower=random.nextInt(Integer.MAX_VALUE);

View File

@@ -44,7 +44,7 @@ public class TestNot extends LuceneTestCase {
writer.addDocument(d1);
IndexReader reader = writer.getReader();
Searcher searcher = new IndexSearcher(reader);
IndexSearcher searcher = new IndexSearcher(reader);
QueryParser parser = new QueryParser(TEST_VERSION_CURRENT, "field", new MockAnalyzer());
Query query = parser.parse("a NOT b");
//System.out.println(query);

View File

@@ -1,51 +0,0 @@
package org.apache.lucene.search;
/**
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
import java.io.IOException;
import java.util.ArrayList;
import java.util.List;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.TimeUnit;
import org.apache.lucene.util._TestUtil;
/**
* Unit tests for the ParallelMultiSearcher
*/
public class TestParallelMultiSearcher extends TestMultiSearcher {
List<ExecutorService> pools = new ArrayList<ExecutorService>();
@Override
public void tearDown() throws Exception {
for (ExecutorService exec : pools)
exec.awaitTermination(1000, TimeUnit.MILLISECONDS);
pools.clear();
super.tearDown();
}
@Override
protected MultiSearcher getMultiSearcherInstance(Searcher[] searchers)
throws IOException {
ExecutorService exec = Executors.newFixedThreadPool(_TestUtil.nextInt(random, 2, 8));
pools.add(exec);
return new ParallelMultiSearcher(exec, searchers);
}
}
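The factory deleted above is precisely what this commit folds into IndexSearcher: the ExecutorService is now passed straight to the IndexSearcher constructor, as the new TestSort code further below also shows. A minimal sketch of the replacement, assuming an already-open IndexReader named reader:

import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.TimeUnit;
import org.apache.lucene.index.IndexReader;
import org.apache.lucene.search.IndexSearcher;

void searchInParallel(IndexReader reader) throws Exception {
  ExecutorService exec = Executors.newFixedThreadPool(4);
  IndexSearcher searcher = new IndexSearcher(reader, exec); // searches sub-readers concurrently
  try {
    // ... search as usual; per-slice results are merged ...
  } finally {
    searcher.close(); // closing the searcher does not shut down the pool
    exec.shutdown();
    exec.awaitTermination(1000, TimeUnit.MILLISECONDS);
  }
}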

View File

@@ -352,7 +352,7 @@ public class TestPhraseQuery extends LuceneTestCase {
IndexReader reader = writer.getReader();
writer.close();
Searcher searcher = new IndexSearcher(reader);
IndexSearcher searcher = new IndexSearcher(reader);
PhraseQuery query = new PhraseQuery();
query.add(new Term("field", "firstname"));
query.add(new Term("field", "lastname"));

View File

@@ -38,7 +38,7 @@ import org.apache.lucene.util._TestUtil;
* and validates that the correct number of hits is returned.
*/
public class TestRegexpRandom extends LuceneTestCase {
private Searcher searcher;
private IndexSearcher searcher;
private IndexReader reader;
private Directory dir;

View File

@@ -44,7 +44,7 @@ public class TestSimilarity extends LuceneTestCase {
@Override public float sloppyFreq(int distance) { return 2.0f; }
@Override public float idf(int docFreq, int numDocs) { return 1.0f; }
@Override public float coord(int overlap, int maxOverlap) { return 1.0f; }
@Override public IDFExplanation idfExplain(Collection<Term> terms, Searcher searcher) throws IOException {
@Override public IDFExplanation idfExplain(Collection<Term> terms, IndexSearcher searcher) throws IOException {
return new IDFExplanation() {
@Override
public float getIdf() {
@@ -75,7 +75,7 @@ public class TestSimilarity extends LuceneTestCase {
IndexReader reader = writer.getReader();
writer.close();
Searcher searcher = new IndexSearcher(reader);
IndexSearcher searcher = new IndexSearcher(reader);
searcher.setSimilarity(new SimpleSimilarity());
Term a = new Term("field", "a");

View File

@@ -17,18 +17,6 @@ package org.apache.lucene.search;
* limitations under the License.
*/
import org.apache.lucene.analysis.MockAnalyzer;
import org.apache.lucene.document.Document;
import org.apache.lucene.document.Field;
import org.apache.lucene.index.IndexWriter;
import org.apache.lucene.index.Term;
import org.apache.lucene.queryParser.QueryParser;
import org.apache.lucene.search.spans.SpanNearQuery;
import org.apache.lucene.search.spans.SpanQuery;
import org.apache.lucene.search.spans.SpanTermQuery;
import org.apache.lucene.store.Directory;
/**
* TestExplanations subclass focusing on basic query types
*/
@@ -301,73 +289,4 @@ public class TestSimpleExplanations extends TestExplanations {
qtest(q, new int[] { 0,3 });
}
public void testTermQueryMultiSearcherExplain() throws Exception {
// creating two directories for indices
Directory indexStoreA = newDirectory();
Directory indexStoreB = newDirectory();
Document lDoc = new Document();
lDoc.add(newField("handle", "1 2", Field.Store.YES, Field.Index.ANALYZED));
Document lDoc2 = new Document();
lDoc2.add(newField("handle", "1 2", Field.Store.YES, Field.Index.ANALYZED));
Document lDoc3 = new Document();
lDoc3.add(newField("handle", "1 2", Field.Store.YES, Field.Index.ANALYZED));
IndexWriter writerA = new IndexWriter(indexStoreA, newIndexWriterConfig(
TEST_VERSION_CURRENT, new MockAnalyzer()));
IndexWriter writerB = new IndexWriter(indexStoreB, newIndexWriterConfig(
TEST_VERSION_CURRENT, new MockAnalyzer()));
writerA.addDocument(lDoc);
writerA.addDocument(lDoc2);
writerA.optimize();
writerA.close();
writerB.addDocument(lDoc3);
writerB.close();
QueryParser parser = new QueryParser(TEST_VERSION_CURRENT, "fulltext", new MockAnalyzer());
Query query = parser.parse("handle:1");
Searcher[] searchers = new Searcher[2];
searchers[0] = new IndexSearcher(indexStoreB, true);
searchers[1] = new IndexSearcher(indexStoreA, true);
Searcher mSearcher = new MultiSearcher(searchers);
ScoreDoc[] hits = mSearcher.search(query, null, 1000).scoreDocs;
assertEquals(3, hits.length);
Explanation explain = mSearcher.explain(query, hits[0].doc);
String exp = explain.toString(0);
assertTrue(exp, exp.indexOf("maxDocs=3") > -1);
assertTrue(exp, exp.indexOf("docFreq=3") > -1);
query = parser.parse("handle:\"1 2\"");
hits = mSearcher.search(query, null, 1000).scoreDocs;
assertEquals(3, hits.length);
explain = mSearcher.explain(query, hits[0].doc);
exp = explain.toString(0);
assertTrue(exp, exp.indexOf("1=3") > -1);
assertTrue(exp, exp.indexOf("2=3") > -1);
query = new SpanNearQuery(new SpanQuery[] {
new SpanTermQuery(new Term("handle", "1")),
new SpanTermQuery(new Term("handle", "2")) }, 0, true);
hits = mSearcher.search(query, null, 1000).scoreDocs;
assertEquals(3, hits.length);
explain = mSearcher.explain(query, hits[0].doc);
exp = explain.toString(0);
assertTrue(exp, exp.indexOf("1=3") > -1);
assertTrue(exp, exp.indexOf("2=3") > -1);
mSearcher.close();
indexStoreA.close();
indexStoreB.close();
}
}

View File

@@ -36,9 +36,9 @@ import org.apache.lucene.index.CorruptIndexException;
import org.apache.lucene.index.IndexReader;
import org.apache.lucene.index.IndexWriter;
import org.apache.lucene.index.IndexWriterConfig;
import org.apache.lucene.index.MultiReader;
import org.apache.lucene.index.RandomIndexWriter;
import org.apache.lucene.index.Term;
import org.apache.lucene.util.BytesRef;
import org.apache.lucene.queryParser.ParseException;
import org.apache.lucene.search.BooleanClause.Occur;
import org.apache.lucene.search.FieldValueHitQueue.Entry;
@@ -49,8 +49,9 @@ import org.apache.lucene.search.cache.FloatValuesCreator;
import org.apache.lucene.search.cache.IntValuesCreator;
import org.apache.lucene.search.cache.LongValuesCreator;
import org.apache.lucene.search.cache.ShortValuesCreator;
import org.apache.lucene.store.LockObtainFailedException;
import org.apache.lucene.store.Directory;
import org.apache.lucene.store.LockObtainFailedException;
import org.apache.lucene.util.BytesRef;
import org.apache.lucene.util.DocIdBitSet;
import org.apache.lucene.util.LuceneTestCase;
import org.apache.lucene.util._TestUtil;
@@ -456,7 +457,7 @@ public class TestSort extends LuceneTestCase implements Serializable {
// test sorts when there's nothing in the index
public void testEmptyIndex() throws Exception {
Searcher empty = getEmptyIndex();
IndexSearcher empty = getEmptyIndex();
sort = new Sort();
assertMatches (empty, queryX, sort, "");
@@ -599,23 +600,9 @@ public class TestSort extends LuceneTestCase implements Serializable {
new SortField ("float", SortField.FLOAT, true) );
assertMatches (full, queryG, sort, "ZYXW");
// Do the same for a MultiSearcher
Searcher multiSearcher=new MultiSearcher (full);
sort.setSort (new SortField ("int", SortField.INT),
new SortField ("string", SortField.STRING),
new SortField ("float", SortField.FLOAT) );
assertMatches (multiSearcher, queryG, sort, "ZWXY");
sort.setSort (new SortField ("int", SortField.INT),
new SortField ("string", SortField.STRING),
new SortField ("float", SortField.FLOAT, true) );
assertMatches (multiSearcher, queryG, sort, "ZYXW");
// Don't close the multiSearcher; it would close the full searcher too!
// Do the same for a ParallelMultiSearcher
ExecutorService exec = Executors.newFixedThreadPool(_TestUtil.nextInt(random, 2, 8));
Searcher parallelSearcher=new ParallelMultiSearcher (exec, full);
IndexSearcher parallelSearcher=new IndexSearcher (full.getIndexReader(), exec);
sort.setSort (new SortField ("int", SortField.INT),
new SortField ("string", SortField.STRING),
@@ -627,6 +614,7 @@ public class TestSort extends LuceneTestCase implements Serializable {
new SortField ("float", SortField.FLOAT, true) );
assertMatches (parallelSearcher, queryG, sort, "ZYXW");
parallelSearcher.close();
exec.shutdown();
exec.awaitTermination(1000, TimeUnit.MILLISECONDS);
}
@@ -672,117 +660,19 @@ public class TestSort extends LuceneTestCase implements Serializable {
assertMatches (full, queryX, sort, "EACGI");
}
// Test the MultiSearcher's ability to preserve locale-sensitive ordering
// by wrapping it around a single searcher
public void testInternationalMultiSearcherSort() throws Exception {
Searcher multiSearcher = new MultiSearcher (full);
sort.setSort (new SortField ("i18n", new Locale("sv", "se")));
assertMatches (multiSearcher, queryY, sort, "BJDFH");
sort.setSort (new SortField ("i18n", Locale.US));
assertMatches (multiSearcher, queryY, sort, oStrokeFirst ? "BFJHD" : "BFJDH");
sort.setSort (new SortField ("i18n", new Locale("da", "dk")));
assertMatches (multiSearcher, queryY, sort, "BJDHF");
}
// test a variety of sorts using more than one searcher
public void testMultiSort() throws Exception {
MultiSearcher searcher = new MultiSearcher (searchX, searchY);
runMultiSorts(searcher, false);
}
// test a variety of sorts using a parallel multisearcher
public void testParallelMultiSort() throws Exception {
ExecutorService exec = Executors.newFixedThreadPool(_TestUtil.nextInt(random, 2, 8));
Searcher searcher = new ParallelMultiSearcher (exec, searchX, searchY);
IndexSearcher searcher = new IndexSearcher(
new MultiReader(
new IndexReader[] {searchX.getIndexReader(),
searchY.getIndexReader()}), exec);
runMultiSorts(searcher, false);
searcher.close();
exec.shutdown();
exec.awaitTermination(1000, TimeUnit.MILLISECONDS);
}
// test that the relevancy scores are the same even if
// hits are sorted
public void testNormalizedScores() throws Exception {
// capture relevancy scores
HashMap<String,Float> scoresX = getScores (full.search (queryX, null, 1000).scoreDocs, full);
HashMap<String,Float> scoresY = getScores (full.search (queryY, null, 1000).scoreDocs, full);
HashMap<String,Float> scoresA = getScores (full.search (queryA, null, 1000).scoreDocs, full);
// we'll test searching locally, remote and multi
MultiSearcher multi = new MultiSearcher (searchX, searchY);
// change sorting and make sure relevancy stays the same
sort = new Sort();
assertSameValues (scoresX, getScores (full.search (queryX, null, 1000, sort).scoreDocs, full));
assertSameValues (scoresX, getScores (multi.search (queryX, null, 1000, sort).scoreDocs, multi));
assertSameValues (scoresY, getScores (full.search (queryY, null, 1000, sort).scoreDocs, full));
assertSameValues (scoresY, getScores (multi.search (queryY, null, 1000, sort).scoreDocs, multi));
assertSameValues (scoresA, getScores (full.search (queryA, null, 1000, sort).scoreDocs, full));
assertSameValues (scoresA, getScores (multi.search (queryA, null, 1000, sort).scoreDocs, multi));
sort.setSort(SortField.FIELD_DOC);
assertSameValues (scoresX, getScores (full.search (queryX, null, 1000, sort).scoreDocs, full));
assertSameValues (scoresX, getScores (multi.search (queryX, null, 1000, sort).scoreDocs, multi));
assertSameValues (scoresY, getScores (full.search (queryY, null, 1000, sort).scoreDocs, full));
assertSameValues (scoresY, getScores (multi.search (queryY, null, 1000, sort).scoreDocs, multi));
assertSameValues (scoresA, getScores (full.search (queryA, null, 1000, sort).scoreDocs, full));
assertSameValues (scoresA, getScores (multi.search (queryA, null, 1000, sort).scoreDocs, multi));
sort.setSort (new SortField("int", SortField.INT));
assertSameValues (scoresX, getScores (full.search (queryX, null, 1000, sort).scoreDocs, full));
assertSameValues (scoresX, getScores (multi.search (queryX, null, 1000, sort).scoreDocs, multi));
assertSameValues (scoresY, getScores (full.search (queryY, null, 1000, sort).scoreDocs, full));
assertSameValues (scoresY, getScores (multi.search (queryY, null, 1000, sort).scoreDocs, multi));
assertSameValues (scoresA, getScores (full.search (queryA, null, 1000, sort).scoreDocs, full));
assertSameValues (scoresA, getScores (multi.search (queryA, null, 1000, sort).scoreDocs, multi));
sort.setSort (new SortField("float", SortField.FLOAT));
assertSameValues (scoresX, getScores (full.search (queryX, null, 1000, sort).scoreDocs, full));
assertSameValues (scoresX, getScores (multi.search (queryX, null, 1000, sort).scoreDocs, multi));
assertSameValues (scoresY, getScores (full.search (queryY, null, 1000, sort).scoreDocs, full));
assertSameValues (scoresY, getScores (multi.search (queryY, null, 1000, sort).scoreDocs, multi));
assertSameValues (scoresA, getScores (full.search (queryA, null, 1000, sort).scoreDocs, full));
assertSameValues (scoresA, getScores (multi.search (queryA, null, 1000, sort).scoreDocs, multi));
sort.setSort (new SortField("string", SortField.STRING));
assertSameValues (scoresX, getScores (full.search (queryX, null, 1000, sort).scoreDocs, full));
assertSameValues (scoresX, getScores (multi.search (queryX, null, 1000, sort).scoreDocs, multi));
assertSameValues (scoresY, getScores (full.search (queryY, null, 1000, sort).scoreDocs, full));
assertSameValues (scoresY, getScores (multi.search (queryY, null, 1000, sort).scoreDocs, multi));
assertSameValues (scoresA, getScores (full.search (queryA, null, 1000, sort).scoreDocs, full));
assertSameValues (scoresA, getScores (multi.search (queryA, null, 1000, sort).scoreDocs, multi));
sort.setSort (new SortField("int", SortField.INT),new SortField("float", SortField.FLOAT));
assertSameValues (scoresX, getScores (full.search (queryX, null, 1000, sort).scoreDocs, full));
assertSameValues (scoresX, getScores (multi.search (queryX, null, 1000, sort).scoreDocs, multi));
assertSameValues (scoresY, getScores (full.search (queryY, null, 1000, sort).scoreDocs, full));
assertSameValues (scoresY, getScores (multi.search (queryY, null, 1000, sort).scoreDocs, multi));
assertSameValues (scoresA, getScores (full.search (queryA, null, 1000, sort).scoreDocs, full));
assertSameValues (scoresA, getScores (multi.search (queryA, null, 1000, sort).scoreDocs, multi));
sort.setSort (new SortField ("int", SortField.INT, true), new SortField (null, SortField.DOC, true) );
assertSameValues (scoresX, getScores (full.search (queryX, null, 1000, sort).scoreDocs, full));
assertSameValues (scoresX, getScores (multi.search (queryX, null, 1000, sort).scoreDocs, multi));
assertSameValues (scoresY, getScores (full.search (queryY, null, 1000, sort).scoreDocs, full));
assertSameValues (scoresY, getScores (multi.search (queryY, null, 1000, sort).scoreDocs, multi));
assertSameValues (scoresA, getScores (full.search (queryA, null, 1000, sort).scoreDocs, full));
assertSameValues (scoresA, getScores (multi.search (queryA, null, 1000, sort).scoreDocs, multi));
sort.setSort (new SortField("int", SortField.INT),new SortField("string", SortField.STRING));
assertSameValues (scoresX, getScores (full.search (queryX, null, 1000, sort).scoreDocs, full));
assertSameValues (scoresX, getScores (multi.search (queryX, null, 1000, sort).scoreDocs, multi));
assertSameValues (scoresY, getScores (full.search (queryY, null, 1000, sort).scoreDocs, full));
assertSameValues (scoresY, getScores (multi.search (queryY, null, 1000, sort).scoreDocs, multi));
assertSameValues (scoresA, getScores (full.search (queryA, null, 1000, sort).scoreDocs, full));
assertSameValues (scoresA, getScores (multi.search (queryA, null, 1000, sort).scoreDocs, multi));
}
public void testTopDocsScores() throws Exception {
// There was previously a bug in FieldSortedHitQueue.maxscore when only a single
@@ -1024,7 +914,7 @@ public class TestSort extends LuceneTestCase implements Serializable {
}
// runs a variety of sorts useful for multisearchers
private void runMultiSorts(Searcher multi, boolean isFull) throws Exception {
private void runMultiSorts(IndexSearcher multi, boolean isFull) throws Exception {
sort.setSort(SortField.FIELD_DOC);
String expected = isFull ? "ABCDEFGHIJ" : "ACEGIBDFHJ";
assertMatches(multi, queryA, sort, expected);
@@ -1101,12 +991,12 @@ public class TestSort extends LuceneTestCase implements Serializable {
}
private void assertMatches(Searcher searcher, Query query, Sort sort, String expectedResult) throws IOException {
private void assertMatches(IndexSearcher searcher, Query query, Sort sort, String expectedResult) throws IOException {
assertMatches( null, searcher, query, sort, expectedResult );
}
// make sure the documents returned by the search match the expected list
private void assertMatches(String msg, Searcher searcher, Query query, Sort sort,
private void assertMatches(String msg, IndexSearcher searcher, Query query, Sort sort,
String expectedResult) throws IOException {
//ScoreDoc[] result = searcher.search (query, null, 1000, sort).scoreDocs;
TopDocs hits = searcher.search (query, null, Math.max(1, expectedResult.length()), sort);
@@ -1124,7 +1014,7 @@ public class TestSort extends LuceneTestCase implements Serializable {
assertEquals (msg, expectedResult, buff.toString());
}
private HashMap<String,Float> getScores (ScoreDoc[] hits, Searcher searcher)
private HashMap<String,Float> getScores (ScoreDoc[] hits, IndexSearcher searcher)
throws IOException {
HashMap<String,Float> scoreMap = new HashMap<String,Float>();
int n = hits.length;

View File

@@ -134,7 +134,7 @@ public class TestTermRangeQuery extends LuceneTestCase {
searcher.close();
}
private void checkBooleanTerms(Searcher searcher, TermRangeQuery query, String... terms) throws IOException {
private void checkBooleanTerms(IndexSearcher searcher, TermRangeQuery query, String... terms) throws IOException {
query.setRewriteMethod(new MultiTermQuery.TopTermsScoringBooleanQueryRewrite(50));
final BooleanQuery bq = (BooleanQuery) searcher.rewrite(query);
final Set<String> allowedTerms = asSet(terms);

View File

@@ -50,7 +50,7 @@ public class TestTimeLimitingCollector extends LuceneTestCase {
private static final int N_DOCS = 3000;
private static final int N_THREADS = 50;
private Searcher searcher;
private IndexSearcher searcher;
private Directory directory;
private IndexReader reader;

View File

@@ -38,7 +38,7 @@ import org.apache.lucene.util._TestUtil;
* and validates that the correct number of hits is returned.
*/
public class TestWildcardRandom extends LuceneTestCase {
private Searcher searcher;
private IndexSearcher searcher;
private IndexReader reader;
private Directory dir;

View File

@@ -329,7 +329,7 @@ public class TestCustomScoreQuery extends FunctionTestSetup {
}
}
private void logResult(String msg, Searcher s, Query q, int doc, float score1) throws IOException {
private void logResult(String msg, IndexSearcher s, Query q, int doc, float score1) throws IOException {
log(msg+" "+score1);
log("Explain by: "+q);
log(s.explain(q,doc));

View File

@@ -32,10 +32,9 @@ import org.apache.lucene.index.RandomIndexWriter;
import org.apache.lucene.index.Term;
import org.apache.lucene.search.DefaultSimilarity;
import org.apache.lucene.search.Explanation;
import org.apache.lucene.search.IndexSearcher;
import org.apache.lucene.search.QueryUtils;
import org.apache.lucene.search.ScoreDoc;
import org.apache.lucene.search.Searcher;
import org.apache.lucene.search.IndexSearcher;
import org.apache.lucene.search.TopDocs;
import org.apache.lucene.search.spans.SpanQuery;
import org.apache.lucene.search.spans.SpanNearQuery;
@@ -325,7 +324,7 @@ public class TestPayloadNearQuery extends LuceneTestCase {
return 1.0f;
}
// idf used for phrase queries
@Override public IDFExplanation idfExplain(Collection<Term> terms, Searcher searcher) throws IOException {
@Override public IDFExplanation idfExplain(Collection<Term> terms, IndexSearcher searcher) throws IOException {
return new IDFExplanation() {
@Override
public float getIdf() {

View File

@@ -24,7 +24,6 @@ import org.apache.lucene.index.RandomIndexWriter;
import org.apache.lucene.index.Term;
import org.apache.lucene.search.FuzzyQuery;
import org.apache.lucene.search.IndexSearcher;
import org.apache.lucene.search.Searcher;
import org.apache.lucene.search.WildcardQuery;
import org.apache.lucene.store.Directory;
import org.apache.lucene.util.LuceneTestCase;
@@ -35,7 +34,7 @@ import org.apache.lucene.util.LuceneTestCase;
public class TestSpanMultiTermQueryWrapper extends LuceneTestCase {
private Directory directory;
private IndexReader reader;
private Searcher searcher;
private IndexSearcher searcher;
@Override
public void setUp() throws Exception {

View File

@@ -18,14 +18,13 @@ package org.apache.lucene.search.spans;
*/
import org.apache.lucene.search.DocIdSetIterator;
import org.apache.lucene.search.IndexSearcher;
import org.apache.lucene.search.Query;
import org.apache.lucene.search.CheckHits;
import org.apache.lucene.search.Similarity;
import org.apache.lucene.search.DefaultSimilarity;
import org.apache.lucene.search.Scorer;
import org.apache.lucene.search.TermQuery;
import org.apache.lucene.search.Searcher;
import org.apache.lucene.search.IndexSearcher;
import org.apache.lucene.store.Directory;
import org.apache.lucene.analysis.MockAnalyzer;
import org.apache.lucene.index.IndexWriter;
@@ -416,7 +415,7 @@ public class TestSpans extends LuceneTestCase {
slop,
ordered) {
@Override
public Similarity getSimilarity(Searcher s) {
public Similarity getSimilarity(IndexSearcher s) {
return sim;
}
};
@@ -439,7 +438,7 @@
}
// LUCENE-1404
private int hitCount(Searcher searcher, String word) throws Throwable {
private int hitCount(IndexSearcher searcher, String word) throws Throwable {
return searcher.search(new TermQuery(new Term("text", word)), 10).totalHits;
}

View File

@@ -134,7 +134,7 @@ public class TestSpansAdvanced extends LuceneTestCase {
*
* @throws IOException
*/
protected static void assertHits(Searcher s, Query query,
protected static void assertHits(IndexSearcher s, Query query,
final String description, final String[] expectedIds,
final float[] expectedScores) throws IOException {
QueryUtils.check(random, query, s);

View File

@@ -26,13 +26,12 @@ import org.apache.lucene.index.IndexWriter;
import org.apache.lucene.index.IndexWriterConfig;
import org.apache.lucene.index.Term;
import org.apache.lucene.index.IndexReader;
import org.apache.lucene.search.IndexSearcher;
import org.apache.lucene.search.ScoreDoc;
import org.apache.lucene.search.Query;
import org.apache.lucene.search.TermRangeFilter;
import org.apache.lucene.search.TermQuery;
import org.apache.lucene.search.TermRangeQuery;
import org.apache.lucene.search.Searcher;
import org.apache.lucene.search.IndexSearcher;
import org.apache.lucene.search.Sort;
import org.apache.lucene.search.SortField;
import org.apache.lucene.document.Field;
@@ -215,7 +214,7 @@ public abstract class CollationTestBase extends LuceneTestCase {
}
writer.optimize();
writer.close();
Searcher searcher = new IndexSearcher(indexStore, true);
IndexSearcher searcher = new IndexSearcher(indexStore, true);
Sort sort = new Sort();
Query queryX = new TermQuery(new Term ("contents", "x"));
@@ -236,7 +235,7 @@ public abstract class CollationTestBase extends LuceneTestCase {
// Make sure the documents returned by the search match the expected list
// Copied from TestSort.java
private void assertMatches(Searcher searcher, Query query, Sort sort,
private void assertMatches(IndexSearcher searcher, Query query, Sort sort,
String expectedResult) throws IOException {
ScoreDoc[] result = searcher.search(query, null, 1000, sort).scoreDocs;
StringBuilder buff = new StringBuilder(10);

View File

@@ -36,7 +36,6 @@ import org.apache.lucene.search.MultiTermQuery;
import org.apache.lucene.search.TopFieldCollector;
import org.apache.lucene.search.ScoreDoc;
import org.apache.lucene.search.TopScoreDocCollector;
import org.apache.lucene.search.Weight;
import org.apache.lucene.search.IndexSearcher;
import org.apache.lucene.search.Query;
import org.apache.lucene.search.Sort;
@@ -114,12 +113,16 @@ public abstract class ReadTask extends PerfTask {
if (numHits > 0) {
if (withCollector() == false) {
if (sort != null) {
Weight w = q.weight(searcher);
// TODO: instead of always passing false we
// should detect based on the query; if we make
// the IndexSearcher search methods that take
// Weight public again, we can go back to
// pulling the Weight ourselves:
TopFieldCollector collector = TopFieldCollector.create(sort, numHits,
true, withScore(),
withMaxScore(),
!w.scoresDocsOutOfOrder());
searcher.search(w, null, collector);
false);
searcher.search(q, null, collector);
hits = collector.topDocs();
} else {
hits = searcher.search(q, numHits);
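Pulled out of the task plumbing, the new sorted-search path boils down to the sequence below (a sketch; the second and third booleans stand in for ReadTask's withScore()/withMaxScore() hooks, and the last is the always-false docsScoredInOrder that the TODO above refers to):

TopFieldCollector collector = TopFieldCollector.create(sort, numHits,
    true,   // fillFields
    false,  // trackDocScores (withScore() in ReadTask)
    false,  // trackMaxScore (withMaxScore() in ReadTask)
    false); // docsScoredInOrder: unknown without the Weight, so assume out-of-order
searcher.search(q, null, collector);
TopDocs hits = collector.topDocs();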

View File

@@ -23,7 +23,7 @@ import org.apache.lucene.benchmark.quality.utils.DocNameExtractor;
import org.apache.lucene.benchmark.quality.utils.SubmissionReport;
import org.apache.lucene.search.Query;
import org.apache.lucene.search.ScoreDoc;
import org.apache.lucene.search.Searcher;
import org.apache.lucene.search.IndexSearcher;
import org.apache.lucene.search.TopDocs;
/**
@@ -47,7 +47,7 @@ public class QualityBenchmark {
protected QualityQueryParser qqParser;
/** Index to be searched. */
protected Searcher searcher;
protected IndexSearcher searcher;
/** index field to extract doc name for each search result; used for judging the results. */
protected String docNameField;
@@ -68,7 +68,7 @@ public class QualityBenchmark {
* and is important for judging the results.
*/
public QualityBenchmark(QualityQuery qqs[], QualityQueryParser qqParser,
Searcher searcher, String docNameField) {
IndexSearcher searcher, String docNameField) {
this.qualityQueries = qqs;
this.qqParser = qqParser;
this.searcher = searcher;

View File

@@ -17,13 +17,10 @@ package org.apache.lucene.benchmark.quality.trec;
* limitations under the License.
*/
import org.apache.lucene.benchmark.quality.trec.TrecJudge;
import org.apache.lucene.benchmark.quality.trec.TrecTopicsReader;
import org.apache.lucene.benchmark.quality.utils.SimpleQQParser;
import org.apache.lucene.benchmark.quality.utils.SubmissionReport;
import org.apache.lucene.benchmark.quality.*;
import org.apache.lucene.search.IndexSearcher;
import org.apache.lucene.search.Searcher;
import org.apache.lucene.store.FSDirectory;
import java.io.BufferedReader;
@@ -56,7 +53,7 @@ public class QueryDriver {
SubmissionReport submitLog = new SubmissionReport(new PrintWriter(args[2]), "lucene");
FSDirectory dir = FSDirectory.open(new File(args[3]));
String fieldSpec = args.length == 5 ? args[4] : "T"; // default to Title-only if not specified.
Searcher searcher = new IndexSearcher(dir, true);
IndexSearcher searcher = new IndexSearcher(dir, true);
int maxResults = 1000;
String docNameField = "docname";

View File

@@ -20,7 +20,7 @@ import java.io.IOException;
import org.apache.lucene.document.FieldSelector;
import org.apache.lucene.document.FieldSelectorResult;
import org.apache.lucene.search.Searcher;
import org.apache.lucene.search.IndexSearcher;
/**
* Utility: extract doc names from an index
@@ -52,7 +52,7 @@ public class DocNameExtractor {
* @return the name of the input doc as extracted from the index.
* @throws IOException if cannot extract the doc name from the index.
*/
public String docName(Searcher searcher, int docid) throws IOException {
public String docName(IndexSearcher searcher, int docid) throws IOException {
return searcher.doc(docid,fldSel).get(docNameField);
}

View File

@@ -22,7 +22,7 @@ import java.text.NumberFormat;
import org.apache.lucene.benchmark.quality.QualityQuery;
import org.apache.lucene.search.ScoreDoc;
import org.apache.lucene.search.Searcher;
import org.apache.lucene.search.IndexSearcher;
import org.apache.lucene.search.TopDocs;
/**
@@ -58,7 +58,7 @@ public class SubmissionReport {
* @param searcher index access for fetching doc name.
* @throws IOException in case of a problem.
*/
public void report(QualityQuery qq, TopDocs td, String docNameField, Searcher searcher) throws IOException {
public void report(QualityQuery qq, TopDocs td, String docNameField, IndexSearcher searcher) throws IOException {
if (logger==null) {
return;
}

View File

@@ -334,13 +334,13 @@ class SpatialDistanceQuery extends Query {
public void extractTerms(Set terms) {}
protected class SpatialWeight extends Weight {
protected Searcher searcher;
protected IndexSearcher searcher;
protected float queryNorm;
protected float queryWeight;
protected Map latContext;
protected Map lonContext;
public SpatialWeight(Searcher searcher) throws IOException {
public SpatialWeight(IndexSearcher searcher) throws IOException {
this.searcher = searcher;
this.latContext = latSource.newContext();
this.lonContext = lonSource.newContext();
@@ -535,7 +535,7 @@ class SpatialDistanceQuery extends Query {
@Override
public Weight createWeight(Searcher searcher) throws IOException {
public Weight createWeight(IndexSearcher searcher) throws IOException {
return new SpatialWeight(searcher);
}

View File

@@ -54,7 +54,7 @@ class LuceneQueryOptimizer {
}
public TopDocs optimize(BooleanQuery original,
Searcher searcher,
IndexSearcher searcher,
int numHits,
Query[] queryOut,
Filter[] filterOut

View File

@@ -59,7 +59,7 @@ public class SolrConstantScoreQuery extends ConstantScoreQuery {
private float queryWeight;
private Map context;
public ConstantWeight(Searcher searcher) throws IOException {
public ConstantWeight(IndexSearcher searcher) throws IOException {
this.similarity = getSimilarity(searcher);
this.context = ValueSource.newContext();
if (filter instanceof SolrFilter)
@@ -161,7 +161,7 @@ public class SolrConstantScoreQuery extends ConstantScoreQuery {
}
@Override
public Weight createWeight(Searcher searcher) {
public Weight createWeight(IndexSearcher searcher) {
try {
return new SolrConstantScoreQuery.ConstantWeight(searcher);
} catch (IOException e) {

View File

@@ -18,7 +18,7 @@
package org.apache.solr.search;
import org.apache.lucene.search.Filter;
import org.apache.lucene.search.Searcher;
import org.apache.lucene.search.IndexSearcher;
import org.apache.lucene.search.DocIdSet;
import org.apache.lucene.index.IndexReader;
@@ -35,7 +35,7 @@ public abstract class SolrFilter extends Filter {
/** Implementations should propagate createWeight to sub-ValueSources which can store weight info in the context.
* The context object will be passed to getDocIdSet() where this info can be retrieved. */
public abstract void createWeight(Map context, Searcher searcher) throws IOException;
public abstract void createWeight(Map context, IndexSearcher searcher) throws IOException;
public abstract DocIdSet getDocIdSet(Map context, IndexReader reader) throws IOException;
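A hypothetical subclass makes the handshake concrete: createWeight stores per-search state in the context map, and getDocIdSet reads it back per reader. Everything below except the two overridden signatures is invented for illustration:

import java.io.IOException;
import java.util.Map;
import org.apache.lucene.index.IndexReader;
import org.apache.lucene.search.DocIdSet;
import org.apache.lucene.search.IndexSearcher;
import org.apache.solr.search.SolrFilter;
import org.apache.solr.search.function.DocValues;
import org.apache.solr.search.function.ValueSource;

public class ValueSourceFilterSketch extends SolrFilter {
  private final ValueSource source;

  public ValueSourceFilterSketch(ValueSource source) {
    this.source = source;
  }

  @Override
  public void createWeight(Map context, IndexSearcher searcher) throws IOException {
    source.createWeight(context, searcher); // the sub-source caches what it needs (often the searcher itself)
  }

  @Override
  public DocIdSet getDocIdSet(Map context, IndexReader reader) throws IOException {
    DocValues vals = source.getValues(context, reader); // retrieves the cached state
    // a real implementation would build a DocIdSet by testing vals per document; placeholder:
    return DocIdSet.EMPTY_DOCIDSET;
  }
}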

View File

@@ -20,7 +20,7 @@ import org.apache.lucene.index.IndexReader;
import org.apache.lucene.index.Term;
import org.apache.lucene.queryParser.ParseException;
import org.apache.lucene.search.Query;
import org.apache.lucene.search.Searcher;
import org.apache.lucene.search.IndexSearcher;
import org.apache.lucene.search.TermQuery;
import org.apache.lucene.search.spell.JaroWinklerDistance;
import org.apache.lucene.search.spell.LevensteinDistance;
@@ -889,7 +889,7 @@ abstract class Double2Parser extends NamedParser {
}
@Override
public void createWeight(Map context, Searcher searcher) throws IOException {
public void createWeight(Map context, IndexSearcher searcher) throws IOException {
a.createWeight(context,searcher);
b.createWeight(context,searcher);
}

View File

@@ -53,16 +53,16 @@ public class BoostedQuery extends Query {
q.extractTerms(terms);
}
public Weight createWeight(Searcher searcher) throws IOException {
public Weight createWeight(IndexSearcher searcher) throws IOException {
return new BoostedQuery.BoostedWeight(searcher);
}
private class BoostedWeight extends Weight {
Searcher searcher;
IndexSearcher searcher;
Weight qWeight;
Map context;
public BoostedWeight(Searcher searcher) throws IOException {
public BoostedWeight(IndexSearcher searcher) throws IOException {
this.searcher = searcher;
this.qWeight = q.weight(searcher);
this.context = boostVal.newContext();
@@ -129,9 +129,9 @@ public class BoostedQuery extends Query {
private final Scorer scorer;
private final DocValues vals;
private final IndexReader reader;
private final Searcher searcher;
private final IndexSearcher searcher;
private CustomScorer(Similarity similarity, Searcher searcher, IndexReader reader, BoostedQuery.BoostedWeight w,
private CustomScorer(Similarity similarity, IndexSearcher searcher, IndexReader reader, BoostedQuery.BoostedWeight w,
Scorer scorer, ValueSource vs) throws IOException {
super(similarity);
this.weight = w;

View File

@@ -19,7 +19,7 @@ package org.apache.solr.search.function;
import org.apache.lucene.index.IndexReader;
import org.apache.lucene.index.Term;
import org.apache.lucene.search.Searcher;
import org.apache.lucene.search.IndexSearcher;
import org.apache.lucene.util.BytesRef;
import org.apache.solr.search.MutableValueInt;
import org.apache.solr.search.MutableValue;
@@ -240,13 +240,13 @@ public class DocFreqValueSource extends ValueSource {
@Override
public DocValues getValues(Map context, IndexReader reader) throws IOException {
Searcher searcher = (Searcher)context.get("searcher");
IndexSearcher searcher = (IndexSearcher)context.get("searcher");
int docfreq = searcher.docFreq(new Term(indexedField, indexedBytes));
return new ConstIntDocValues(docfreq, this);
}
@Override
public void createWeight(Map context, Searcher searcher) throws IOException {
public void createWeight(Map context, IndexSearcher searcher) throws IOException {
context.put("searcher",searcher);
}
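Caller side of the same idiom, sketched for some ValueSource vs (names assumed): the createWeight step must run before getValues, which is exactly what FunctionWeight does in its constructor, as the FunctionQuery diff below shows.

Map context = vs.newContext();                  // per-search scratch map
vs.createWeight(context, searcher);             // DocFreqValueSource stashes the searcher here
DocValues vals = vs.getValues(context, reader); // ...and fetches it back here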

View File

@@ -18,7 +18,7 @@
package org.apache.solr.search.function;
import org.apache.lucene.index.IndexReader;
import org.apache.lucene.search.Searcher;
import org.apache.lucene.search.IndexSearcher;
import java.io.IOException;
import java.util.Map;
@@ -69,7 +69,7 @@ public abstract class DualFloatFunction extends ValueSource {
}
@Override
public void createWeight(Map context, Searcher searcher) throws IOException {
public void createWeight(Map context, IndexSearcher searcher) throws IOException {
a.createWeight(context,searcher);
b.createWeight(context,searcher);
}

View File

@@ -60,12 +60,12 @@ public class FunctionQuery extends Query {
public void extractTerms(Set terms) {}
protected class FunctionWeight extends Weight {
protected Searcher searcher;
protected IndexSearcher searcher;
protected float queryNorm;
protected float queryWeight;
protected Map context;
public FunctionWeight(Searcher searcher) throws IOException {
public FunctionWeight(IndexSearcher searcher) throws IOException {
this.searcher = searcher;
this.context = func.newContext();
func.createWeight(context, searcher);
@@ -184,7 +184,7 @@ public class FunctionQuery extends Query {
@Override
public Weight createWeight(Searcher searcher) throws IOException {
public Weight createWeight(IndexSearcher searcher) throws IOException {
return new FunctionQuery.FunctionWeight(searcher);
}

View File

@@ -18,7 +18,7 @@
package org.apache.solr.search.function;
import org.apache.lucene.index.*;
import org.apache.lucene.search.Searcher;
import org.apache.lucene.search.IndexSearcher;
import org.apache.lucene.search.Similarity;
import org.apache.lucene.util.BytesRef;
import org.apache.solr.util.ByteUtils;
@@ -39,7 +39,7 @@ public class IDFValueSource extends DocFreqValueSource {
@Override
public DocValues getValues(Map context, IndexReader reader) throws IOException {
Searcher searcher = (Searcher)context.get("searcher");
IndexSearcher searcher = (IndexSearcher)context.get("searcher");
Similarity sim = searcher.getSimilarity();
// todo: we need docFreq that takes a BytesRef
String strVal = ByteUtils.UTF8toUTF16(indexedBytes);

View File

@@ -18,7 +18,7 @@
package org.apache.solr.search.function;
import org.apache.lucene.index.IndexReader;
import org.apache.lucene.search.Searcher;
import org.apache.lucene.search.IndexSearcher;
import java.io.IOException;
import java.util.Map;
@@ -71,7 +71,7 @@ public class LinearFloatFunction extends ValueSource {
}
@Override
public void createWeight(Map context, Searcher searcher) throws IOException {
public void createWeight(Map context, IndexSearcher searcher) throws IOException {
source.createWeight(context, searcher);
}

Some files were not shown because too many files have changed in this diff.