LUCENE-6301: Removal of org.apache.lucene.Filter.

From a Lucene perspective Filter is gone. However it was still used for things
like DocSet and SolrConstantScoreQuery in Solr, so it has been moved to
the org.apache.solr.search package for now, even though in the long term it
would be nice for Solr to move to the Query API entirely as well.


git-svn-id: https://svn.apache.org/repos/asf/lucene/dev/trunk@1708097 13f79535-47bb-0310-9956-ffa450edef68
This commit is contained in:
Adrien Grand 2015-10-12 12:15:07 +00:00
parent 3e8cd29d55
commit b3d191832c
62 changed files with 313 additions and 723 deletions

View File

@ -99,6 +99,11 @@ API Changes
* LUCENE-6803: Deprecate sandbox Regexp Query. (Uwe Schindler)
* LUCENE-6301: org.apache.lucene.search.Filter is now deprecated. You should use
Query objects instead of Filters, and the BooleanClause.Occur.FILTER clause in
order to let Lucene know that a Query should be used for filtering but not
scoring.
Optimizations
* LUCENE-6708: TopFieldCollector does not compute the score several times on the

View File

@ -31,10 +31,12 @@ situations where some documents do not have values for fields wrapped in other
ValueSources. Users who want to preserve the previous behavior may need to wrap
their ValueSources in a "DefFunction" along with a ConstValueSource of "0.0".
## Removal of FilteredQuery (LUCENE-6583)
## Removal of Filter and FilteredQuery (LUCENE-6301,LUCENE-6583)
FilteredQuery has been removed. Instead, you can construct a BooleanQuery with
one MUST clause for the query, and one FILTER clause for the filter.
Filter and FilteredQuery have been removed. Regular queries can be used instead
of filters as they have been optimized for the filtering case. And you can
construct a BooleanQuery with one MUST clause for the query, and one FILTER
clause for the filter in order to have similar behaviour to FilteredQuery.
## PhraseQuery and BooleanQuery made immutable (LUCENE-6531 LUCENE-6570)

View File

@ -22,8 +22,7 @@ import java.io.IOException;
/**
* Abstract decorator class of a DocIdSetIterator
* implementation that provides on-demand filter/validation
* mechanism on an underlying DocIdSetIterator. See {@link
* FilteredDocIdSet}.
* mechanism on an underlying DocIdSetIterator.
*/
public abstract class FilteredDocIdSetIterator extends DocIdSetIterator {
protected DocIdSetIterator _innerIter;

View File

@ -500,8 +500,6 @@
* Weight object is an internal representation of the Query that allows the Query
* to be reused by the IndexSearcher.</li>
* <li>The IndexSearcher that initiated the call.</li>
* <li>A {@link org.apache.lucene.search.Filter Filter} for limiting the result set.
* Note, the Filter may be null.</li>
* <li>A {@link org.apache.lucene.search.Sort Sort} object for specifying how to sort
* the results if the standard score-based sort method is not desired.</li>
* </ol>
@ -509,8 +507,7 @@
* we call one of the search methods of the IndexSearcher, passing in the
* {@link org.apache.lucene.search.Weight Weight} object created by
* {@link org.apache.lucene.search.IndexSearcher#createNormalizedWeight(org.apache.lucene.search.Query,boolean)
* IndexSearcher.createNormalizedWeight(Query,boolean)},
* {@link org.apache.lucene.search.Filter Filter} and the number of results we want.
* IndexSearcher.createNormalizedWeight(Query,boolean)} and the number of results we want.
* This method returns a {@link org.apache.lucene.search.TopDocs TopDocs} object,
* which is an internal collection of search results. The IndexSearcher creates
* a {@link org.apache.lucene.search.TopScoreDocCollector TopScoreDocCollector} and

View File

@ -24,7 +24,6 @@ import org.apache.lucene.index.FieldInvertState;
import org.apache.lucene.index.LeafReaderContext;
import org.apache.lucene.index.Term;
import org.apache.lucene.search.similarities.Similarity;
import org.apache.lucene.util.Bits;
import org.apache.lucene.util.PriorityQueue;
/**
@ -130,34 +129,6 @@ final class JustCompileSearch {
}
// Minimal concrete Filter subclass used only to verify that the API compiles;
// it is never actually executed by any test.
static final class JustCompileFilter extends Filter {
// Filter is just an abstract class with no abstract methods. However it is
// still added here in case someone adds abstract methods in the future.
@Override
public DocIdSet getDocIdSet(LeafReaderContext context, Bits acceptDocs) {
// Returning null is fine: this class exists for compilation only.
return null;
}
@Override
public String toString(String field) {
return "JustCompileFilter";
}
}
// Minimal FilteredDocIdSet subclass; match() deliberately throws because this
// class exists only to exercise compilation of the abstract API.
static final class JustCompileFilteredDocIdSet extends FilteredDocIdSet {
public JustCompileFilteredDocIdSet(DocIdSet innerSet) {
super(innerSet);
}
@Override
protected boolean match(int docid) {
throw new UnsupportedOperationException(UNSUPPORTED_MSG);
}
}
static final class JustCompileFilteredDocIdSetIterator extends FilteredDocIdSetIterator {
public JustCompileFilteredDocIdSetIterator(DocIdSetIterator innerIter) {

View File

@ -1,47 +0,0 @@
package org.apache.lucene.search;
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
import org.apache.lucene.index.LeafReaderContext;
import org.apache.lucene.util.BitDocIdSet;
import org.apache.lucene.util.FixedBitSet;
import org.apache.lucene.util.Bits;
/**
 * Test-only {@link Filter} that matches no documents but records whether
 * {@link #getDocIdSet} has been invoked, so tests can assert that a cached
 * wrapper did (or did not) delegate to the underlying filter.
 */
public class MockFilter extends Filter {

  private boolean wasCalled;

  @Override
  public DocIdSet getDocIdSet(LeafReaderContext context, Bits acceptDocs) {
    wasCalled = true;
    // An all-zero bit set: no document matches, we only care about the call.
    final FixedBitSet matches = new FixedBitSet(context.reader().maxDoc());
    return new BitDocIdSet(matches);
  }

  @Override
  public String toString(String field) {
    return "MockFilter";
  }

  /** Resets the invocation flag so the filter can be reused across searches. */
  public void clear() {
    wasCalled = false;
  }

  /** Returns true if {@link #getDocIdSet} ran since the last {@link #clear()}. */
  public boolean wasCalled() {
    return wasCalled;
  }
}

View File

@ -1,59 +0,0 @@
package org.apache.lucene.search;
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
import java.io.IOException;
import org.apache.lucene.index.LeafReaderContext;
import org.apache.lucene.util.Bits;
import org.apache.lucene.util.BitDocIdSet;
import org.apache.lucene.util.FixedBitSet;
/**
 * Test-only {@link Filter} that matches exactly one document id per segment,
 * honoring {@code acceptDocs} (deleted documents are excluded).
 */
public class SingleDocTestFilter extends Filter {

  private int doc;

  public SingleDocTestFilter(int doc) {
    this.doc = doc;
  }

  @Override
  public DocIdSet getDocIdSet(LeafReaderContext context, Bits acceptDocs) throws IOException {
    FixedBitSet matches = new FixedBitSet(context.reader().maxDoc());
    // Set the single target bit only when it is live according to acceptDocs.
    if (acceptDocs == null || acceptDocs.get(doc)) {
      matches.set(doc);
    }
    return new BitDocIdSet(matches);
  }

  @Override
  public String toString(String field) {
    return "SingleDocTestFilter(" + doc + ")";
  }

  @Override
  public boolean equals(Object obj) {
    if (!super.equals(obj)) {
      return false;
    }
    SingleDocTestFilter other = (SingleDocTestFilter) obj;
    return other.doc == doc;
  }

  @Override
  public int hashCode() {
    return doc + 31 * super.hashCode();
  }
}

View File

@ -18,6 +18,7 @@ package org.apache.lucene.search;
*/
import java.io.IOException;
import java.util.concurrent.atomic.AtomicBoolean;
import org.apache.lucene.analysis.MockAnalyzer;
import org.apache.lucene.document.Document;
@ -39,7 +40,7 @@ public class TestCachingWrapperQuery extends LuceneTestCase {
DirectoryReader ir;
IndexSearcher is;
RandomIndexWriter iw;
@Override
public void setUp() throws Exception {
super.setUp();
@ -60,7 +61,7 @@ public class TestCachingWrapperQuery extends LuceneTestCase {
ir = iw.getReader();
is = newSearcher(ir);
}
@Override
public void tearDown() throws Exception {
iw.close();
@ -80,14 +81,14 @@ public class TestCachingWrapperQuery extends LuceneTestCase {
assertEquals(hits3.totalHits, hits4.totalHits);
CheckHits.checkEqual(f1, hits3.scoreDocs, hits4.scoreDocs);
}
/** test null iterator */
public void testEmpty() throws Exception {
BooleanQuery.Builder expected = new BooleanQuery.Builder();
Query cached = new CachingWrapperQuery(expected.build(), MAYBE_CACHE_POLICY);
assertQueryEquals(expected.build(), cached);
}
/** test iterator returns NO_MORE_DOCS */
public void testEmpty2() throws Exception {
BooleanQuery.Builder expected = new BooleanQuery.Builder();
@ -96,7 +97,7 @@ public class TestCachingWrapperQuery extends LuceneTestCase {
Query cached = new CachingWrapperQuery(expected.build(), MAYBE_CACHE_POLICY);
assertQueryEquals(expected.build(), cached);
}
/** test iterator returns single document */
public void testSingle() throws Exception {
for (int i = 0; i < 10; i++) {
@ -106,7 +107,7 @@ public class TestCachingWrapperQuery extends LuceneTestCase {
assertQueryEquals(expected, cached);
}
}
/** test sparse filters (match single documents) */
public void testSparse() throws Exception {
for (int i = 0; i < 10; i++) {
@ -118,15 +119,43 @@ public class TestCachingWrapperQuery extends LuceneTestCase {
assertQueryEquals(expected, cached);
}
}
/** test dense filters (match entire index) */
public void testDense() throws Exception {
Query query = new MatchAllDocsQuery();
Filter expected = new QueryWrapperFilter(query);
Query expected = new MatchAllDocsQuery();
Query cached = new CachingWrapperQuery(expected, MAYBE_CACHE_POLICY);
assertQueryEquals(expected, cached);
}
// Query-based replacement for the removed MockFilter: matches all documents
// and records (thread-safely) whether its Scorer was ever requested, so the
// caching tests can assert whether the wrapped query was delegated to.
private static class MockQuery extends Query {
// AtomicBoolean because scorer() may run on searcher threads.
private final AtomicBoolean wasCalled = new AtomicBoolean();
// Returns true if scorer() ran since the last clear().
public boolean wasCalled() {
return wasCalled.get();
}
// Resets the invocation flag so the query can be reused across searches.
public void clear() {
wasCalled.set(false);
}
@Override
public String toString(String field) {
return "Mock";
}
@Override
public Weight createWeight(IndexSearcher searcher, boolean needsScores) throws IOException {
return new ConstantScoreWeight(this) {
@Override
public Scorer scorer(LeafReaderContext context) throws IOException {
// Record the call, then match every document in the segment.
wasCalled.set(true);
return new ConstantScoreScorer(this, score(), DocIdSetIterator.all(context.reader().maxDoc()));
}
};
}
}
public void testCachingWorks() throws Exception {
Directory dir = newDirectory();
RandomIndexWriter writer = new RandomIndexWriter(random(), dir);
@ -135,7 +164,7 @@ public class TestCachingWrapperQuery extends LuceneTestCase {
IndexReader reader = SlowCompositeReaderWrapper.wrap(DirectoryReader.open(dir));
IndexSearcher searcher = newSearcher(reader);
LeafReaderContext context = (LeafReaderContext) reader.getContext();
MockFilter filter = new MockFilter();
MockQuery filter = new MockQuery();
CachingWrapperQuery cacher = new CachingWrapperQuery(filter, QueryCachingPolicy.ALWAYS_CACHE);
// first time, nested filter is called
@ -257,7 +286,7 @@ public class TestCachingWrapperQuery extends LuceneTestCase {
reader = refreshReader(reader);
searcher = newSearcher(reader, false);
docs = searcher.search(new ConstantScoreQuery(query), 1);
assertEquals("[query + filter] Should find 2 hits...", 2, docs.totalHits);
assertTrue(query.missCount > missCount);

View File

@ -26,14 +26,12 @@ import org.apache.lucene.document.Document;
import org.apache.lucene.document.Field;
import org.apache.lucene.index.DirectoryReader;
import org.apache.lucene.index.IndexReader;
import org.apache.lucene.index.LeafReaderContext;
import org.apache.lucene.index.MultiReader;
import org.apache.lucene.index.RandomIndexWriter;
import org.apache.lucene.index.Term;
import org.apache.lucene.search.BooleanClause.Occur;
import org.apache.lucene.search.similarities.ClassicSimilarity;
import org.apache.lucene.store.Directory;
import org.apache.lucene.util.Bits;
import org.apache.lucene.util.IOUtils;
import org.apache.lucene.util.LuceneTestCase;
@ -130,31 +128,32 @@ public class TestConstantScoreQuery extends LuceneTestCase {
}
}
// a filter for which other queries don't have special rewrite rules
private static class FilterWrapper extends Filter {
// a query for which other queries don't have special rewrite rules
private static class QueryWrapper extends Query {
private final Filter in;
FilterWrapper(Filter in) {
private final Query in;
QueryWrapper(Query in) {
this.in = in;
}
@Override
public DocIdSet getDocIdSet(LeafReaderContext context, Bits acceptDocs) throws IOException {
return in.getDocIdSet(context, acceptDocs);
}
@Override
public String toString(String field) {
return in.toString(field);
return "MockQuery";
}
@Override
public Weight createWeight(IndexSearcher searcher, boolean needsScores) throws IOException {
return in.createWeight(searcher, needsScores);
}
@Override
public boolean equals(Object obj) {
if (super.equals(obj) == false) {
return false;
}
return in.equals(((FilterWrapper) obj).in);
QueryWrapper that = (QueryWrapper) obj;
return in.equals(that.in);
}
@Override
@ -175,7 +174,7 @@ public class TestConstantScoreQuery extends LuceneTestCase {
IndexReader r = w.getReader();
w.close();
Filter filterB = new FilterWrapper(new QueryWrapperFilter(new TermQuery(new Term("field", "b"))));
Query filterB = new QueryWrapper(new TermQuery(new Term("field", "b")));
Query query = new ConstantScoreQuery(filterB);
IndexSearcher s = newSearcher(r);
@ -185,7 +184,7 @@ public class TestConstantScoreQuery extends LuceneTestCase {
.build();
assertEquals(1, s.search(filtered, 1).totalHits); // Query for field:b, Filter field:b
Filter filterA = new FilterWrapper(new QueryWrapperFilter(new TermQuery(new Term("field", "a"))));
Query filterA = new QueryWrapper(new TermQuery(new Term("field", "a")));
query = new ConstantScoreQuery(filterA);
filtered = new BooleanQuery.Builder()
@ -198,35 +197,6 @@ public class TestConstantScoreQuery extends LuceneTestCase {
d.close();
}
// LUCENE-5307
// don't reuse the scorer of filters since they have been created with bulkScorer=false
public void testQueryWrapperFilter() throws IOException {
Directory d = newDirectory();
RandomIndexWriter w = new RandomIndexWriter(random(), d);
Document doc = new Document();
doc.add(newStringField("field", "a", Field.Store.NO));
w.addDocument(doc);
IndexReader r = w.getReader();
w.close();
final Query wrapped = AssertingQuery.wrap(random(), new TermQuery(new Term("field", "a")));
Filter filter = new QueryWrapperFilter(wrapped);
IndexSearcher s = newSearcher(r);
assert s instanceof AssertingIndexSearcher;
// this used to fail
s.search(new ConstantScoreQuery(filter), new TotalHitCountCollector());
// check the rewrite
Query rewritten = filter;
for (Query q = rewritten.rewrite(r); q != rewritten; q = rewritten.rewrite(r)) {
rewritten = q;
}
assertEquals(new BoostQuery(new ConstantScoreQuery(wrapped), 0), rewritten);
r.close();
d.close();
}
public void testPropagatesApproximations() throws IOException {
Directory dir = newDirectory();
RandomIndexWriter w = new RandomIndexWriter(random(), dir);

View File

@ -1,197 +0,0 @@
package org.apache.lucene.search;
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
import org.apache.lucene.document.Field;
import org.apache.lucene.search.BooleanClause.Occur;
import org.apache.lucene.store.Directory;
import org.apache.lucene.util.LuceneTestCase;
import org.apache.lucene.document.DateTools;
import org.apache.lucene.document.Document;
import org.apache.lucene.index.IndexReader;
import org.apache.lucene.index.RandomIndexWriter;
import org.apache.lucene.index.Term;
import java.io.IOException;
/**
* DateFilter JUnit tests.
*
*
*/
/**
 * DateFilter JUnit tests: exercises date-range filtering via
 * QueryWrapperFilter-wrapped TermRangeQuery instances attached as FILTER
 * clauses of a BooleanQuery.
 */
public class TestDateFilter extends LuceneTestCase {
/**
 * Range filters over a date field when the indexed date lies in the past.
 */
public void testBefore() throws IOException {
// create an index
Directory indexStore = newDirectory();
RandomIndexWriter writer = new RandomIndexWriter(random(), indexStore);
long now = System.currentTimeMillis();
Document doc = new Document();
// add time that is in the past
doc.add(newStringField("datefield", DateTools.timeToString(now - 1000, DateTools.Resolution.MILLISECOND), Field.Store.YES));
doc.add(newTextField("body", "Today is a very sunny day in New York City", Field.Store.YES));
writer.addDocument(doc);
IndexReader reader = writer.getReader();
writer.close();
IndexSearcher searcher = newSearcher(reader);
// filter that should preserve matches (range [now-2000, now] contains now-1000)
// DateFilter df1 = DateFilter.Before("datefield", now);
Filter df1 = new QueryWrapperFilter(TermRangeQuery.newStringRange("datefield", DateTools
.timeToString(now - 2000, DateTools.Resolution.MILLISECOND), DateTools
.timeToString(now, DateTools.Resolution.MILLISECOND), false, true));
// filter that should discard matches (range ends before the indexed date)
// DateFilter df2 = DateFilter.Before("datefield", now - 999999);
Filter df2 = new QueryWrapperFilter(TermRangeQuery.newStringRange("datefield", DateTools
.timeToString(0, DateTools.Resolution.MILLISECOND), DateTools
.timeToString(now - 2000, DateTools.Resolution.MILLISECOND), true,
false));
// search something that doesn't exist with DateFilter
Query query1 = new TermQuery(new Term("body", "NoMatchForThis"));
// search for something that does exist
Query query2 = new TermQuery(new Term("body", "sunny"));
ScoreDoc[] result;
// ensure that queries return expected results without DateFilter first
result = searcher.search(query1, 1000).scoreDocs;
assertEquals(0, result.length);
result = searcher.search(query2, 1000).scoreDocs;
assertEquals(1, result.length);
// run queries with DateFilter
// non-matching query + matching filter: still no hits
Query filtered = new BooleanQuery.Builder()
.add(query1, Occur.MUST)
.add(df1, Occur.FILTER)
.build();
result = searcher.search(filtered, 1000).scoreDocs;
assertEquals(0, result.length);
// non-matching query + non-matching filter: no hits
filtered = new BooleanQuery.Builder()
.add(query1, Occur.MUST)
.add(df2, Occur.FILTER)
.build();
result = searcher.search(filtered, 1000).scoreDocs;
assertEquals(0, result.length);
// matching query + matching filter: the one document is found
filtered = new BooleanQuery.Builder()
.add(query2, Occur.MUST)
.add(df1, Occur.FILTER)
.build();
result = searcher.search(filtered, 1000).scoreDocs;
assertEquals(1, result.length);
// matching query + non-matching filter: filter removes the hit
filtered = new BooleanQuery.Builder()
.add(query2, Occur.MUST)
.add(df2, Occur.FILTER)
.build();
result = searcher.search(filtered, 1000).scoreDocs;
assertEquals(0, result.length);
reader.close();
indexStore.close();
}
/**
 * Range filters over a date field when the indexed date lies in the future.
 */
public void testAfter() throws IOException {
// create an index
Directory indexStore = newDirectory();
RandomIndexWriter writer = new RandomIndexWriter(random(), indexStore);
long now = System.currentTimeMillis();
Document doc = new Document();
// add time that is in the future
doc.add(newStringField("datefield", DateTools.timeToString(now + 888888, DateTools.Resolution.MILLISECOND), Field.Store.YES));
doc.add(newTextField("body", "Today is a very sunny day in New York City", Field.Store.YES));
writer.addDocument(doc);
IndexReader reader = writer.getReader();
writer.close();
IndexSearcher searcher = newSearcher(reader);
// filter that should preserve matches (range [now, now+999999] contains now+888888)
// DateFilter df1 = DateFilter.After("datefield", now);
Filter df1 = new QueryWrapperFilter(TermRangeQuery.newStringRange("datefield", DateTools
.timeToString(now, DateTools.Resolution.MILLISECOND), DateTools
.timeToString(now + 999999, DateTools.Resolution.MILLISECOND), true,
false));
// filter that should discard matches (range starts after the indexed date)
// DateFilter df2 = DateFilter.After("datefield", now + 999999);
Filter df2 = new QueryWrapperFilter(TermRangeQuery.newStringRange("datefield", DateTools
.timeToString(now + 999999, DateTools.Resolution.MILLISECOND),
DateTools.timeToString(now + 999999999,
DateTools.Resolution.MILLISECOND), false, true));
// search something that doesn't exist with DateFilter
Query query1 = new TermQuery(new Term("body", "NoMatchForThis"));
// search for something that does exist
Query query2 = new TermQuery(new Term("body", "sunny"));
ScoreDoc[] result;
// ensure that queries return expected results without DateFilter first
result = searcher.search(query1, 1000).scoreDocs;
assertEquals(0, result.length);
result = searcher.search(query2, 1000).scoreDocs;
assertEquals(1, result.length);
// run queries with DateFilter
// non-matching query + matching filter: still no hits
Query filtered = new BooleanQuery.Builder()
.add(query1, Occur.MUST)
.add(df1, Occur.FILTER)
.build();
result = searcher.search(filtered, 1000).scoreDocs;
assertEquals(0, result.length);
// non-matching query + non-matching filter: no hits
filtered = new BooleanQuery.Builder()
.add(query1, Occur.MUST)
.add(df2, Occur.FILTER)
.build();
result = searcher.search(filtered, 1000).scoreDocs;
assertEquals(0, result.length);
// matching query + matching filter: the one document is found
filtered = new BooleanQuery.Builder()
.add(query2, Occur.MUST)
.add(df1, Occur.FILTER)
.build();
result = searcher.search(filtered, 1000).scoreDocs;
assertEquals(1, result.length);
// matching query + non-matching filter: filter removes the hit
filtered = new BooleanQuery.Builder()
.add(query2, Occur.MUST)
.add(df2, Occur.FILTER)
.build();
result = searcher.search(filtered, 1000).scoreDocs;
assertEquals(0, result.length);
reader.close();
indexStore.close();
}
}

View File

@ -74,13 +74,6 @@ public class TestNeedsScores extends LuceneTestCase {
assertEquals(5, searcher.search(constantScore, 5).totalHits);
}
/** when converted to a filter */
public void testQueryWrapperFilter() throws Exception {
Query term = new TermQuery(new Term("field", "this"));
Filter filter = new QueryWrapperFilter(new AssertNeedsScores(term, false));
assertEquals(5, searcher.search(filter, 5).totalHits);
}
/** when not sorting by score */
public void testSortByField() throws Exception {
Query query = new AssertNeedsScores(new MatchAllDocsQuery(), false);

View File

@ -27,7 +27,7 @@ import org.apache.lucene.index.Term;
import org.apache.lucene.store.Directory;
import org.apache.lucene.util.LuceneTestCase;
public class TestFilterCachingPolicy extends LuceneTestCase {
public class TestQueryCachingPolicy extends LuceneTestCase {
public void testLargeSegmentDetection() throws IOException {
Directory dir = newDirectory();
@ -40,12 +40,12 @@ public class TestFilterCachingPolicy extends LuceneTestCase {
for (float minSizeRatio : new float[] {Float.MIN_VALUE, 0.01f, 0.1f, 0.9f}) {
final QueryCachingPolicy policy = new QueryCachingPolicy.CacheOnLargeSegments(0, minSizeRatio);
for (LeafReaderContext ctx : reader.leaves()) {
final Filter filter = new QueryWrapperFilter(new TermQuery(new Term("field", "value")));
final boolean shouldCache = policy.shouldCache(filter, ctx);
final Query query = new TermQuery(new Term("field", "value"));
final boolean shouldCache = policy.shouldCache(query, ctx);
final float sizeRatio = (float) ctx.reader().maxDoc() / reader.maxDoc();
assertEquals(sizeRatio >= minSizeRatio, shouldCache);
assertTrue(new QueryCachingPolicy.CacheOnLargeSegments(numDocs, Float.MIN_VALUE).shouldCache(filter, ctx));
assertFalse(new QueryCachingPolicy.CacheOnLargeSegments(numDocs + 1, Float.MIN_VALUE).shouldCache(filter, ctx));
assertTrue(new QueryCachingPolicy.CacheOnLargeSegments(numDocs, Float.MIN_VALUE).shouldCache(query, ctx));
assertFalse(new QueryCachingPolicy.CacheOnLargeSegments(numDocs + 1, Float.MIN_VALUE).shouldCache(query, ctx));
}
}
reader.close();

View File

@ -14,6 +14,7 @@ import org.apache.lucene.index.LeafReaderContext;
import org.apache.lucene.index.Term;
import org.apache.lucene.store.Directory;
import org.apache.lucene.util.BitDocIdSet;
import org.apache.lucene.util.BitSetIterator;
import org.apache.lucene.util.Bits;
import org.apache.lucene.util.FixedBitSet;
import org.apache.lucene.util.LuceneTestCase;
@ -140,30 +141,35 @@ public class TestScorerPerf extends LuceneTestCase {
}
}
private static class BitSetFilter extends Filter {
private static class BitSetQuery extends Query {
private final FixedBitSet docs;
BitSetFilter(FixedBitSet docs) {
BitSetQuery(FixedBitSet docs) {
this.docs = docs;
}
@Override
public DocIdSet getDocIdSet(LeafReaderContext context, Bits acceptDocs) throws IOException {
assertNull("acceptDocs should be null, as we have an index without deletions", acceptDocs);
return new BitDocIdSet(docs);
public Weight createWeight(IndexSearcher searcher, boolean needsScores) throws IOException {
return new ConstantScoreWeight(this) {
@Override
public Scorer scorer(LeafReaderContext context) throws IOException {
return new ConstantScoreScorer(this, score(), new BitSetIterator(docs, docs.approximateCardinality()));
}
};
}
@Override
public String toString(String field) {
return "randomBitSetFilter";
}
@Override
public boolean equals(Object obj) {
if (super.equals(obj) == false) {
return false;
}
return docs == ((BitSetFilter) obj).docs;
return docs == ((BitSetQuery) obj).docs;
}
@Override
@ -174,7 +180,7 @@ public class TestScorerPerf extends LuceneTestCase {
FixedBitSet addClause(BooleanQuery.Builder bq, FixedBitSet result) {
final FixedBitSet rnd = sets[random().nextInt(sets.length)];
Query q = new ConstantScoreQuery(new BitSetFilter(rnd));
Query q = new BitSetQuery(rnd);
bq.add(q, BooleanClause.Occur.MUST);
if (validate) {
if (result==null) result = rnd.clone();

View File

@ -36,10 +36,8 @@ import org.apache.lucene.index.IndexReader;
import org.apache.lucene.index.LeafReaderContext;
import org.apache.lucene.index.NumericDocValues;
import org.apache.lucene.index.RandomIndexWriter;
import org.apache.lucene.search.BooleanClause.Occur;
import org.apache.lucene.store.Directory;
import org.apache.lucene.util.BitDocIdSet;
import org.apache.lucene.util.Bits;
import org.apache.lucene.util.BitSetIterator;
import org.apache.lucene.util.BytesRef;
import org.apache.lucene.util.FixedBitSet;
import org.apache.lucene.util.LuceneTestCase;
@ -147,9 +145,8 @@ public class TestSortRandom extends LuceneTestCase {
sort = new Sort(sf, SortField.FIELD_DOC);
}
final int hitCount = TestUtil.nextInt(random, 1, r.maxDoc() + 20);
final RandomFilter f = new RandomFilter(random.nextLong(), random.nextFloat(), docValues);
hits = s.search(new ConstantScoreQuery(f),
hitCount, sort, random.nextBoolean(), random.nextBoolean());
final RandomQuery f = new RandomQuery(random.nextLong(), random.nextFloat(), docValues);
hits = s.search(f, hitCount, sort, random.nextBoolean(), random.nextBoolean());
if (VERBOSE) {
System.out.println("\nTEST: iter=" + iter + " " + hits.totalHits + " hits; topN=" + hitCount + "; reverse=" + reverse + "; sortMissingLast=" + sortMissingLast + " sort=" + sort);
@ -218,35 +215,40 @@ public class TestSortRandom extends LuceneTestCase {
dir.close();
}
private static class RandomFilter extends Filter {
private static class RandomQuery extends Query {
private final long seed;
private float density;
private final List<BytesRef> docValues;
public final List<BytesRef> matchValues = Collections.synchronizedList(new ArrayList<BytesRef>());
// density should be 0.0 ... 1.0
public RandomFilter(long seed, float density, List<BytesRef> docValues) {
public RandomQuery(long seed, float density, List<BytesRef> docValues) {
this.seed = seed;
this.density = density;
this.docValues = docValues;
}
@Override
public DocIdSet getDocIdSet(LeafReaderContext context, Bits acceptDocs) throws IOException {
Random random = new Random(context.docBase ^ seed);
final int maxDoc = context.reader().maxDoc();
final NumericDocValues idSource = DocValues.getNumeric(context.reader(), "id");
assertNotNull(idSource);
final FixedBitSet bits = new FixedBitSet(maxDoc);
for(int docID=0;docID<maxDoc;docID++) {
if (random.nextFloat() <= density && (acceptDocs == null || acceptDocs.get(docID))) {
bits.set(docID);
//System.out.println(" acc id=" + idSource.getInt(docID) + " docID=" + docID);
matchValues.add(docValues.get((int) idSource.get(docID)));
}
}
public Weight createWeight(IndexSearcher searcher, boolean needsScores) throws IOException {
return new ConstantScoreWeight(this) {
@Override
public Scorer scorer(LeafReaderContext context) throws IOException {
Random random = new Random(context.docBase ^ seed);
final int maxDoc = context.reader().maxDoc();
final NumericDocValues idSource = DocValues.getNumeric(context.reader(), "id");
assertNotNull(idSource);
final FixedBitSet bits = new FixedBitSet(maxDoc);
for(int docID=0;docID<maxDoc;docID++) {
if (random.nextFloat() <= density) {
bits.set(docID);
//System.out.println(" acc id=" + idSource.getInt(docID) + " docID=" + docID);
matchValues.add(docValues.get((int) idSource.get(docID)));
}
}
return new BitDocIdSet(bits);
return new ConstantScoreScorer(this, score(), new BitSetIterator(bits, bits.approximateCardinality()));
}
};
}
@Override
@ -259,7 +261,7 @@ public class TestSortRandom extends LuceneTestCase {
if (super.equals(obj) == false) {
return false;
}
RandomFilter other = (RandomFilter) obj;
RandomQuery other = (RandomQuery) obj;
return seed == other.seed && docValues == other.docValues;
}

View File

@ -32,11 +32,9 @@ import org.apache.lucene.index.Term;
import org.apache.lucene.search.BooleanClause;
import org.apache.lucene.search.BooleanQuery;
import org.apache.lucene.search.CheckHits;
import org.apache.lucene.search.Filter;
import org.apache.lucene.search.IndexSearcher;
import org.apache.lucene.search.MatchAllDocsQuery;
import org.apache.lucene.search.Query;
import org.apache.lucene.search.QueryWrapperFilter;
import org.apache.lucene.search.Sort;
import org.apache.lucene.search.SortField;
import org.apache.lucene.search.TermQuery;

View File

@ -39,31 +39,28 @@ import org.apache.lucene.facet.sortedset.SortedSetDocValuesReaderState;
import org.apache.lucene.facet.taxonomy.TaxonomyReader;
import org.apache.lucene.facet.taxonomy.directory.DirectoryTaxonomyReader;
import org.apache.lucene.facet.taxonomy.directory.DirectoryTaxonomyWriter;
import org.apache.lucene.index.LeafReaderContext;
import org.apache.lucene.index.IndexReader;
import org.apache.lucene.index.IndexWriterConfig;
import org.apache.lucene.index.LeafReaderContext;
import org.apache.lucene.index.RandomIndexWriter;
import org.apache.lucene.index.Term;
import org.apache.lucene.search.BooleanClause;
import org.apache.lucene.search.BooleanClause.Occur;
import org.apache.lucene.search.BooleanQuery;
import org.apache.lucene.search.Collector;
import org.apache.lucene.search.DocIdSet;
import org.apache.lucene.search.Filter;
import org.apache.lucene.search.IndexSearcher;
import org.apache.lucene.search.MatchAllDocsQuery;
import org.apache.lucene.search.Query;
import org.apache.lucene.search.RandomAccessWeight;
import org.apache.lucene.search.ScoreDoc;
import org.apache.lucene.search.SimpleCollector;
import org.apache.lucene.search.Sort;
import org.apache.lucene.search.SortField;
import org.apache.lucene.search.TermQuery;
import org.apache.lucene.search.TopDocs;
import org.apache.lucene.search.Weight;
import org.apache.lucene.store.Directory;
import org.apache.lucene.util.Bits;
import org.apache.lucene.util.BytesRef;
import org.apache.lucene.util.BitDocIdSet;
import org.apache.lucene.util.FixedBitSet;
import org.apache.lucene.util.IOUtils;
import org.apache.lucene.util.InPlaceMergeSorter;
import org.apache.lucene.util.InfoStream;
@ -646,29 +643,45 @@ public class TestDrillSideways extends FacetTestCase {
}
}
Filter filter;
Query filter;
if (random().nextInt(7) == 6) {
if (VERBOSE) {
System.out.println(" only-even filter");
}
filter = new Filter() {
@Override
public DocIdSet getDocIdSet(LeafReaderContext context, Bits acceptDocs) throws IOException {
int maxDoc = context.reader().maxDoc();
final FixedBitSet bits = new FixedBitSet(maxDoc);
for(int docID=0;docID < maxDoc;docID++) {
// Keeps only the even ids:
if ((acceptDocs == null || acceptDocs.get(docID)) && (Integer.parseInt(context.reader().document(docID).get("id")) & 1) == 0) {
bits.set(docID);
}
filter = new Query() {
@Override
public Weight createWeight(IndexSearcher searcher, boolean needsScores) throws IOException {
return new RandomAccessWeight(this) {
@Override
protected Bits getMatchingDocs(final LeafReaderContext context) throws IOException {
return new Bits() {
@Override
public boolean get(int docID) {
try {
return (Integer.parseInt(context.reader().document(docID).get("id")) & 1) == 0;
} catch (NumberFormatException | IOException e) {
throw new RuntimeException(e);
}
}
@Override
public int length() {
return context.reader().maxDoc();
}
};
}
return new BitDocIdSet(bits);
}
@Override
public String toString(String field) {
return "drillSidewaysTestFilter";
}
};
};
}
@Override
public String toString(String field) {
return "drillSidewaysTestFilter";
}
};
} else {
filter = null;
}
@ -865,7 +878,7 @@ public class TestDrillSideways extends FacetTestCase {
private TestFacetResult slowDrillSidewaysSearch(IndexSearcher s, List<Doc> docs,
String contentToken, String[][] drillDowns,
String[][] dimValues, Filter onlyEven) throws Exception {
String[][] dimValues, Query onlyEven) throws Exception {
int numDims = dimValues.length;
List<Doc> hits = new ArrayList<>();

View File

@ -28,10 +28,8 @@ import org.apache.lucene.index.RandomIndexWriter;
import org.apache.lucene.index.Term;
import org.apache.lucene.queries.function.ValueSource;
import org.apache.lucene.queries.function.valuesource.BytesRefFieldSource;
import org.apache.lucene.search.CachingWrapperQuery;
import org.apache.lucene.search.Filter;
import org.apache.lucene.search.IndexSearcher;
import org.apache.lucene.search.QueryWrapperFilter;
import org.apache.lucene.search.Query;
import org.apache.lucene.search.Sort;
import org.apache.lucene.search.TermQuery;
import org.apache.lucene.store.Directory;
@ -159,7 +157,7 @@ public class GroupingSearchTest extends LuceneTestCase {
assertEquals(1, group.scoreDocs.length);
assertEquals(6, group.scoreDocs[0].doc);
Filter lastDocInBlock = new QueryWrapperFilter(new TermQuery(new Term("groupend", "x")));
Query lastDocInBlock = new TermQuery(new Term("groupend", "x"));
groupingSearch = new GroupingSearch(lastDocInBlock);
groups = groupingSearch.search(indexSearcher, new TermQuery(new Term("content", "random")), 0, 10);

View File

@ -71,7 +71,6 @@ import org.apache.lucene.search.PhraseQuery;
import org.apache.lucene.search.PhraseQuery.Builder;
import org.apache.lucene.search.PrefixQuery;
import org.apache.lucene.search.Query;
import org.apache.lucene.search.QueryWrapperFilter;
import org.apache.lucene.search.RegexpQuery;
import org.apache.lucene.search.TermQuery;
import org.apache.lucene.search.TermRangeQuery;
@ -585,8 +584,7 @@ public class HighlighterTest extends BaseTokenStreamTestCase implements Formatte
public void testToParentBlockJoinQuery() throws Exception {
BitSetProducer parentFilter = new QueryBitSetProducer(
new QueryWrapperFilter(
new TermQuery(new Term(FIELD_NAME, "parent"))));
new TermQuery(new Term(FIELD_NAME, "parent")));
query = new ToParentBlockJoinQuery(new TermQuery(new Term(FIELD_NAME, "child")),
parentFilter, ScoreMode.None);
@ -611,8 +609,7 @@ public class HighlighterTest extends BaseTokenStreamTestCase implements Formatte
public void testToChildBlockJoinQuery() throws Exception {
BitSetProducer parentFilter = new QueryBitSetProducer(
new QueryWrapperFilter(
new TermQuery(new Term(FIELD_NAME, "parent"))));
new TermQuery(new Term(FIELD_NAME, "parent")));
BooleanQuery.Builder booleanQuery = new BooleanQuery.Builder();
booleanQuery.add(new ToChildBlockJoinQuery(new TermQuery(
@ -910,8 +907,8 @@ public class HighlighterTest extends BaseTokenStreamTestCase implements Formatte
numHighlights = 0;
if (random().nextBoolean()) {
BooleanQuery.Builder bq = new BooleanQuery.Builder();
bq.add(new ConstantScoreQuery(new QueryWrapperFilter(new TermQuery(
new Term(FIELD_NAME, "kennedy")))), Occur.MUST);
bq.add(new ConstantScoreQuery(new TermQuery(
new Term(FIELD_NAME, "kennedy"))), Occur.MUST);
bq.add(new ConstantScoreQuery(new TermQuery(new Term(FIELD_NAME, "kennedy"))), Occur.MUST);
doSearching(bq.build());
} else {

View File

@ -23,14 +23,11 @@ import java.util.List;
import java.util.Map;
import java.util.Set;
import org.apache.lucene.index.LeafReaderContext;
import org.apache.lucene.index.Term;
import org.apache.lucene.search.BooleanClause.Occur;
import org.apache.lucene.search.BooleanQuery;
import org.apache.lucene.search.BoostQuery;
import org.apache.lucene.search.ConstantScoreQuery;
import org.apache.lucene.search.DocIdSet;
import org.apache.lucene.search.Filter;
import org.apache.lucene.search.PrefixQuery;
import org.apache.lucene.search.Query;
import org.apache.lucene.search.RegexpQuery;
@ -39,7 +36,6 @@ import org.apache.lucene.search.TermRangeQuery;
import org.apache.lucene.search.WildcardQuery;
import org.apache.lucene.search.vectorhighlight.FieldQuery.QueryPhraseMap;
import org.apache.lucene.search.vectorhighlight.FieldTermStack.TermInfo;
import org.apache.lucene.util.Bits;
import org.apache.lucene.util.BytesRef;
public class FieldQueryTest extends AbstractTestCase {
@ -938,30 +934,6 @@ public class FieldQueryTest extends AbstractTestCase {
new FieldQuery(q, reader, true, true );
}
public void testFlattenFilteredQuery() throws Exception {
initBoost();
Filter filter = new Filter() {
@Override
public DocIdSet getDocIdSet(LeafReaderContext context, Bits acceptDocs)
throws IOException {
return null;
}
@Override
public String toString(String field) {
return "filterToBeFlattened";
}
};
Query query = new BooleanQuery.Builder()
.add(pqF( "A" ), Occur.MUST)
.add(filter, Occur.FILTER)
.build();
query = new BoostQuery(query, boost);
FieldQuery fq = new FieldQuery( query, true, true );
Set<Query> flatQueries = new HashSet<>();
fq.flatten( query, reader, flatQueries, 1f );
assertCollectionQueries( flatQueries, tq( boost, "A" ) );
}
public void testFlattenConstantScoreQuery() throws Exception {
initBoost();
Query query = new ConstantScoreQuery(pqF( "A" ));

View File

@ -29,7 +29,7 @@
*
* <p>When you index in this way, the documents in your index are divided
* into parent documents (the last document of each block) and child
* documents (all others). You provide a {@link org.apache.lucene.search.Filter} that identifies the
* documents (all others). You provide a {@link org.apache.lucene.search.join.BitSetProducer} that identifies the
* parent documents, as Lucene does not currently record any information
* about doc blocks.</p>
*

View File

@ -30,7 +30,6 @@ import org.apache.lucene.index.NoMergePolicy;
import org.apache.lucene.index.RandomIndexWriter;
import org.apache.lucene.index.Term;
import org.apache.lucene.search.MatchNoDocsQuery;
import org.apache.lucene.search.QueryWrapperFilter;
import org.apache.lucene.search.TermQuery;
import org.apache.lucene.store.Directory;
import org.apache.lucene.util.LuceneTestCase;
@ -88,7 +87,7 @@ public class TestCheckJoinIndex extends LuceneTestCase {
final IndexReader reader = w.getReader();
w.close();
BitSetProducer parentsFilter = new QueryBitSetProducer(new QueryWrapperFilter(new TermQuery(new Term("parent", "true"))));
BitSetProducer parentsFilter = new QueryBitSetProducer(new TermQuery(new Term("parent", "true")));
try {
CheckJoinIndex.check(reader, parentsFilter);
fail("Invalid index");
@ -128,7 +127,7 @@ public class TestCheckJoinIndex extends LuceneTestCase {
final IndexReader reader = w.getReader();
w.close();
BitSetProducer parentsFilter = new QueryBitSetProducer(new QueryWrapperFilter(new TermQuery(new Term("parent", "true"))));
BitSetProducer parentsFilter = new QueryBitSetProducer(new TermQuery(new Term("parent", "true")));
try {
CheckJoinIndex.check(reader, parentsFilter);
fail("Invalid index");

View File

@ -21,7 +21,6 @@ import java.io.IOException;
import org.apache.lucene.index.LeafReaderContext;
import org.apache.lucene.index.SortingMergePolicy;
import org.apache.lucene.util.BitDocIdSet;
import org.apache.lucene.util.BitSet;
/**
@ -38,7 +37,7 @@ import org.apache.lucene.util.BitSet;
// TODO: can/should we clean this thing up (e.g. return a proper sort value)
// and move to the join/ module?
public class BlockJoinComparatorSource extends FieldComparatorSource {
final Filter parentsFilter;
final Query parentsFilter;
final Sort parentSort;
final Sort childSort;
@ -49,7 +48,7 @@ public class BlockJoinComparatorSource extends FieldComparatorSource {
* @param parentsFilter Filter identifying parent documents
* @param parentSort Sort for parent documents
*/
public BlockJoinComparatorSource(Filter parentsFilter, Sort parentSort) {
public BlockJoinComparatorSource(Query parentsFilter, Sort parentSort) {
this(parentsFilter, parentSort, new Sort(SortField.FIELD_DOC));
}
@ -61,7 +60,7 @@ public class BlockJoinComparatorSource extends FieldComparatorSource {
* @param parentSort Sort for parent documents
* @param childSort Sort for child documents in the same block
*/
public BlockJoinComparatorSource(Filter parentsFilter, Sort parentSort, Sort childSort) {
public BlockJoinComparatorSource(Query parentsFilter, Sort parentSort, Sort childSort) {
this.parentsFilter = parentsFilter;
this.parentSort = parentSort;
this.childSort = childSort;
@ -119,14 +118,14 @@ public class BlockJoinComparatorSource extends FieldComparatorSource {
if (parentBits != null) {
throw new IllegalStateException("This comparator can only be used on a single segment");
}
final DocIdSet parents = parentsFilter.getDocIdSet(context, null);
IndexSearcher searcher = new IndexSearcher(context.reader());
searcher.setQueryCache(null);
final Weight weight = searcher.createNormalizedWeight(parentsFilter, false);
final DocIdSetIterator parents = weight.scorer(context);
if (parents == null) {
throw new IllegalStateException("LeafReader " + context.reader() + " contains no parents!");
}
if (parents instanceof BitDocIdSet == false) {
throw new IllegalStateException("parentFilter must return BitSet; got " + parents);
}
parentBits = (BitSet) parents.bits();
parentBits = BitSet.of(parents, context.reader().maxDoc());
parentLeafComparators = new LeafFieldComparator[parentComparators.length];
for (int i = 0; i < parentComparators.length; i++) {
parentLeafComparators[i] = parentComparators[i].getLeafComparator(context);

View File

@ -17,94 +17,29 @@ package org.apache.lucene.index;
* limitations under the License.
*/
import static org.apache.lucene.search.DocIdSet.EMPTY;
import java.io.IOException;
import java.util.ArrayList;
import java.util.Collections;
import java.util.List;
import java.util.Map;
import java.util.WeakHashMap;
import org.apache.lucene.analysis.MockAnalyzer;
import org.apache.lucene.document.Document;
import org.apache.lucene.document.Field.Store;
import org.apache.lucene.document.NumericDocValuesField;
import org.apache.lucene.document.StringField;
import org.apache.lucene.index.LeafReader;
import org.apache.lucene.index.DirectoryReader;
import org.apache.lucene.index.IndexWriterConfig;
import org.apache.lucene.index.NumericDocValues;
import org.apache.lucene.index.RandomIndexWriter;
import org.apache.lucene.index.Term;
import org.apache.lucene.search.BitsFilteredDocIdSet;
import org.apache.lucene.search.BlockJoinComparatorSource;
import org.apache.lucene.search.DocIdSet;
import org.apache.lucene.search.DocIdSetIterator;
import org.apache.lucene.search.Filter;
import org.apache.lucene.search.QueryWrapperFilter;
import org.apache.lucene.search.IndexSearcher;
import org.apache.lucene.search.Query;
import org.apache.lucene.search.Sort;
import org.apache.lucene.search.SortField;
import org.apache.lucene.search.TermQuery;
import org.apache.lucene.search.Weight;
import org.apache.lucene.util.ArrayUtil;
import org.apache.lucene.util.BitDocIdSet;
import org.apache.lucene.util.BitSet;
import org.apache.lucene.util.Bits;
import org.apache.lucene.util.FixedBitSet;
import org.apache.lucene.util.LuceneTestCase;
import org.apache.lucene.util.SparseFixedBitSet;
public class TestBlockJoinSorter extends LuceneTestCase {
private static class BitSetCachingWrapperFilter extends Filter {
private final Filter filter;
private final Map<Object,BitDocIdSet> cache = Collections.synchronizedMap(new WeakHashMap<Object,BitDocIdSet>());
public BitSetCachingWrapperFilter(Filter filter) {
this.filter = filter;
}
@Override
public DocIdSet getDocIdSet(LeafReaderContext context, final Bits acceptDocs) throws IOException {
final LeafReader reader = context.reader();
final Object key = reader.getCoreCacheKey();
BitDocIdSet docIdSet = cache.get(key);
if (docIdSet == null) {
final DocIdSet uncached = filter.getDocIdSet(context, null);
final DocIdSetIterator it = uncached == null ? null : uncached.iterator();
if (it != null) {
docIdSet = new BitDocIdSet(BitSet.of(it, context.reader().maxDoc()));
}
if (docIdSet == null) {
docIdSet = new BitDocIdSet(new SparseFixedBitSet(context.reader().maxDoc()));
}
cache.put(key, docIdSet);
}
return docIdSet == EMPTY ? null : BitsFilteredDocIdSet.wrap(docIdSet, acceptDocs);
}
@Override
public boolean equals(Object obj) {
if (super.equals(obj) == false) {
return false;
}
return filter.equals(((BitSetCachingWrapperFilter) obj).filter);
}
@Override
public int hashCode() {
return 31 * super.hashCode() + filter.hashCode();
}
@Override
public String toString(String field) {
return getClass().getName() + "(" + filter.toString(field) + ")";
}
}
public void test() throws IOException {
final int numParents = atLeast(200);
IndexWriterConfig cfg = newIndexWriterConfig(new MockAnalyzer(random()));
@ -132,8 +67,11 @@ public class TestBlockJoinSorter extends LuceneTestCase {
writer.close();
final LeafReader reader = getOnlySegmentReader(indexReader);
final Filter parentsFilter = new BitSetCachingWrapperFilter(new QueryWrapperFilter(new TermQuery(new Term("parent", "true"))));
final FixedBitSet parentBits = (FixedBitSet) parentsFilter.getDocIdSet(reader.getContext(), null).bits();
final Query parentsFilter = new TermQuery(new Term("parent", "true"));
IndexSearcher searcher = newSearcher(reader);
final Weight weight = searcher.createNormalizedWeight(parentsFilter, false);
final DocIdSetIterator parents = weight.scorer(reader.getContext());
final BitSet parentBits = BitSet.of(parents, reader.maxDoc());
final NumericDocValues parentValues = reader.getNumericDocValues("parent_val");
final NumericDocValues childValues = reader.getNumericDocValues("child_val");

View File

@ -39,17 +39,19 @@ import org.apache.lucene.index.LeafReaderContext;
import org.apache.lucene.index.NumericDocValues;
import org.apache.lucene.index.RandomIndexWriter;
import org.apache.lucene.search.ConstantScoreQuery;
import org.apache.lucene.search.DocIdSet;
import org.apache.lucene.search.ConstantScoreScorer;
import org.apache.lucene.search.ConstantScoreWeight;
import org.apache.lucene.search.FieldDoc;
import org.apache.lucene.search.Filter;
import org.apache.lucene.search.IndexSearcher;
import org.apache.lucene.search.Query;
import org.apache.lucene.search.Scorer;
import org.apache.lucene.search.Sort;
import org.apache.lucene.search.SortField;
import org.apache.lucene.search.TopFieldDocs;
import org.apache.lucene.search.Weight;
import org.apache.lucene.store.Directory;
import org.apache.lucene.uninverting.UninvertingReader.Type;
import org.apache.lucene.util.BitDocIdSet;
import org.apache.lucene.util.Bits;
import org.apache.lucene.util.BitSetIterator;
import org.apache.lucene.util.BytesRef;
import org.apache.lucene.util.FixedBitSet;
import org.apache.lucene.util.LuceneTestCase;
@ -160,7 +162,7 @@ public class TestFieldCacheSortRandom extends LuceneTestCase {
sort = new Sort(sf, SortField.FIELD_DOC);
}
final int hitCount = TestUtil.nextInt(random, 1, r.maxDoc() + 20);
final RandomFilter f = new RandomFilter(random.nextLong(), random.nextFloat(), docValues);
final RandomQuery f = new RandomQuery(random.nextLong(), random.nextFloat(), docValues);
int queryType = random.nextInt(2);
if (queryType == 0) {
hits = s.search(new ConstantScoreQuery(f),
@ -251,35 +253,40 @@ public class TestFieldCacheSortRandom extends LuceneTestCase {
dir.close();
}
private static class RandomFilter extends Filter {
private static class RandomQuery extends Query {
private final long seed;
private float density;
private final List<BytesRef> docValues;
public final List<BytesRef> matchValues = Collections.synchronizedList(new ArrayList<BytesRef>());
// density should be 0.0 ... 1.0
public RandomFilter(long seed, float density, List<BytesRef> docValues) {
public RandomQuery(long seed, float density, List<BytesRef> docValues) {
this.seed = seed;
this.density = density;
this.docValues = docValues;
}
@Override
public DocIdSet getDocIdSet(LeafReaderContext context, Bits acceptDocs) throws IOException {
Random random = new Random(seed ^ context.docBase);
final int maxDoc = context.reader().maxDoc();
final NumericDocValues idSource = DocValues.getNumeric(context.reader(), "id");
assertNotNull(idSource);
final FixedBitSet bits = new FixedBitSet(maxDoc);
for(int docID=0;docID<maxDoc;docID++) {
if (random.nextFloat() <= density && (acceptDocs == null || acceptDocs.get(docID))) {
bits.set(docID);
//System.out.println(" acc id=" + idSource.getInt(docID) + " docID=" + docID);
matchValues.add(docValues.get((int) idSource.get(docID)));
}
}
public Weight createWeight(IndexSearcher searcher, boolean needsScores) throws IOException {
return new ConstantScoreWeight(this) {
@Override
public Scorer scorer(LeafReaderContext context) throws IOException {
Random random = new Random(seed ^ context.docBase);
final int maxDoc = context.reader().maxDoc();
final NumericDocValues idSource = DocValues.getNumeric(context.reader(), "id");
assertNotNull(idSource);
final FixedBitSet bits = new FixedBitSet(maxDoc);
for(int docID=0;docID<maxDoc;docID++) {
if (random.nextFloat() <= density) {
bits.set(docID);
//System.out.println(" acc id=" + idSource.getInt(docID) + " docID=" + docID);
matchValues.add(docValues.get((int) idSource.get(docID)));
}
}
return new BitDocIdSet(bits);
return new ConstantScoreScorer(this, score(), new BitSetIterator(bits, bits.approximateCardinality()));
}
};
}
@Override
@ -292,7 +299,7 @@ public class TestFieldCacheSortRandom extends LuceneTestCase {
if (super.equals(obj) == false) {
return false;
}
RandomFilter other = (RandomFilter) obj;
RandomQuery other = (RandomQuery) obj;
return seed == other.seed && docValues == other.docValues;
}

View File

@ -47,6 +47,7 @@ import org.apache.lucene.index.Term;
import org.apache.lucene.search.BooleanClause.Occur;
import org.apache.lucene.store.Directory;
import org.apache.lucene.util.BitDocIdSet;
import org.apache.lucene.util.BitSetIterator;
import org.apache.lucene.util.Bits;
import org.apache.lucene.util.BytesRef;
import org.apache.lucene.util.FixedBitSet;
@ -584,7 +585,7 @@ public class TestTermAutomatonQuery extends LuceneTestCase {
if (VERBOSE) {
System.out.println(" use random filter");
}
RandomFilter filter = new RandomFilter(random().nextLong(), random().nextFloat());
RandomQuery filter = new RandomQuery(random().nextLong(), random().nextFloat());
q1 = new BooleanQuery.Builder()
.add(q1, Occur.MUST)
.add(filter, Occur.FILTER)
@ -630,29 +631,33 @@ public class TestTermAutomatonQuery extends LuceneTestCase {
return result;
}
private static class RandomFilter extends Filter {
private static class RandomQuery extends Query {
private final long seed;
private float density;
// density should be 0.0 ... 1.0
public RandomFilter(long seed, float density) {
public RandomQuery(long seed, float density) {
this.seed = seed;
this.density = density;
}
@Override
public DocIdSet getDocIdSet(LeafReaderContext context, Bits acceptDocs) throws IOException {
int maxDoc = context.reader().maxDoc();
FixedBitSet bits = new FixedBitSet(maxDoc);
Random random = new Random(seed ^ context.docBase);
for(int docID=0;docID<maxDoc;docID++) {
if (random.nextFloat() <= density && (acceptDocs == null || acceptDocs.get(docID))) {
bits.set(docID);
//System.out.println(" acc id=" + idSource.getInt(docID) + " docID=" + docID);
public Weight createWeight(IndexSearcher searcher, boolean needsScores) throws IOException {
return new ConstantScoreWeight(this) {
@Override
public Scorer scorer(LeafReaderContext context) throws IOException {
int maxDoc = context.reader().maxDoc();
FixedBitSet bits = new FixedBitSet(maxDoc);
Random random = new Random(seed ^ context.docBase);
for(int docID=0;docID<maxDoc;docID++) {
if (random.nextFloat() <= density) {
bits.set(docID);
//System.out.println(" acc id=" + idSource.getInt(docID) + " docID=" + docID);
}
}
return new ConstantScoreScorer(this, score(), new BitSetIterator(bits, bits.approximateCardinality()));
}
}
return new BitDocIdSet(bits);
};
}
@Override
@ -665,7 +670,7 @@ public class TestTermAutomatonQuery extends LuceneTestCase {
if (super.equals(obj) == false) {
return false;
}
RandomFilter other = (RandomFilter) obj;
RandomQuery other = (RandomQuery) obj;
return seed == other.seed && density == other.density;
}

View File

@ -34,7 +34,6 @@ import java.util.TreeMap;
import org.apache.lucene.index.LeafReaderContext;
import org.apache.lucene.search.BooleanClause.Occur;
import org.apache.lucene.search.BooleanQuery;
import org.apache.lucene.search.Filter;
import org.apache.lucene.search.Query;
import org.apache.solr.analytics.accumulator.facet.FacetValueAccumulator;
import org.apache.solr.analytics.accumulator.facet.FieldFacetAccumulator;
@ -58,6 +57,7 @@ import org.apache.solr.common.util.NamedList;
import org.apache.solr.request.SolrQueryRequest;
import org.apache.solr.schema.SchemaField;
import org.apache.solr.search.DocSet;
import org.apache.solr.search.Filter;
import org.apache.solr.search.QParser;
import org.apache.solr.search.SolrIndexSearcher;
import org.apache.solr.search.SyntaxError;

View File

@ -23,7 +23,6 @@ import java.util.List;
import org.apache.lucene.index.LeafReaderContext;
import org.apache.lucene.search.DocIdSet;
import org.apache.lucene.search.DocIdSetIterator;
import org.apache.lucene.search.Filter;
import org.apache.solr.analytics.accumulator.BasicAccumulator;
import org.apache.solr.analytics.accumulator.FacetingAccumulator;
import org.apache.solr.analytics.accumulator.ValueAccumulator;
@ -32,6 +31,7 @@ import org.apache.solr.common.params.SolrParams;
import org.apache.solr.common.util.NamedList;
import org.apache.solr.request.SolrQueryRequest;
import org.apache.solr.search.DocSet;
import org.apache.solr.search.Filter;
import org.apache.solr.search.SolrIndexSearcher;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

View File

@ -31,7 +31,6 @@ import org.apache.lucene.index.Term;
import org.apache.lucene.search.BooleanClause.Occur;
import org.apache.lucene.search.BooleanQuery;
import org.apache.lucene.search.Query;
import org.apache.lucene.search.QueryWrapperFilter;
import org.apache.lucene.search.TermQuery;
import org.apache.lucene.search.TopDocs;
import org.apache.lucene.search.join.QueryBitSetProducer;
@ -460,7 +459,7 @@ public class TestHierarchicalDocBuilder extends AbstractDataImportHandlerTestCas
private BitSetProducer createParentFilter(String type) {
BooleanQuery.Builder parentQuery = new BooleanQuery.Builder();
parentQuery.add(new TermQuery(new Term("type_s", type)), Occur.MUST);
return new QueryBitSetProducer(new QueryWrapperFilter(parentQuery.build()));
return new QueryBitSetProducer(parentQuery.build());
}
private String nextId() {

View File

@ -54,7 +54,7 @@ import org.apache.lucene.search.Collector;
import org.apache.lucene.search.DocIdSetIterator;
import org.apache.lucene.search.LeafCollector;
import org.apache.lucene.search.Query;
import org.apache.lucene.search.QueryWrapperFilter;
import org.apache.solr.search.QueryWrapperFilter;
import org.apache.lucene.search.ScoreDoc;
import org.apache.lucene.search.Scorer;
import org.apache.lucene.search.Sort;

View File

@ -37,14 +37,12 @@ import org.apache.lucene.search.ConstantScoreScorer;
import org.apache.lucene.search.ConstantScoreWeight;
import org.apache.lucene.search.DocIdSet;
import org.apache.lucene.search.DocIdSetIterator;
import org.apache.lucene.search.Filter;
import org.apache.lucene.search.IndexSearcher;
import org.apache.lucene.search.Query;
import org.apache.lucene.search.Scorer;
import org.apache.lucene.search.TermQuery;
import org.apache.lucene.search.Weight;
import org.apache.lucene.util.AttributeSource;
import org.apache.lucene.util.Bits;
import org.apache.lucene.util.BytesRef;
import org.apache.lucene.util.DocIdSetBuilder;
import org.apache.lucene.util.FixedBitSet;
@ -53,7 +51,7 @@ import org.apache.solr.search.DocSet;
import org.apache.solr.search.DocSetBuilder;
import org.apache.solr.search.DocSetProducer;
import org.apache.solr.search.ExtendedQueryBase;
import org.apache.solr.search.SolrConstantScoreQuery;
import org.apache.solr.search.Filter;
import org.apache.solr.search.SolrIndexSearcher;
/** @lucene.experimental */

View File

@ -26,7 +26,6 @@ import org.apache.lucene.index.SortedDocValues;
import org.apache.lucene.index.SortedSetDocValues;
import org.apache.lucene.search.DocIdSet;
import org.apache.lucene.search.DocIdSetIterator;
import org.apache.lucene.search.Filter;
import org.apache.lucene.util.BytesRef;
import org.apache.lucene.util.BytesRefBuilder;
import org.apache.lucene.util.CharsRefBuilder;
@ -37,6 +36,7 @@ import org.apache.solr.common.util.NamedList;
import org.apache.solr.schema.FieldType;
import org.apache.solr.schema.SchemaField;
import org.apache.solr.search.DocSet;
import org.apache.solr.search.Filter;
import org.apache.solr.search.SolrIndexSearcher;
import org.apache.solr.util.LongPriorityQueue;

View File

@ -30,7 +30,6 @@ import org.apache.lucene.index.SortedDocValues;
import org.apache.lucene.index.SortedSetDocValues;
import org.apache.lucene.search.DocIdSet;
import org.apache.lucene.search.DocIdSetIterator;
import org.apache.lucene.search.Filter;
import org.apache.lucene.util.BytesRef;
import org.apache.lucene.util.LongValues;
import org.apache.solr.common.SolrException;
@ -41,6 +40,7 @@ import org.apache.solr.handler.component.StatsValuesFactory;
import org.apache.solr.schema.FieldType;
import org.apache.solr.schema.SchemaField;
import org.apache.solr.search.DocSet;
import org.apache.solr.search.Filter;
import org.apache.solr.search.SolrIndexSearcher;
/**

View File

@ -17,7 +17,6 @@ import org.apache.lucene.index.SortedDocValues;
import org.apache.lucene.index.SortedSetDocValues;
import org.apache.lucene.search.DocIdSet;
import org.apache.lucene.search.DocIdSetIterator;
import org.apache.lucene.search.Filter;
import org.apache.lucene.util.Bits;
import org.apache.lucene.util.BytesRef;
import org.apache.lucene.util.NumericUtils;
@ -30,6 +29,7 @@ import org.apache.solr.schema.SchemaField;
import org.apache.solr.schema.TrieDateField;
import org.apache.solr.search.DocIterator;
import org.apache.solr.search.DocSet;
import org.apache.solr.search.Filter;
import org.apache.solr.search.QueryParsing;
import org.apache.solr.search.SolrIndexSearcher;
import org.apache.solr.search.SyntaxError;

View File

@ -23,7 +23,6 @@ import org.apache.lucene.index.SortedDocValues;
import org.apache.lucene.index.TermsEnum;
import org.apache.lucene.search.DocIdSet;
import org.apache.lucene.search.DocIdSetIterator;
import org.apache.lucene.search.Filter;
import org.apache.lucene.util.BytesRef;
import org.apache.lucene.util.BytesRefBuilder;
import org.apache.lucene.util.CharsRefBuilder;
@ -34,6 +33,7 @@ import org.apache.solr.common.params.FacetParams;
import org.apache.solr.common.util.NamedList;
import org.apache.solr.schema.FieldType;
import org.apache.solr.search.DocSet;
import org.apache.solr.search.Filter;
import org.apache.solr.search.SolrIndexSearcher;
import org.apache.solr.util.BoundedTreeSet;

View File

@ -29,7 +29,6 @@ import org.apache.lucene.index.TermsEnum;
import org.apache.lucene.search.BooleanClause.Occur;
import org.apache.lucene.search.BooleanQuery;
import org.apache.lucene.search.DocIdSetIterator;
import org.apache.lucene.search.Filter;
import org.apache.lucene.search.FilterCollector;
import org.apache.lucene.search.LeafCollector;
import org.apache.lucene.search.Query;
@ -62,6 +61,7 @@ import org.apache.solr.schema.SchemaField;
import org.apache.solr.schema.TrieField;
import org.apache.solr.search.BitDocSet;
import org.apache.solr.search.DocSet;
import org.apache.solr.search.Filter;
import org.apache.solr.search.Grouping;
import org.apache.solr.search.HashDocSet;
import org.apache.solr.search.Insanity;

View File

@ -21,7 +21,6 @@ import java.io.IOException;
import org.apache.lucene.index.StorableField;
import org.apache.lucene.index.StoredDocument;
import org.apache.lucene.search.Query;
import org.apache.lucene.search.QueryWrapperFilter;
import org.apache.lucene.search.Sort;
import org.apache.lucene.search.join.QueryBitSetProducer;
import org.apache.lucene.search.join.BitSetProducer;
@ -38,6 +37,7 @@ import org.apache.solr.schema.SchemaField;
import org.apache.solr.search.DocIterator;
import org.apache.solr.search.DocList;
import org.apache.solr.search.QParser;
import org.apache.solr.search.QueryWrapperFilter;
import org.apache.solr.search.SyntaxError;
/**

View File

@ -42,15 +42,15 @@ import org.apache.lucene.queries.function.ValueSource;
import org.apache.lucene.search.BooleanClause.Occur;
import org.apache.lucene.search.BooleanQuery;
import org.apache.lucene.search.FieldValueQuery;
import org.apache.lucene.search.Filter;
import org.apache.lucene.search.Query;
import org.apache.lucene.search.QueryWrapperFilter;
import org.apache.lucene.search.SortField;
import org.apache.lucene.uninverting.UninvertingReader.Type;
import org.apache.solr.common.SolrException;
import org.apache.solr.common.SolrException.ErrorCode;
import org.apache.solr.response.TextResponseWriter;
import org.apache.solr.search.Filter;
import org.apache.solr.search.QParser;
import org.apache.solr.search.QueryWrapperFilter;
import org.apache.solr.search.SolrConstantScoreQuery;
import org.apache.solr.search.function.ValueSourceRangeFilter;
import org.slf4j.Logger;

View File

@ -22,10 +22,8 @@ import java.util.Collections;
import org.apache.lucene.index.LeafReader;
import org.apache.lucene.index.LeafReaderContext;
import org.apache.lucene.search.BitsFilteredDocIdSet;
import org.apache.lucene.search.DocIdSet;
import org.apache.lucene.search.DocIdSetIterator;
import org.apache.lucene.search.Filter;
import org.apache.lucene.util.Accountable;
import org.apache.lucene.util.BitDocIdSet;
import org.apache.lucene.util.BitSetIterator;

View File

@ -1,4 +1,4 @@
package org.apache.lucene.search;
package org.apache.solr.search;
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
@ -19,18 +19,18 @@ package org.apache.lucene.search;
import java.util.Objects;
import org.apache.lucene.search.DocIdSet;
import org.apache.lucene.util.Bits;
/**
* This implementation supplies a filtered DocIdSet, that excludes all
* docids which are not in a Bits instance. This is especially useful in
* {@link org.apache.lucene.search.Filter} to apply the {@code acceptDocs}
* {@link org.apache.solr.search.Filter} to apply the {@code acceptDocs}
* passed to {@code getDocIdSet()} before returning the final DocIdSet.
*
* @see DocIdSet
* @see org.apache.lucene.search.Filter
* @see org.apache.solr.search.Filter
*/
public final class BitsFilteredDocIdSet extends FilteredDocIdSet {
private final Bits acceptDocs;

View File

@ -19,7 +19,6 @@ package org.apache.solr.search;
import java.io.Closeable;
import org.apache.lucene.search.Filter;
import org.apache.lucene.util.Accountable;
import org.apache.solr.common.SolrException;

View File

@ -21,10 +21,8 @@ import java.io.IOException;
import org.apache.lucene.index.LeafReader;
import org.apache.lucene.index.LeafReaderContext;
import org.apache.lucene.search.BitsFilteredDocIdSet;
import org.apache.lucene.search.DocIdSet;
import org.apache.lucene.search.DocIdSetIterator;
import org.apache.lucene.search.Filter;
import org.apache.lucene.util.Bits;
import org.apache.lucene.util.BitDocIdSet;
import org.apache.lucene.util.FixedBitSet;

View File

@ -20,10 +20,8 @@ package org.apache.solr.search;
import java.io.IOException;
import java.util.List;
import java.util.Map;
import org.apache.lucene.index.DirectoryReader;
import org.apache.lucene.index.ExitableDirectoryReader;
import org.apache.lucene.index.Fields;
import org.apache.lucene.index.LeafReader;
import org.apache.lucene.index.LeafReaderContext;
@ -31,12 +29,9 @@ import org.apache.lucene.index.PostingsEnum;
import org.apache.lucene.index.Term;
import org.apache.lucene.index.Terms;
import org.apache.lucene.index.TermsEnum;
import org.apache.lucene.queries.function.ValueSource;
import org.apache.lucene.search.BooleanClause;
import org.apache.lucene.search.BooleanQuery;
import org.apache.lucene.search.DocIdSet;
import org.apache.lucene.search.DocIdSetIterator;
import org.apache.lucene.search.Filter;
import org.apache.lucene.search.Query;
import org.apache.lucene.search.TermQuery;
import org.apache.lucene.util.Bits;

View File

@ -1,4 +1,4 @@
package org.apache.lucene.search;
package org.apache.solr.search;
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
@ -22,6 +22,15 @@ import java.util.Set;
import org.apache.lucene.index.LeafReaderContext;
import org.apache.lucene.index.Term;
import org.apache.lucene.search.ConstantScoreScorer;
import org.apache.lucene.search.DocIdSet;
import org.apache.lucene.search.DocIdSetIterator;
import org.apache.lucene.search.Explanation;
import org.apache.lucene.search.IndexSearcher;
import org.apache.lucene.search.Query;
import org.apache.lucene.search.Scorer;
import org.apache.lucene.search.TwoPhaseIterator;
import org.apache.lucene.search.Weight;
import org.apache.lucene.util.Bits;
/**

View File

@ -1,4 +1,4 @@
package org.apache.lucene.search;
package org.apache.solr.search;
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
@ -20,6 +20,9 @@ package org.apache.lucene.search;
import java.io.IOException;
import java.util.Collection;
import org.apache.lucene.search.DocIdSet;
import org.apache.lucene.search.DocIdSetIterator;
import org.apache.lucene.search.FilteredDocIdSetIterator;
import org.apache.lucene.util.Accountable;
import org.apache.lucene.util.Bits;
import org.apache.lucene.util.RamUsageEstimator;

View File

@ -36,7 +36,6 @@ import org.apache.lucene.search.BooleanClause.Occur;
import org.apache.lucene.search.BooleanQuery;
import org.apache.lucene.search.CachingCollector;
import org.apache.lucene.search.Collector;
import org.apache.lucene.search.Filter;
import org.apache.lucene.search.MultiCollector;
import org.apache.lucene.search.Query;
import org.apache.lucene.search.ScoreDoc;

View File

@ -18,49 +18,34 @@
package org.apache.solr.search;
import java.io.IOException;
import java.io.Serializable;
import java.util.List;
import java.util.concurrent.ArrayBlockingQueue;
import java.util.concurrent.Callable;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.Semaphore;
import java.util.concurrent.Future;
import com.google.common.primitives.Longs;
import org.apache.lucene.index.IndexReaderContext;
import org.apache.lucene.index.LeafReader;
import org.apache.lucene.index.LeafReaderContext;
import org.apache.lucene.index.IndexReaderContext;
import org.apache.lucene.index.NumericDocValues;
import org.apache.lucene.index.SortedDocValues;
import org.apache.lucene.search.ConstantScoreQuery;
import org.apache.lucene.search.DocIdSet;
import org.apache.lucene.search.IndexSearcher;
import org.apache.lucene.search.LeafCollector;
import org.apache.lucene.search.Query;
import org.apache.lucene.search.Scorer;
import org.apache.lucene.search.Weight;
import org.apache.lucene.util.BitDocIdSet;
import org.apache.lucene.util.Bits;
import org.apache.lucene.util.BytesRef;
import org.apache.lucene.util.CharsRef;
import org.apache.lucene.util.CharsRefBuilder;
import org.apache.lucene.util.FixedBitSet;
import org.apache.lucene.util.Bits;
import org.apache.lucene.search.BitsFilteredDocIdSet;
import org.apache.solr.common.params.SolrParams;
import org.apache.solr.common.util.SolrjNamedThreadFactory;
import org.apache.solr.core.CloseHook;
import org.apache.solr.request.SolrQueryRequest;
import org.apache.solr.schema.IndexSchema;
import org.apache.solr.schema.FieldType;
import org.apache.solr.schema.StrField;
import org.apache.solr.schema.TrieField;
import org.apache.solr.core.SolrCore;
import org.apache.lucene.search.Query;
import org.apache.lucene.search.ConstantScoreQuery;
import org.apache.lucene.search.IndexSearcher;
import org.apache.lucene.search.Scorer;
import org.apache.lucene.search.Weight;
import org.apache.lucene.search.DocIdSet;
import org.apache.lucene.search.Filter;
import org.apache.lucene.index.SortedDocValues;
import org.apache.lucene.index.NumericDocValues;
import org.apache.lucene.util.BytesRef;
import org.apache.solr.common.util.NamedList;
import org.apache.solr.request.SolrQueryRequest;
import org.apache.solr.schema.FieldType;
import org.apache.solr.schema.IndexSchema;
import org.apache.solr.schema.StrField;
import com.google.common.primitives.Longs;
/**
* syntax fq={!hash workers=11 worker=4 keys=field1,field2}

View File

@ -33,7 +33,6 @@ import org.apache.lucene.search.ConstantScoreScorer;
import org.apache.lucene.search.ConstantScoreWeight;
import org.apache.lucene.search.DocIdSet;
import org.apache.lucene.search.DocIdSetIterator;
import org.apache.lucene.search.Filter;
import org.apache.lucene.search.IndexSearcher;
import org.apache.lucene.search.Query;
import org.apache.lucene.search.Scorer;

View File

@ -1,4 +1,4 @@
package org.apache.lucene.search;
package org.apache.solr.search;
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
@ -21,6 +21,14 @@ import java.io.IOException;
import org.apache.lucene.index.IndexReader;
import org.apache.lucene.index.LeafReaderContext;
import org.apache.lucene.search.BoostQuery;
import org.apache.lucene.search.ConstantScoreQuery;
import org.apache.lucene.search.DocIdSet;
import org.apache.lucene.search.DocIdSetIterator;
import org.apache.lucene.search.IndexSearcher;
import org.apache.lucene.search.NumericRangeQuery;
import org.apache.lucene.search.Query;
import org.apache.lucene.search.Weight;
import org.apache.lucene.util.Bits;
/**

View File

@ -3,14 +3,12 @@ package org.apache.solr.search;
import java.io.IOException;
import java.util.Map;
import org.apache.lucene.index.IndexReader;
import org.apache.lucene.index.LeafReaderContext;
import org.apache.lucene.queries.function.ValueSource;
import org.apache.lucene.search.ConstantScoreScorer;
import org.apache.lucene.search.ConstantScoreWeight;
import org.apache.lucene.search.DocIdSet;
import org.apache.lucene.search.DocIdSetIterator;
import org.apache.lucene.search.Filter;
import org.apache.lucene.search.IndexSearcher;
import org.apache.lucene.search.Query;
import org.apache.lucene.search.Scorer;

View File

@ -18,7 +18,6 @@
package org.apache.solr.search;
import org.apache.lucene.index.LeafReaderContext;
import org.apache.lucene.search.Filter;
import org.apache.lucene.search.IndexSearcher;
import org.apache.lucene.search.DocIdSet;
import org.apache.lucene.util.Bits;

View File

@ -22,10 +22,8 @@ import java.util.Collections;
import org.apache.lucene.index.LeafReader;
import org.apache.lucene.index.LeafReaderContext;
import org.apache.lucene.search.BitsFilteredDocIdSet;
import org.apache.lucene.search.DocIdSet;
import org.apache.lucene.search.DocIdSetIterator;
import org.apache.lucene.search.Filter;
import org.apache.lucene.util.Accountable;
import org.apache.lucene.util.Bits;
import org.apache.lucene.util.FixedBitSet;

View File

@ -26,10 +26,8 @@ import org.apache.lucene.search.AutomatonQuery;
import org.apache.lucene.search.BooleanClause;
import org.apache.lucene.search.BooleanQuery;
import org.apache.lucene.search.DocValuesTermsQuery;
import org.apache.lucene.search.Filter;
import org.apache.lucene.search.MatchNoDocsQuery;
import org.apache.lucene.search.Query;
import org.apache.lucene.search.QueryWrapperFilter;
import org.apache.lucene.search.TermQuery;
import org.apache.lucene.util.BytesRef;
import org.apache.lucene.util.BytesRefBuilder;

View File

@ -39,7 +39,6 @@ import org.apache.lucene.index.Terms;
import org.apache.lucene.index.TermsEnum;
import org.apache.lucene.search.DocIdSet;
import org.apache.lucene.search.DocIdSetIterator;
import org.apache.lucene.search.Filter;
import org.apache.lucene.search.Query;
import org.apache.lucene.search.TermQuery;
import org.apache.lucene.util.BytesRef;
@ -54,6 +53,7 @@ import org.apache.solr.schema.FieldType;
import org.apache.solr.schema.SchemaField;
import org.apache.solr.schema.TrieField;
import org.apache.solr.search.DocSet;
import org.apache.solr.search.Filter;
import org.apache.solr.search.HashDocSet;
import org.apache.solr.search.SolrIndexSearcher;
import org.apache.solr.search.SortedIntDocSet;

View File

@ -22,8 +22,8 @@ import org.apache.lucene.queries.function.ValueSource;
import org.apache.lucene.search.DocIdSet;
import org.apache.lucene.search.DocIdSetIterator;
import org.apache.lucene.search.IndexSearcher;
import org.apache.lucene.search.BitsFilteredDocIdSet;
import org.apache.lucene.util.Bits;
import org.apache.solr.search.BitsFilteredDocIdSet;
import org.apache.solr.search.SolrFilter;
import java.io.IOException;

View File

@ -27,14 +27,12 @@ import org.apache.lucene.queries.function.ValueSource;
import org.apache.lucene.search.BooleanClause.Occur;
import org.apache.lucene.search.BooleanQuery;
import org.apache.lucene.search.Collector;
import org.apache.lucene.search.Filter;
import org.apache.lucene.search.MultiCollector;
import org.apache.lucene.search.Query;
import org.apache.lucene.search.TimeLimitingCollector;
import org.apache.lucene.search.TotalHitCountCollector;
import org.apache.lucene.search.grouping.AbstractAllGroupHeadsCollector;
import org.apache.lucene.search.grouping.function.FunctionAllGroupHeadsCollector;
import org.apache.lucene.search.grouping.function.FunctionAllGroupsCollector;
import org.apache.lucene.search.grouping.term.TermAllGroupHeadsCollector;
import org.apache.solr.common.util.NamedList;
import org.apache.solr.schema.FieldType;

View File

@ -20,13 +20,10 @@ package org.apache.solr.search.join;
import java.io.IOException;
import org.apache.lucene.index.LeafReaderContext;
import org.apache.lucene.search.BitsFilteredDocIdSet;
import org.apache.lucene.search.DocIdSet;
import org.apache.lucene.search.Filter;
import org.apache.lucene.search.Query;
import org.apache.lucene.search.QueryWrapperFilter;
import org.apache.lucene.search.join.QueryBitSetProducer;
import org.apache.lucene.search.join.BitSetProducer;
import org.apache.lucene.search.join.QueryBitSetProducer;
import org.apache.lucene.search.join.ScoreMode;
import org.apache.lucene.search.join.ToParentBlockJoinQuery;
import org.apache.lucene.util.BitDocIdSet;
@ -34,6 +31,8 @@ import org.apache.lucene.util.BitSet;
import org.apache.lucene.util.Bits;
import org.apache.solr.common.params.SolrParams;
import org.apache.solr.request.SolrQueryRequest;
import org.apache.solr.search.BitsFilteredDocIdSet;
import org.apache.solr.search.Filter;
import org.apache.solr.search.QParser;
import org.apache.solr.search.QueryParsing;
import org.apache.solr.search.SolrCache;
@ -96,7 +95,7 @@ class BlockJoinParentQParser extends QParser {
}
private BitSetProducer createParentFilter(Query parentQ) {
return new QueryBitSetProducer(new QueryWrapperFilter(parentQ));
return new QueryBitSetProducer(parentQ);
}
// We need this wrapper since BitDocIdSetFilter does not extend Filter

View File

@ -33,7 +33,6 @@ import org.apache.lucene.search.BooleanQuery;
import org.apache.lucene.search.DocIdSet;
import org.apache.lucene.search.DocIdSetIterator;
import org.apache.lucene.search.Explanation;
import org.apache.lucene.search.Filter;
import org.apache.lucene.search.IndexSearcher;
import org.apache.lucene.search.Query;
import org.apache.lucene.search.Scorer;
@ -48,6 +47,7 @@ import org.apache.lucene.util.automaton.DaciukMihovAutomatonBuilder;
import org.apache.solr.handler.component.ResponseBuilder;
import org.apache.solr.search.BitDocSet;
import org.apache.solr.search.DocSet;
import org.apache.solr.search.Filter;
import org.apache.solr.search.SolrIndexSearcher;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

View File

@ -38,7 +38,6 @@ import org.apache.lucene.index.SortedSetDocValues;
import org.apache.lucene.index.StoredFieldVisitor;
import org.apache.lucene.search.DocIdSet;
import org.apache.lucene.search.DocIdSetIterator;
import org.apache.lucene.search.Filter;
import org.apache.lucene.util.BitSetIterator;
import org.apache.lucene.util.Bits;
import org.apache.lucene.util.FixedBitSet;

View File

@ -1,4 +1,4 @@
package org.apache.lucene.search;
package org.apache.solr.search;
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
@ -26,15 +26,21 @@ import junit.framework.Assert;
import org.apache.lucene.document.Document;
import org.apache.lucene.document.Field;
import org.apache.lucene.index.LeafReaderContext;
import org.apache.lucene.index.IndexReader;
import org.apache.lucene.index.LeafReaderContext;
import org.apache.lucene.index.RandomIndexWriter;
import org.apache.lucene.search.BooleanClause.Occur;
import org.apache.lucene.search.BooleanQuery;
import org.apache.lucene.search.DocIdSet;
import org.apache.lucene.search.DocIdSetIterator;
import org.apache.lucene.search.IndexSearcher;
import org.apache.lucene.search.MatchAllDocsQuery;
import org.apache.lucene.search.Query;
import org.apache.lucene.store.Directory;
import org.apache.lucene.util.Bits;
import org.apache.lucene.util.LuceneTestCase;
public class TestDocIdSet extends LuceneTestCase {
public class TestFilteredDocIdSet extends LuceneTestCase {
public void testFilteredDocIdSet() throws Exception {
final int maxdoc=10;
final DocIdSet innerSet = new DocIdSet() {

View File

@ -1,4 +1,4 @@
package org.apache.lucene.search;
package org.apache.solr.search;
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
@ -29,6 +29,17 @@ import org.apache.lucene.index.LeafReaderContext;
import org.apache.lucene.index.RandomIndexWriter;
import org.apache.lucene.index.Term;
import org.apache.lucene.search.BooleanClause.Occur;
import org.apache.lucene.search.BooleanQuery;
import org.apache.lucene.search.DocIdSet;
import org.apache.lucene.search.FuzzyQuery;
import org.apache.lucene.search.IndexSearcher;
import org.apache.lucene.search.Query;
import org.apache.lucene.search.RandomApproximationQuery;
import org.apache.lucene.search.ScoreDoc;
import org.apache.lucene.search.Scorer;
import org.apache.lucene.search.TermQuery;
import org.apache.lucene.search.TopDocs;
import org.apache.lucene.search.Weight;
import org.apache.lucene.store.Directory;
import org.apache.lucene.util.Bits;
import org.apache.lucene.util.English;
@ -224,6 +235,6 @@ public class TestQueryWrapperFilter extends LuceneTestCase {
}
public void testBasics() {
QueryUtils.check(new QueryWrapperFilter(new TermQuery(new Term("foo", "bar"))));
org.apache.lucene.search.QueryUtils.check(new QueryWrapperFilter(new TermQuery(new Term("foo", "bar"))));
}
}

View File

@ -34,15 +34,12 @@ import org.apache.lucene.index.DirectoryReader;
import org.apache.lucene.index.IndexWriter;
import org.apache.lucene.index.IndexWriterConfig;
import org.apache.lucene.index.LeafReaderContext;
import org.apache.lucene.search.BitsFilteredDocIdSet;
import org.apache.lucene.search.Collector;
import org.apache.lucene.search.DocIdSet;
import org.apache.lucene.search.Filter;
import org.apache.lucene.search.FilterCollector;
import org.apache.lucene.search.FilterLeafCollector;
import org.apache.lucene.search.IndexSearcher;
import org.apache.lucene.search.LeafCollector;
import org.apache.lucene.search.MatchAllDocsQuery;
import org.apache.lucene.search.ScoreDoc;
import org.apache.lucene.search.Sort;
import org.apache.lucene.search.SortField;

View File

@ -39,7 +39,6 @@ import javax.xml.stream.XMLStreamReader;
import org.apache.commons.io.output.ByteArrayOutputStream;
import org.apache.lucene.index.Term;
import org.apache.lucene.search.QueryWrapperFilter;
import org.apache.lucene.search.TermQuery;
import org.apache.lucene.search.TermRangeQuery;
import org.apache.lucene.search.TopDocs;
@ -53,6 +52,7 @@ import org.apache.solr.common.SolrInputDocument;
import org.apache.solr.common.util.ExecutorUtil;
import org.apache.solr.common.util.JavaBinCodec;
import org.apache.solr.handler.loader.XMLLoader;
import org.apache.solr.search.QueryWrapperFilter;
import org.apache.solr.search.SolrIndexSearcher;
import org.apache.solr.util.DefaultSolrThreadFactory;
import org.apache.solr.util.RefCounted;
@ -562,8 +562,8 @@ public class AddBlockUpdateTest extends SolrTestCaseJ4 {
protected ToParentBlockJoinQuery join(final String childTerm) {
return new ToParentBlockJoinQuery(
new TermQuery(new Term(child, childTerm)), new QueryBitSetProducer(new QueryWrapperFilter(
new TermRangeQuery(parent, null, null, false, false))), ScoreMode.None);
new TermQuery(new Term(child, childTerm)), new QueryBitSetProducer(
new TermRangeQuery(parent, null, null, false, false)), ScoreMode.None);
}
private Collection<? extends Callable<Void>> callables(List<Document> blocks) {