mirror of https://github.com/apache/lucene.git
LUCENE-1187: Fix ChainedFilterTest.java and BooleanFilterTest.java so that they no longer reference a class from test/org/apache/lucene/search/, which is not on the contrib classpath.
git-svn-id: https://svn.apache.org/repos/asf/lucene/java/trunk@659740 13f79535-47bb-0310-9956-ffa450edef68
This commit is contained in:
parent 14f3663f56
commit 1d6d186888
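The rewritten tests bridge the two Filter APIs by hand: they adapt the new DocIdSet-based getDocIdSet() contract to the deprecated BitSet-based bits() contract by copying every doc id the iterator produces into a java.util.BitSet. Below is a minimal standalone sketch of that bridge pattern, assuming the Filter and DocIdSetIterator API of this era; the class name BitSetFilterBridge is illustrative and not part of the commit.

import java.io.IOException;
import java.util.BitSet;

import org.apache.lucene.index.IndexReader;
import org.apache.lucene.search.DocIdSetIterator;
import org.apache.lucene.search.Filter;

// Illustrative sketch (not from this commit): adapts a DocIdSet-producing
// Filter to the deprecated BitSet-based contract, as the tests below do inline.
class BitSetFilterBridge extends Filter {
  private final Filter delegate;

  BitSetFilterBridge(Filter delegate) {
    this.delegate = delegate;
  }

  public BitSet bits(IndexReader reader) throws IOException {
    // Size the set to the reader's doc id space, then mark every
    // doc id the new-style iterator returns.
    BitSet bits = new BitSet(reader.maxDoc());
    DocIdSetIterator it = delegate.getDocIdSet(reader).iterator();
    while (it.next()) {     // next() returns false once exhausted
      bits.set(it.doc());   // doc() is the current doc id
    }
    return bits;
  }
}

ChainedFilterTest and BooleanFilterTest inline exactly this logic instead of sharing the core-test helper OldBitSetFilterWrapper, which contrib tests cannot see on their classpath.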
ChainedFilterTest.java
@@ -22,6 +22,8 @@ import java.util.*;
 import java.io.IOException;
+import java.text.ParseException;
+import java.text.SimpleDateFormat;

 import org.apache.lucene.index.IndexReader;
 import org.apache.lucene.index.IndexWriter;
 import org.apache.lucene.index.Term;
 import org.apache.lucene.index.IndexWriter.MaxFieldLength;
@@ -89,7 +91,18 @@ public class ChainedFilterTest extends TestCase {
   private Filter[] getChainWithOldFilters(Filter[] chain) {
     Filter[] oldFilters = new Filter[chain.length];
     for (int i = 0; i < chain.length; i++) {
-      oldFilters[i] = new OldBitSetFilterWrapper(chain[i]);
+      final Filter f = chain[i];
+      // create old BitSet-based Filter as wrapper
+      oldFilters[i] = new Filter() {
+        public BitSet bits(IndexReader reader) throws IOException {
+          BitSet bits = new BitSet(reader.maxDoc());
+          DocIdSetIterator it = f.getDocIdSet(reader).iterator();
+          while(it.next()) {
+            bits.set(it.doc());
+          }
+          return bits;
+        }
+      };
     }
     return oldFilters;
   }
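Note the final local in the hunk above: before Java 8, an anonymous inner class may only capture local variables that are declared final, which is why the loop copies chain[i] into f before the wrapper body references it. A minimal illustration of that language rule, unrelated to Lucene (all names here are invented):

// Illustrative only: pre-Java-8 capture rules for anonymous classes.
class CaptureDemo {
  static Runnable[] make(final int[] values) {
    Runnable[] rs = new Runnable[values.length];
    for (int i = 0; i < values.length; i++) {
      final int v = values[i];  // must be final to be captured below; i itself is not final
      rs[i] = new Runnable() {
        public void run() {
          System.out.println(v);  // reads the captured copy
        }
      };
    }
    return rs;
  }

  public static void main(String[] args) {
    for (Runnable r : make(new int[] {1, 2, 3})) r.run();
  }
}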
BooleanFilterTest.java
@@ -67,11 +67,26 @@ public class BooleanFilterTest extends TestCase
     writer.addDocument(doc);
   }

+  private Filter getOldBitSetFilter(final Filter filter) {
+    return new Filter() {
+      public BitSet bits(IndexReader reader) throws IOException {
+        BitSet bits = new BitSet(reader.maxDoc());
+        DocIdSetIterator it = filter.getDocIdSet(reader).iterator();
+        while(it.next()) {
+          bits.set(it.doc());
+        }
+        return bits;
+      }
+    };
+  }
+
   private Filter getRangeFilter(String field,String lowerPrice, String upperPrice, boolean old)
   {
     Filter f = new RangeFilter(field,lowerPrice,upperPrice,true,true);
     if (old) {
-      return new OldBitSetFilterWrapper(f);
+      return getOldBitSetFilter(f);
     }

     return f;
@@ -81,7 +96,7 @@ public class BooleanFilterTest extends TestCase
     TermsFilter tf=new TermsFilter();
     tf.addTerm(new Term(field,text));
     if (old) {
-      return new OldBitSetFilterWrapper(tf);
+      return getOldBitSetFilter(tf);
     }

     return tf;
DuplicateFilterTest.java
@@ -1,165 +1,165 @@
package org.apache.lucene.search;

/**
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License. You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

import java.io.IOException;
import java.util.HashSet;

import junit.framework.TestCase;

import org.apache.lucene.analysis.standard.StandardAnalyzer;
import org.apache.lucene.document.Document;
import org.apache.lucene.document.Field;
import org.apache.lucene.index.IndexReader;
import org.apache.lucene.index.IndexWriter;
import org.apache.lucene.index.Term;
import org.apache.lucene.index.TermDocs;
import org.apache.lucene.store.RAMDirectory;

public class DuplicateFilterTest extends TestCase
{
  private static final String KEY_FIELD = "url";
  private RAMDirectory directory;
  private IndexReader reader;
  TermQuery tq=new TermQuery(new Term("text","lucene"));
  private IndexSearcher searcher;

  protected void setUp() throws Exception
  {
    directory = new RAMDirectory();
    IndexWriter writer = new IndexWriter(directory, new StandardAnalyzer(), true);

    //Add series of docs with filterable fields : url, text and dates flags
    addDoc(writer, "http://lucene.apache.org", "lucene 1.4.3 available", "20040101");
    addDoc(writer, "http://lucene.apache.org", "New release pending", "20040102");
    addDoc(writer, "http://lucene.apache.org", "Lucene 1.9 out now", "20050101");
    addDoc(writer, "http://www.bar.com", "Local man bites dog", "20040101");
    addDoc(writer, "http://www.bar.com", "Dog bites local man", "20040102");
    addDoc(writer, "http://www.bar.com", "Dog uses Lucene", "20050101");
    addDoc(writer, "http://lucene.apache.org", "Lucene 2.0 out", "20050101");
    addDoc(writer, "http://lucene.apache.org", "Oops. Lucene 2.1 out", "20050102");

    writer.close();
    reader=IndexReader.open(directory);
    searcher =new IndexSearcher(reader);
  }

  protected void tearDown() throws Exception
  {
    reader.close();
    searcher.close();
    directory.close();
  }

  private void addDoc(IndexWriter writer, String url, String text, String date) throws IOException
  {
    Document doc=new Document();
    doc.add(new Field(KEY_FIELD,url,Field.Store.YES,Field.Index.UN_TOKENIZED));
    doc.add(new Field("text",text,Field.Store.YES,Field.Index.TOKENIZED));
    doc.add(new Field("date",date,Field.Store.YES,Field.Index.TOKENIZED));
    writer.addDocument(doc);
  }

  public void testDefaultFilter() throws Throwable
  {
    DuplicateFilter df=new DuplicateFilter(KEY_FIELD);
    HashSet results=new HashSet();
    Hits h = searcher.search(tq,df);
    for(int i=0;i<h.length();i++)
    {
      Document d=h.doc(i);
      String url=d.get(KEY_FIELD);
      assertFalse("No duplicate urls should be returned",results.contains(url));
      results.add(url);
    }
  }
  public void testNoFilter() throws Throwable
  {
    HashSet results=new HashSet();
    Hits h = searcher.search(tq);
    assertTrue("Default searching should have found some matches",h.length()>0);
    boolean dupsFound=false;
    for(int i=0;i<h.length();i++)
    {
      Document d=h.doc(i);
      String url=d.get(KEY_FIELD);
      if(!dupsFound)
        dupsFound=results.contains(url);
      results.add(url);
    }
    assertTrue("Default searching should have found duplicate urls",dupsFound);
  }

  public void testFastFilter() throws Throwable
  {
    DuplicateFilter df=new DuplicateFilter(KEY_FIELD);
    df.setProcessingMode(DuplicateFilter.PM_FAST_INVALIDATION);
    HashSet results=new HashSet();
    Hits h = searcher.search(tq,df);
    assertTrue("Filtered searching should have found some matches",h.length()>0);
    for(int i=0;i<h.length();i++)
    {
      Document d=h.doc(i);
      String url=d.get(KEY_FIELD);
      assertFalse("No duplicate urls should be returned",results.contains(url));
      results.add(url);
    }
    assertEquals("Two urls found",2, results.size());
  }
  public void testKeepsLastFilter() throws Throwable
  {
    DuplicateFilter df=new DuplicateFilter(KEY_FIELD);
    df.setKeepMode(DuplicateFilter.KM_USE_LAST_OCCURRENCE);
    Hits h = searcher.search(tq,df);
    assertTrue("Filtered searching should have found some matches",h.length()>0);
    for(int i=0;i<h.length();i++)
    {
      Document d=h.doc(i);
      String url=d.get(KEY_FIELD);
      TermDocs td = reader.termDocs(new Term(KEY_FIELD,url));
      int lastDoc=0;
      while(td.next())
      {
        lastDoc=td.doc();
      }
      assertEquals("Duplicate urls should return last doc",lastDoc, h.id((i)));
    }
  }

  public void testKeepsFirstFilter() throws Throwable
  {
    DuplicateFilter df=new DuplicateFilter(KEY_FIELD);
    df.setKeepMode(DuplicateFilter.KM_USE_FIRST_OCCURRENCE);
    Hits h = searcher.search(tq,df);
    assertTrue("Filtered searching should have found some matches",h.length()>0);
    for(int i=0;i<h.length();i++)
    {
      Document d=h.doc(i);
      String url=d.get(KEY_FIELD);
      TermDocs td = reader.termDocs(new Term(KEY_FIELD,url));
      int lastDoc=0;
      td.next();
      lastDoc=td.doc();
      assertEquals("Duplicate urls should return first doc",lastDoc, h.id((i)));
    }
  }
}
TermsFilterTest.java
@@ -1,70 +1,70 @@
package org.apache.lucene.search;

import java.util.BitSet;
import java.util.HashSet;

import junit.framework.TestCase;

import org.apache.lucene.analysis.WhitespaceAnalyzer;
import org.apache.lucene.document.Document;
import org.apache.lucene.document.Field;
import org.apache.lucene.index.IndexReader;
import org.apache.lucene.index.IndexWriter;
import org.apache.lucene.index.Term;
import org.apache.lucene.index.IndexWriter.MaxFieldLength;
import org.apache.lucene.store.RAMDirectory;

public class TermsFilterTest extends TestCase
{
  public void testCachability() throws Exception
  {
    TermsFilter a=new TermsFilter();
    a.addTerm(new Term("field1","a"));
    a.addTerm(new Term("field1","b"));
    HashSet cachedFilters=new HashSet();
    cachedFilters.add(a);
    TermsFilter b=new TermsFilter();
    b.addTerm(new Term("field1","a"));
    b.addTerm(new Term("field1","b"));

    assertTrue("Must be cached",cachedFilters.contains(b));
    b.addTerm(new Term("field1","a")); //duplicate term
    assertTrue("Must be cached",cachedFilters.contains(b));
    b.addTerm(new Term("field1","c"));
    assertFalse("Must not be cached",cachedFilters.contains(b));
  }
  public void testMissingTerms() throws Exception
  {
    String fieldName="field1";
    RAMDirectory rd=new RAMDirectory();
    IndexWriter w=new IndexWriter(rd,new WhitespaceAnalyzer(),MaxFieldLength.UNLIMITED);
    for (int i = 0; i < 100; i++)
    {
      Document doc=new Document();
      int term=i*10; //terms are units of 10;
      doc.add(new Field(fieldName,""+term,Field.Store.YES,Field.Index.UN_TOKENIZED));
      w.addDocument(doc);
    }
    w.close();
    IndexReader reader = IndexReader.open(rd);

    TermsFilter tf=new TermsFilter();
    tf.addTerm(new Term(fieldName,"19"));
-   BitSet bits = tf.bits(reader);
+   OpenBitSet bits = (OpenBitSet)tf.getDocIdSet(reader);
    assertEquals("Must match nothing", 0, bits.cardinality());

    tf.addTerm(new Term(fieldName,"20"));
-   bits=tf.bits(reader);
+   bits = (OpenBitSet)tf.getDocIdSet(reader);
    assertEquals("Must match 1", 1, bits.cardinality());

    tf.addTerm(new Term(fieldName,"10"));
-   bits=tf.bits(reader);
+   bits = (OpenBitSet)tf.getDocIdSet(reader);
    assertEquals("Must match 2", 2, bits.cardinality());

    tf.addTerm(new Term(fieldName,"00"));
-   bits=tf.bits(reader);
+   bits = (OpenBitSet)tf.getDocIdSet(reader);
    assertEquals("Must match 2", 2, bits.cardinality());
  }
}
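The TermsFilterTest change above is the other side of the same migration: assertions that used to ask the filter for a java.util.BitSet via the deprecated bits() method now ask for a DocIdSet and cast it to OpenBitSet, which also exposes cardinality(). A minimal sketch of the new call pattern, assuming the contrib TermsFilter of this era; the demo class name is invented, and every API call below appears in the test itself.

import org.apache.lucene.analysis.WhitespaceAnalyzer;
import org.apache.lucene.document.Document;
import org.apache.lucene.document.Field;
import org.apache.lucene.index.IndexReader;
import org.apache.lucene.index.IndexWriter;
import org.apache.lucene.index.IndexWriter.MaxFieldLength;
import org.apache.lucene.index.Term;
import org.apache.lucene.search.TermsFilter;
import org.apache.lucene.store.RAMDirectory;
import org.apache.lucene.util.OpenBitSet;

// Hypothetical demo class (not part of the commit).
public class TermsFilterMigrationDemo {
  public static void main(String[] args) throws Exception {
    // Build a one-document index to run the filter against.
    RAMDirectory rd = new RAMDirectory();
    IndexWriter w = new IndexWriter(rd, new WhitespaceAnalyzer(), MaxFieldLength.UNLIMITED);
    Document doc = new Document();
    doc.add(new Field("field1", "20", Field.Store.YES, Field.Index.UN_TOKENIZED));
    w.addDocument(doc);
    w.close();

    IndexReader reader = IndexReader.open(rd);
    TermsFilter tf = new TermsFilter();
    tf.addTerm(new Term("field1", "20"));

    // Before LUCENE-1187 the test asked for a java.util.BitSet:
    //   BitSet bits = tf.bits(reader);
    // Now it asks for a DocIdSet; TermsFilter returns an OpenBitSet,
    // which still offers cardinality() for the assertions.
    OpenBitSet bits = (OpenBitSet) tf.getDocIdSet(reader);
    System.out.println("matches: " + bits.cardinality());  // expected: 1

    reader.close();
  }
}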
OldBitSetFilterWrapper.java (file deleted by this commit)
@@ -1,48 +0,0 @@
package org.apache.lucene.search;

/**
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License. You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

import java.io.IOException;
import java.util.BitSet;

import org.apache.lucene.index.IndexReader;

/**
 * Helper class used for testing compatibility with old BitSet-based filters.
 * Does not override {@link Filter#getDocIdSet(IndexReader)} and thus ensures
 * that {@link #bits(IndexReader)} is called.
 *
 * @deprecated This class will be removed together with the
 * {@link Filter#bits(IndexReader)} method in Lucene 3.0.
 */
public class OldBitSetFilterWrapper extends Filter {
  private Filter filter;

  public OldBitSetFilterWrapper(Filter filter) {
    this.filter = filter;
  }

  public BitSet bits(IndexReader reader) throws IOException {
    BitSet bits = new BitSet(reader.maxDoc());
    DocIdSetIterator it = filter.getDocIdSet(reader).iterator();
    while(it.next()) {
      bits.set(it.doc());
    }
    return bits;
  }
}