LUCENE-2858: fix more tests

git-svn-id: https://svn.apache.org/repos/asf/lucene/dev/branches/lucene2858@1237345 13f79535-47bb-0310-9956-ffa450edef68
Author: Robert Muir
Date:   2012-01-29 17:01:16 +00:00
parent 45fdfc3822
commit 25cb7a77dd
9 changed files with 24 additions and 22 deletions

View File: TestCachingWrapperFilter.java

@@ -23,6 +23,7 @@ import org.apache.lucene.analysis.MockAnalyzer;
 import org.apache.lucene.document.Document;
 import org.apache.lucene.document.StringField;
 import org.apache.lucene.index.AtomicIndexReader.AtomicReaderContext;
+import org.apache.lucene.index.DirectoryReader;
 import org.apache.lucene.index.IndexReader;
 import org.apache.lucene.index.RandomIndexWriter;
 import org.apache.lucene.index.SerialMergeScheduler;
@@ -114,7 +115,7 @@ public class TestCachingWrapperFilter extends LuceneTestCase {
   }
 
   private static void assertDocIdSetCacheable(IndexReader reader, Filter filter, boolean shouldCacheable) throws IOException {
-    assertTrue(reader.getTopReaderContext().isAtomic);
+    assertTrue(reader.getTopReaderContext() instanceof AtomicReaderContext);
     AtomicReaderContext context = (AtomicReaderContext) reader.getTopReaderContext();
     final CachingWrapperFilter cacher = new CachingWrapperFilter(filter);
     final DocIdSet originalSet = filter.getDocIdSet(context, context.reader().getLiveDocs());
@@ -171,7 +172,7 @@ public class TestCachingWrapperFilter extends LuceneTestCase {
     // flipping a coin) may give us a newly opened reader,
     // but we use .reopen on this reader below and expect to
     // (must) get an NRT reader:
-    IndexReader reader = IndexReader.open(writer.w, true);
+    DirectoryReader reader = IndexReader.open(writer.w, true);
 
     // same reason we don't wrap?
     IndexSearcher searcher = newSearcher(reader, false);
@@ -298,9 +299,9 @@ public class TestCachingWrapperFilter extends LuceneTestCase {
     dir.close();
   }
 
-  private static IndexReader refreshReader(IndexReader reader) throws IOException {
-    IndexReader oldReader = reader;
-    reader = IndexReader.openIfChanged(reader);
+  private static DirectoryReader refreshReader(DirectoryReader reader) throws IOException {
+    DirectoryReader oldReader = reader;
+    reader = DirectoryReader.openIfChanged(reader);
     if (reader != null) {
       oldReader.close();
       return reader;
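
For context: refreshReader above is the standard openIfChanged idiom, retyped to DirectoryReader. A minimal sketch of that idiom, assuming this branch's in-flux API where DirectoryReader.openIfChanged returns null when the index is unchanged:

    import java.io.IOException;
    import org.apache.lucene.index.DirectoryReader;

    // Sketch only, not the committed code: refresh a reader, closing the
    // old one only when openIfChanged actually produced a replacement.
    static DirectoryReader refresh(DirectoryReader reader) throws IOException {
      DirectoryReader newReader = DirectoryReader.openIfChanged(reader);
      if (newReader == null) {
        return reader;      // index unchanged: keep using the old reader
      }
      reader.close();       // a new reader was opened: release the old one
      return newReader;
    }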

View File: TestDisjunctionMaxQuery.java

@@ -175,7 +175,7 @@ public class TestDisjunctionMaxQuery extends LuceneTestCase {
     dq.add(tq("dek", "DOES_NOT_EXIST"));
     QueryUtils.check(random, dq, s);
-    assertTrue(s.getTopReaderContext().isAtomic);
+    assertTrue(s.getTopReaderContext() instanceof AtomicReaderContext);
     final Weight dw = s.createNormalizedWeight(dq);
     AtomicReaderContext context = (AtomicReaderContext)s.getTopReaderContext();
     final Scorer ds = dw.scorer(context, true, false, context.reader().getLiveDocs());
@@ -190,7 +190,7 @@ public class TestDisjunctionMaxQuery extends LuceneTestCase {
     final DisjunctionMaxQuery dq = new DisjunctionMaxQuery(0.0f);
     dq.add(tq("dek", "albino"));
     dq.add(tq("dek", "DOES_NOT_EXIST"));
-    assertTrue(s.getTopReaderContext().isAtomic);
+    assertTrue(s.getTopReaderContext() instanceof AtomicReaderContext);
     QueryUtils.check(random, dq, s);
     final Weight dw = s.createNormalizedWeight(dq);
     AtomicReaderContext context = (AtomicReaderContext)s.getTopReaderContext();
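
The recurring change in this commit: the boolean isAtomic flag is gone on this branch, so call sites test the context's concrete type instead. A hedged sketch of the replacement pattern, with a hypothetical helper name (assertTrue per JUnit):

    // Sketch only: an instanceof test plus a downcast replaces the old
    // boolean flag; a composite (multi-leaf) index fails the assertion.
    static AtomicReaderContext requireAtomic(IndexSearcher searcher) {
      assertTrue(searcher.getTopReaderContext() instanceof AtomicReaderContext);
      return (AtomicReaderContext) searcher.getTopReaderContext();
    }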

View File: TestFilteredSearch.java

@@ -101,7 +101,6 @@ public class TestFilteredSearch extends LuceneTestCase {
     @Override
     public DocIdSet getDocIdSet(AtomicReaderContext context, Bits acceptDocs) {
       assertNull("acceptDocs should be null, as we have an index without deletions", acceptDocs);
-      assert context.isAtomic;
      final FixedBitSet set = new FixedBitSet(context.reader().maxDoc());
      int docBase = context.docBase;
      final int limit = docBase+context.reader().maxDoc();
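
Here the assert is simply deleted: getDocIdSet already receives an AtomicReaderContext, so atomicity is enforced by the type system. For reference, the method body plausibly continues like this, assuming the filter holds a 'docs' array of global doc ids (an assumption; the hunk is truncated before the loop):

    // Sketch: keep only the global ids that fall inside this leaf's range
    // [docBase, docBase + maxDoc), rebased to leaf-local ids.
    for (int docId : docs) {            // 'docs': assumed field of global ids
      if (docId >= docBase && docId < limit) {
        set.set(docId - docBase);       // global id -> leaf-local id
      }
    }
    return set;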

View File: TestPositionIncrement.java

@@ -28,6 +28,7 @@ import org.apache.lucene.analysis.tokenattributes.PositionIncrementAttribute;
 import org.apache.lucene.analysis.tokenattributes.CharTermAttribute;
 import org.apache.lucene.document.Document;
 import org.apache.lucene.document.TextField;
+import org.apache.lucene.index.AtomicIndexReader;
 import org.apache.lucene.index.MultiFields;
 import org.apache.lucene.index.DocsAndPositionsEnum;
 import org.apache.lucene.index.IndexReader;
@@ -209,7 +210,7 @@ public class TestPositionIncrement extends LuceneTestCase {
     writer.addDocument(doc);
 
     final IndexReader readerFromWriter = writer.getReader();
-    SlowCompositeReaderWrapper r = SlowCompositeReaderWrapper.wrap(readerFromWriter);
+    AtomicIndexReader r = SlowCompositeReaderWrapper.wrap(readerFromWriter);
     DocsAndPositionsEnum tp = r.termPositionsEnum(r.getLiveDocs(),
                                                   "content",

View File: TestTermScorer.java

@@ -76,7 +76,7 @@ public class TestTermScorer extends LuceneTestCase {
     TermQuery termQuery = new TermQuery(allTerm);
 
     Weight weight = indexSearcher.createNormalizedWeight(termQuery);
-    assertTrue(indexSearcher.getTopReaderContext().isAtomic);
+    assertTrue(indexSearcher.getTopReaderContext() instanceof AtomicReaderContext);
     AtomicReaderContext context = (AtomicReaderContext)indexSearcher.getTopReaderContext();
     Scorer ts = weight.scorer(context, true, true, context.reader().getLiveDocs());
     // we have 2 documents with the term all in them, one document for all the
@@ -138,7 +138,7 @@ public class TestTermScorer extends LuceneTestCase {
     TermQuery termQuery = new TermQuery(allTerm);
 
     Weight weight = indexSearcher.createNormalizedWeight(termQuery);
-    assertTrue(indexSearcher.getTopReaderContext().isAtomic);
+    assertTrue(indexSearcher.getTopReaderContext() instanceof AtomicReaderContext);
     AtomicReaderContext context = (AtomicReaderContext) indexSearcher.getTopReaderContext();
     Scorer ts = weight.scorer(context, true, true, context.reader().getLiveDocs());
     assertTrue("next did not return a doc",
@@ -157,7 +157,7 @@ public class TestTermScorer extends LuceneTestCase {
     TermQuery termQuery = new TermQuery(allTerm);
 
     Weight weight = indexSearcher.createNormalizedWeight(termQuery);
-    assertTrue(indexSearcher.getTopReaderContext().isAtomic);
+    assertTrue(indexSearcher.getTopReaderContext() instanceof AtomicReaderContext);
     AtomicReaderContext context = (AtomicReaderContext) indexSearcher.getTopReaderContext();
     Scorer ts = weight.scorer(context, true, true, context.reader().getLiveDocs());
     assertTrue("Didn't skip", ts.advance(3) != DocIdSetIterator.NO_MORE_DOCS);

View File: MultiSpansWrapper.java

@@ -62,7 +62,7 @@ public class MultiSpansWrapper extends Spans { // can't be package private due t
     }
     AtomicReaderContext[] leaves = ReaderUtil.leaves(topLevelReaderContext);
     if(leaves.length == 1) {
-      return query.getSpans(leaves[0], leaves[0].reader.getLiveDocs(), termContexts);
+      return query.getSpans(leaves[0], leaves[0].reader().getLiveDocs(), termContexts);
     }
     return new MultiSpansWrapper(leaves, query, termContexts);
   }
@@ -73,14 +73,14 @@ public class MultiSpansWrapper extends Spans { // can't be package private due t
       return false;
     }
     if (current == null) {
-      current = query.getSpans(leaves[leafOrd], leaves[leafOrd].reader.getLiveDocs(), termContexts);
+      current = query.getSpans(leaves[leafOrd], leaves[leafOrd].reader().getLiveDocs(), termContexts);
     }
     while(true) {
       if (current.next()) {
         return true;
       }
       if (++leafOrd < leaves.length) {
-        current = query.getSpans(leaves[leafOrd], leaves[leafOrd].reader.getLiveDocs(), termContexts);
+        current = query.getSpans(leaves[leafOrd], leaves[leafOrd].reader().getLiveDocs(), termContexts);
       } else {
         current = null;
         break;
@@ -98,17 +98,17 @@ public class MultiSpansWrapper extends Spans { // can't be package private due t
     int subIndex = ReaderUtil.subIndex(target, leaves);
     assert subIndex >= leafOrd;
     if (subIndex != leafOrd) {
-      current = query.getSpans(leaves[subIndex], leaves[subIndex].reader.getLiveDocs(), termContexts);
+      current = query.getSpans(leaves[subIndex], leaves[subIndex].reader().getLiveDocs(), termContexts);
       leafOrd = subIndex;
     } else if (current == null) {
-      current = query.getSpans(leaves[leafOrd], leaves[leafOrd].reader.getLiveDocs(), termContexts);
+      current = query.getSpans(leaves[leafOrd], leaves[leafOrd].reader().getLiveDocs(), termContexts);
     }
     while (true) {
       if (current.skipTo(target - leaves[leafOrd].docBase)) {
         return true;
       }
       if (++leafOrd < leaves.length) {
-        current = query.getSpans(leaves[leafOrd], leaves[leafOrd].reader.getLiveDocs(), termContexts);
+        current = query.getSpans(leaves[leafOrd], leaves[leafOrd].reader().getLiveDocs(), termContexts);
       } else {
         current = null;
         break;
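
The change at every call site here is that AtomicReaderContext's public reader field became a reader() accessor. The surrounding logic, unchanged, maps a top-level doc id onto a leaf before skipping. A hedged sketch of that mapping, reusing the names from the hunks (query, leaves, termContexts, target):

    // Sketch: locate the leaf holding global id 'target', open spans on it,
    // then skip using a leaf-local id (global id minus the leaf's docBase).
    int subIndex = ReaderUtil.subIndex(target, leaves);
    Spans spans = query.getSpans(leaves[subIndex],
                                 leaves[subIndex].reader().getLiveDocs(),
                                 termContexts);
    boolean found = spans.skipTo(target - leaves[subIndex].docBase);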

View File: TestNearSpansOrdered.java

@@ -168,7 +168,7 @@ public class TestNearSpansOrdered extends LuceneTestCase {
     Weight w = searcher.createNormalizedWeight(q);
     ReaderContext topReaderContext = searcher.getTopReaderContext();
     AtomicReaderContext[] leaves = ReaderUtil.leaves(topReaderContext);
-    Scorer s = w.scorer(leaves[0], true, false, leaves[0].reader.getLiveDocs());
+    Scorer s = w.scorer(leaves[0], true, false, leaves[0].reader().getLiveDocs());
     assertEquals(1, s.advance(1));
   }

View File: TestSpans.java

@@ -433,7 +433,7 @@ public class TestSpans extends LuceneTestCase {
                                                  slop,
                                                  ordered);
-        spanScorer = searcher.createNormalizedWeight(snq).scorer(leaves[i], true, false, leaves[i].reader.getLiveDocs());
+        spanScorer = searcher.createNormalizedWeight(snq).scorer(leaves[i], true, false, leaves[i].reader().getLiveDocs());
       } finally {
         searcher.setSimilarityProvider(oldSim);
       }

View File: TestNRTCachingDirectory.java

@@ -26,6 +26,7 @@ import java.util.List;
 import org.apache.lucene.analysis.Analyzer;
 import org.apache.lucene.analysis.MockAnalyzer;
 import org.apache.lucene.document.Document;
+import org.apache.lucene.index.DirectoryReader;
 import org.apache.lucene.index.IndexReader;
 import org.apache.lucene.index.IndexWriter;
 import org.apache.lucene.index.IndexWriterConfig;
@@ -55,7 +56,7 @@ public class TestNRTCachingDirectory extends LuceneTestCase {
     }
 
     final List<BytesRef> ids = new ArrayList<BytesRef>();
-    IndexReader r = null;
+    DirectoryReader r = null;
     for(int docCount=0;docCount<numDocs;docCount++) {
       final Document doc = docs.nextDoc();
       ids.add(new BytesRef(doc.get("docid")));
@@ -64,7 +65,7 @@ public class TestNRTCachingDirectory extends LuceneTestCase {
       if (r == null) {
         r = IndexReader.open(w.w, false);
       } else {
-        final IndexReader r2 = IndexReader.openIfChanged(r);
+        final DirectoryReader r2 = DirectoryReader.openIfChanged(r);
         if (r2 != null) {
           r.close();
           r = r2;
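
The retyping to DirectoryReader matters here because, on this branch, openIfChanged lives on DirectoryReader. A hedged sketch of the NRT open-then-refresh loop the test runs, using the same calls as the hunks (w is the test's RandomIndexWriter, w.w its underlying IndexWriter):

    // Sketch: the first iteration opens an NRT reader straight from the
    // writer; later iterations refresh it, swapping readers only when
    // openIfChanged actually opened a new one (null means no change).
    DirectoryReader r = null;
    for (int docCount = 0; docCount < numDocs; docCount++) {
      w.addDocument(docs.nextDoc());
      if (r == null) {
        r = IndexReader.open(w.w, false);   // initial NRT open
      } else {
        final DirectoryReader r2 = DirectoryReader.openIfChanged(r);
        if (r2 != null) {
          r.close();
          r = r2;
        }
      }
    }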