mirror of https://github.com/apache/lucene.git
LUCENE-2858: Split IndexReader in AtomicReader and CompositeReader
git-svn-id: https://svn.apache.org/repos/asf/lucene/dev/trunk@1238085 13f79535-47bb-0310-9956-ffa450edef68
This commit is contained in:
commit 226c270cd2
@@ -29,9 +29,9 @@ import java.util.TreeSet;
 
 import org.apache.lucene.analysis.CachingTokenFilter;
 import org.apache.lucene.analysis.TokenStream;
+import org.apache.lucene.index.AtomicReaderContext;
 import org.apache.lucene.index.IndexReader;
 import org.apache.lucene.index.Term;
-import org.apache.lucene.index.IndexReader.AtomicReaderContext;
 import org.apache.lucene.index.memory.MemoryIndex;
 import org.apache.lucene.search.*;
 import org.apache.lucene.search.spans.FieldMaskingSpanQuery;
@@ -74,7 +74,7 @@ public class WeightedSpanTermExtractor {
 
     for (final AtomicReaderContext ctx : ctxSet) {
       try {
-        ctx.reader.close();
+        ctx.reader().close();
       } catch (IOException e) {
         // alert?
       }
@@ -153,7 +153,7 @@ public class WeightedSpanTermExtractor {
         query = mtq;
       }
       if (mtq.getField() != null) {
-        IndexReader ir = getLeafContextForField(mtq.getField()).reader;
+        IndexReader ir = getLeafContextForField(mtq.getField()).reader();
         extract(query.rewrite(ir), terms);
       }
     } else if (query instanceof MultiPhraseQuery) {
@@ -244,7 +244,7 @@ public class WeightedSpanTermExtractor {
     final boolean mustRewriteQuery = mustRewriteQuery(spanQuery);
     if (mustRewriteQuery) {
       for (final String field : fieldNames) {
-        final SpanQuery rewrittenQuery = (SpanQuery) spanQuery.rewrite(getLeafContextForField(field).reader);
+        final SpanQuery rewrittenQuery = (SpanQuery) spanQuery.rewrite(getLeafContextForField(field).reader());
         queries.put(field, rewrittenQuery);
         rewrittenQuery.extractTerms(nonWeightedTerms);
       }
@@ -268,7 +268,7 @@ public class WeightedSpanTermExtractor {
     for (Term term : extractedTerms) {
       termContexts.put(term, TermContext.build(context, term, true));
     }
-    Bits acceptDocs = context.reader.getLiveDocs();
+    Bits acceptDocs = context.reader().getLiveDocs();
     final Spans spans = q.getSpans(context, acceptDocs, termContexts);
 
     // collect span positions
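
The mechanical change running through these highlighter hunks, and through most of the files below, is that AtomicReaderContext no longer exposes its leaf reader as a public reader field; callers go through the reader() accessor, which is typed as the new AtomicReader class. A minimal sketch of a Filter written against the new accessor; the class name and the match-everything logic are illustrative only, not part of this commit:

    import java.io.IOException;
    import org.apache.lucene.index.AtomicReader;
    import org.apache.lucene.index.AtomicReaderContext;
    import org.apache.lucene.search.DocIdSet;
    import org.apache.lucene.search.Filter;
    import org.apache.lucene.util.Bits;
    import org.apache.lucene.util.FixedBitSet;

    public class MatchAllDocsFilter extends Filter {
      @Override
      public DocIdSet getDocIdSet(AtomicReaderContext context, Bits acceptDocs) throws IOException {
        // before this commit: context.reader.maxDoc() -- reader was a public field
        final AtomicReader reader = context.reader(); // accessor instead of field
        final FixedBitSet bits = new FixedBitSet(reader.maxDoc());
        bits.set(0, reader.maxDoc()); // mark every doc; FixedBitSet doubles as a DocIdSet, as DuplicateFilter below relies on
        return bits;
      }
    }
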
@@ -30,11 +30,11 @@ import org.apache.lucene.document.FieldType;
 import org.apache.lucene.document.Document;
 import org.apache.lucene.document.Field;
 import org.apache.lucene.document.TextField;
+import org.apache.lucene.index.AtomicReaderContext;
 import org.apache.lucene.index.CorruptIndexException;
 import org.apache.lucene.index.IndexReader;
 import org.apache.lucene.index.IndexWriter;
 import org.apache.lucene.index.Term;
-import org.apache.lucene.index.IndexReader.AtomicReaderContext;
 import org.apache.lucene.search.Collector;
 import org.apache.lucene.search.IndexSearcher;
 import org.apache.lucene.search.PhraseQuery;
@@ -33,6 +33,8 @@ import org.apache.lucene.analysis.tokenattributes.OffsetAttribute;
 import org.apache.lucene.analysis.tokenattributes.PositionIncrementAttribute;
 import org.apache.lucene.analysis.tokenattributes.TermToBytesRefAttribute;
 import org.apache.lucene.document.Document;
+import org.apache.lucene.index.AtomicReader;
+import org.apache.lucene.index.AtomicReaderContext;
 import org.apache.lucene.index.Norm;
 import org.apache.lucene.index.DocValues;
 import org.apache.lucene.index.DocsAndPositionsEnum;
@@ -41,7 +43,6 @@ import org.apache.lucene.index.FieldInfos;
 import org.apache.lucene.index.FieldInvertState;
 import org.apache.lucene.index.Fields;
 import org.apache.lucene.index.FieldsEnum;
-import org.apache.lucene.index.IndexReader.AtomicReaderContext;
 import org.apache.lucene.index.IndexReader;
 import org.apache.lucene.index.OrdTermState;
 import org.apache.lucene.index.StoredFieldVisitor;
@@ -749,10 +750,9 @@ public class MemoryIndex {
   * Search support for Lucene framework integration; implements all methods
   * required by the Lucene IndexReader contracts.
   */
-  private final class MemoryIndexReader extends IndexReader {
+  private final class MemoryIndexReader extends AtomicReader {
 
    private IndexSearcher searcher; // needed to find searcher.getSimilarity()
-    private final ReaderContext readerInfos = new AtomicReaderContext(this);
 
    private MemoryIndexReader() {
      super(); // avoid as much superclass baggage as possible
@@ -776,20 +776,6 @@ public class MemoryIndex {
      return fieldInfos;
    }
 
-    @Override
-    public int docFreq(String field, BytesRef term) {
-      Info info = getInfo(field);
-      int freq = 0;
-      if (info != null) freq = info.getPositions(term) != null ? 1 : 0;
-      if (DEBUG) System.err.println("MemoryIndexReader.docFreq: " + field + ":" + term + ", freq:" + freq);
-      return freq;
-    }
-
-    @Override
-    public ReaderContext getTopReaderContext() {
-      return readerInfos;
-    }
-
    private class MemoryFields extends Fields {
      @Override
      public FieldsEnum iterator() {
@@ -33,6 +33,7 @@ import org.apache.lucene.codecs.lucene40.Lucene40PostingsFormat;
 import org.apache.lucene.document.Document;
 import org.apache.lucene.document.Field;
 import org.apache.lucene.document.TextField;
+import org.apache.lucene.index.AtomicReader;
 import org.apache.lucene.index.DocsAndPositionsEnum;
 import org.apache.lucene.index.DocsEnum;
 import org.apache.lucene.index.IndexReader;
@@ -185,7 +186,7 @@ public class MemoryIndexTest extends BaseTokenStreamTestCase {
     Analyzer analyzer = new MockAnalyzer(random);
     MemoryIndex memory = new MemoryIndex();
     memory.addField("foo", "bar", analyzer);
-    IndexReader reader = memory.createSearcher().getIndexReader();
+    AtomicReader reader = (AtomicReader) memory.createSearcher().getIndexReader();
     DocsEnum disi = _TestUtil.docs(random, reader, "foo", new BytesRef("bar"), null, null, false);
     int docid = disi.docID();
     assertTrue(docid == -1 || docid == DocIdSetIterator.NO_MORE_DOCS);
@@ -205,7 +206,7 @@ public class MemoryIndexTest extends BaseTokenStreamTestCase {
     Analyzer analyzer = new MockAnalyzer(random);
     MemoryIndex memory = new MemoryIndex();
     memory.addField("foo", "bar", analyzer);
-    IndexReader reader = memory.createSearcher().getIndexReader();
+    AtomicReader reader = (AtomicReader) memory.createSearcher().getIndexReader();
     DocsAndPositionsEnum disi = reader.termPositionsEnum(null, "foo", new BytesRef("bar"), false);
     int docid = disi.docID();
     assertTrue(docid == -1 || docid == DocIdSetIterator.NO_MORE_DOCS);
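
Because MemoryIndexReader now extends AtomicReader rather than IndexReader, the tests above downcast the searcher's reader to reach leaf-only methods such as termPositionsEnum(). A condensed sketch of that pattern, assuming an Analyzer is available; the field and term values are illustrative:

    import java.io.IOException;
    import org.apache.lucene.analysis.Analyzer;
    import org.apache.lucene.index.AtomicReader;
    import org.apache.lucene.index.DocsAndPositionsEnum;
    import org.apache.lucene.index.memory.MemoryIndex;
    import org.apache.lucene.util.BytesRef;

    public class MemoryIndexProbe {
      public static DocsAndPositionsEnum positionsFor(Analyzer analyzer) throws IOException {
        MemoryIndex memory = new MemoryIndex();
        memory.addField("foo", "bar", analyzer);
        // createSearcher() still hands back an IndexReader; after this commit the
        // underlying MemoryIndexReader is an AtomicReader, so the cast is safe:
        AtomicReader reader = (AtomicReader) memory.createSearcher().getIndexReader();
        return reader.termPositionsEnum(null, "foo", new BytesRef("bar"), false);
      }
    }
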
@@ -135,7 +135,7 @@ public class MultiPassIndexSplitter {
       }
       Directory dir = FSDirectory.open(new File(args[i]));
       try {
-        if (!IndexReader.indexExists(dir)) {
+        if (!DirectoryReader.indexExists(dir)) {
          System.err.println("Invalid input index - skipping: " + file);
          continue;
        }
@@ -143,7 +143,7 @@ public class MultiPassIndexSplitter {
          System.err.println("Invalid input index - skipping: " + file);
          continue;
        }
-        indexes.add(IndexReader.open(dir));
+        indexes.add(DirectoryReader.open(dir));
      }
    }
    if (outDir == null) {
@@ -182,15 +182,15 @@ public class MultiPassIndexSplitter {
      super(initSubReaders(reader), false /* dont close */);
    }
 
-    private static IndexReader[] initSubReaders(IndexReader reader) throws IOException {
-      final ArrayList<IndexReader> subs = new ArrayList<IndexReader>();
+    private static AtomicReader[] initSubReaders(IndexReader reader) throws IOException {
+      final ArrayList<AtomicReader> subs = new ArrayList<AtomicReader>();
      new ReaderUtil.Gather(reader) {
        @Override
-        protected void add(int base, IndexReader r) {
+        protected void add(int base, AtomicReader r) {
          subs.add(new FakeDeleteAtomicIndexReader(r));
        }
      }.run();
-      return subs.toArray(new IndexReader[subs.size()]);
+      return subs.toArray(new AtomicReader[subs.size()]);
    }
 
    public void deleteDocument(int docID) {
@@ -226,7 +226,7 @@ public class MultiPassIndexSplitter {
  private static final class FakeDeleteAtomicIndexReader extends FilterIndexReader {
    FixedBitSet liveDocs;
 
-    public FakeDeleteAtomicIndexReader(IndexReader reader) {
+    public FakeDeleteAtomicIndexReader(AtomicReader reader) {
      super(reader);
      undeleteAll(); // initialize main bitset
    }
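
ReaderUtil.Gather's add() callback now receives an AtomicReader instead of a bare IndexReader, so code that visits every leaf gets the leaf-only API without casting. A sketch of the gathering idiom used above, under the assumption that run() recurses into composites as it does in these hunks; the collector class name is made up for illustration:

    import java.io.IOException;
    import java.util.ArrayList;
    import java.util.List;
    import org.apache.lucene.index.AtomicReader;
    import org.apache.lucene.index.IndexReader;
    import org.apache.lucene.util.ReaderUtil;

    public class LeafGatherer {
      public static List<AtomicReader> leavesOf(IndexReader top) throws IOException {
        final List<AtomicReader> leaves = new ArrayList<AtomicReader>();
        new ReaderUtil.Gather(top) {
          @Override
          protected void add(int base, AtomicReader r) {
            leaves.add(r); // base is this leaf's first docID within the composite
          }
        }.run();
        return leaves;
      }
    }
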
@@ -20,7 +20,6 @@ package org.apache.lucene.index;
 import java.io.IOException;
 
 import org.apache.lucene.index.IndexWriterConfig.OpenMode;
-import org.apache.lucene.index.IndexReader.AtomicReaderContext;
 import org.apache.lucene.search.DocIdSet;
 import org.apache.lucene.search.DocIdSetIterator;
 import org.apache.lucene.search.Filter;
@@ -84,7 +83,7 @@ public class PKIndexSplitter {
 
   public void split() throws IOException {
     boolean success = false;
-    IndexReader reader = IndexReader.open(input);
+    DirectoryReader reader = DirectoryReader.open(input);
     try {
       // pass an individual config in here since one config can not be reused!
       createIndex(config1, dir1, reader, docsInFirstIndex, false);
@@ -124,7 +123,7 @@ public class PKIndexSplitter {
     final int numDocs;
 
     public DocumentFilteredAtomicIndexReader(AtomicReaderContext context, Filter preserveFilter, boolean negateFilter) throws IOException {
-      super(context.reader);
+      super(context.reader());
       final int maxDoc = in.maxDoc();
       final FixedBitSet bits = new FixedBitSet(maxDoc);
       // ignore livedocs here, as we filter them later:
@@ -17,6 +17,7 @@ package org.apache.lucene.misc;
  * limitations under the License.
  */
 
+import org.apache.lucene.index.AtomicReader;
 import org.apache.lucene.index.IndexReader;
 import org.apache.lucene.index.MultiFields;
 import org.apache.lucene.index.Fields;
@@ -187,7 +188,7 @@ public class HighFreqTerms {
     new ReaderUtil.Gather(reader) {
 
       @Override
-      protected void add(int base, IndexReader r) throws IOException {
+      protected void add(int base, AtomicReader r) throws IOException {
         Bits liveDocs = r.getLiveDocs();
         if (liveDocs == null) {
           // TODO: we could do this up front, during the scan
@@ -60,7 +60,7 @@ public class TestIndexSplitter extends LuceneTestCase {
       iw.addDocument(doc);
     }
     iw.commit();
-    IndexReader iwReader = iw.getReader();
+    DirectoryReader iwReader = iw.getReader();
     assertEquals(3, iwReader.getSequentialSubReaders().length);
     iwReader.close();
     iw.close();
@@ -69,7 +69,7 @@ public class TestIndexSplitter extends LuceneTestCase {
     String splitSegName = is.infos.info(1).name;
     is.split(destDir, new String[] {splitSegName});
     Directory fsDirDest = newFSDirectory(destDir);
-    IndexReader r = IndexReader.open(fsDirDest);
+    DirectoryReader r = DirectoryReader.open(fsDirDest);
     assertEquals(50, r.maxDoc());
     r.close();
     fsDirDest.close();
@@ -81,14 +81,14 @@ public class TestIndexSplitter extends LuceneTestCase {
     IndexSplitter.main(new String[] {dir.getAbsolutePath(), destDir2.getAbsolutePath(), splitSegName});
     assertEquals(4, destDir2.listFiles().length);
     Directory fsDirDest2 = newFSDirectory(destDir2);
-    r = IndexReader.open(fsDirDest2);
+    r = DirectoryReader.open(fsDirDest2);
     assertEquals(50, r.maxDoc());
     r.close();
     fsDirDest2.close();
 
     // now remove the copied segment from src
     IndexSplitter.main(new String[] {dir.getAbsolutePath(), "-d", splitSegName});
-    r = IndexReader.open(fsDir);
+    r = DirectoryReader.open(fsDir);
     assertEquals(2, r.getSequentialSubReaders().length);
     r.close();
     fsDir.close();
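
The static index-opening helpers move with this change: IndexReader.indexExists() and IndexReader.open() become DirectoryReader.indexExists() and DirectoryReader.open(), and directory-backed readers are typed as DirectoryReader so composite-only methods such as getSequentialSubReaders() stay reachable. A before/after sketch over some Directory; the method and sentinel are illustrative:

    import java.io.IOException;
    import org.apache.lucene.index.DirectoryReader;
    import org.apache.lucene.store.Directory;

    public class OpenIndex {
      public static int maxDocOf(Directory dir) throws IOException {
        // before: IndexReader r = IndexReader.open(dir);
        if (!DirectoryReader.indexExists(dir)) {
          return -1; // illustrative sentinel for "no index here"
        }
        DirectoryReader r = DirectoryReader.open(dir);
        try {
          return r.maxDoc();
        } finally {
          r.close();
        }
      }
    }
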
@@ -17,7 +17,6 @@ package org.apache.lucene.sandbox.queries;
  */
 
 import org.apache.lucene.index.*;
-import org.apache.lucene.index.IndexReader.AtomicReaderContext;
 import org.apache.lucene.search.DocIdSet;
 import org.apache.lucene.search.Filter;
 import org.apache.lucene.util.Bits;
@@ -72,13 +71,13 @@ public class DuplicateFilter extends Filter {
   @Override
   public DocIdSet getDocIdSet(AtomicReaderContext context, Bits acceptDocs) throws IOException {
     if (processingMode == ProcessingMode.PM_FAST_INVALIDATION) {
-      return fastBits(context.reader, acceptDocs);
+      return fastBits(context.reader(), acceptDocs);
     } else {
-      return correctBits(context.reader, acceptDocs);
+      return correctBits(context.reader(), acceptDocs);
     }
   }
 
-  private FixedBitSet correctBits(IndexReader reader, Bits acceptDocs) throws IOException {
+  private FixedBitSet correctBits(AtomicReader reader, Bits acceptDocs) throws IOException {
     FixedBitSet bits = new FixedBitSet(reader.maxDoc()); //assume all are INvalid
     Terms terms = reader.fields().terms(fieldName);
 
@@ -115,7 +114,7 @@ public class DuplicateFilter extends Filter {
     return bits;
   }
 
-  private FixedBitSet fastBits(IndexReader reader, Bits acceptDocs) throws IOException {
+  private FixedBitSet fastBits(AtomicReader reader, Bits acceptDocs) throws IOException {
     FixedBitSet bits = new FixedBitSet(reader.maxDoc());
     bits.set(0, reader.maxDoc()); //assume all are valid
     Terms terms = reader.fields().terms(fieldName);
@@ -20,7 +20,7 @@ package org.apache.lucene.sandbox.queries;
 import java.io.IOException;
 import java.text.Collator;
 
-import org.apache.lucene.index.IndexReader.AtomicReaderContext;
+import org.apache.lucene.index.AtomicReaderContext;
 import org.apache.lucene.search.FieldCache;
 import org.apache.lucene.search.FieldCache.DocTerms;
 import org.apache.lucene.search.FieldComparator;
@@ -91,7 +91,7 @@ public final class SlowCollatedStringComparator extends FieldComparator<String>
 
   @Override
   public FieldComparator setNextReader(AtomicReaderContext context) throws IOException {
-    currentDocTerms = FieldCache.DEFAULT.getTerms(context.reader, field);
+    currentDocTerms = FieldCache.DEFAULT.getTerms(context.reader(), field);
     return this;
   }
 
@@ -19,7 +19,7 @@ package org.apache.lucene.spatial.geohash;
 
 import java.io.IOException;
 
-import org.apache.lucene.index.IndexReader.AtomicReaderContext;
+import org.apache.lucene.index.AtomicReaderContext;
 import org.apache.lucene.search.FieldCache;
 import org.apache.lucene.search.FieldCache.DocTerms;
 import org.apache.lucene.search.Filter;
@@ -60,11 +60,11 @@ public class GeoHashDistanceFilter extends DistanceFilter {
   @Override
   public DocIdSet getDocIdSet(AtomicReaderContext context, Bits acceptDocs) throws IOException {
 
-    final DocTerms geoHashValues = FieldCache.DEFAULT.getTerms(context.reader, geoHashField);
+    final DocTerms geoHashValues = FieldCache.DEFAULT.getTerms(context.reader(), geoHashField);
     final BytesRef br = new BytesRef();
 
     final int docBase = nextDocBase;
-    nextDocBase += context.reader.maxDoc();
+    nextDocBase += context.reader().maxDoc();
 
     return new FilteredDocIdSet(startingFilter.getDocIdSet(context, acceptDocs)) {
       @Override
@@ -19,8 +19,8 @@ package org.apache.lucene.spatial.tier;
 import java.io.IOException;
 import java.util.List;
 
+import org.apache.lucene.index.AtomicReaderContext;
 import org.apache.lucene.index.DocsEnum;
-import org.apache.lucene.index.IndexReader.AtomicReaderContext;
 import org.apache.lucene.search.Filter;
 import org.apache.lucene.search.DocIdSet;
 import org.apache.lucene.search.DocIdSetIterator;
@@ -57,7 +57,7 @@ public class CartesianShapeFilter extends Filter {
       return new DocIdSet() {
         @Override
         public DocIdSetIterator iterator() throws IOException {
-          return context.reader.termDocsEnum(acceptDocs, fieldName, bytesRef, false);
+          return context.reader().termDocsEnum(acceptDocs, fieldName, bytesRef, false);
         }
 
         @Override
@@ -66,11 +66,11 @@ public class CartesianShapeFilter extends Filter {
         }
       };
     } else {
-      final FixedBitSet bits = new FixedBitSet(context.reader.maxDoc());
+      final FixedBitSet bits = new FixedBitSet(context.reader().maxDoc());
       for (int i =0; i< sz; i++) {
         double boxId = area.get(i).doubleValue();
         NumericUtils.longToPrefixCoded(NumericUtils.doubleToSortableLong(boxId), 0, bytesRef);
-        final DocsEnum docsEnum = context.reader.termDocsEnum(acceptDocs, fieldName, bytesRef, false);
+        final DocsEnum docsEnum = context.reader().termDocsEnum(acceptDocs, fieldName, bytesRef, false);
         if (docsEnum == null) continue;
         // iterate through all documents
         // which have this boxId
@@ -19,7 +19,7 @@ package org.apache.lucene.spatial.tier;
 
 import java.io.IOException;
 
-import org.apache.lucene.index.IndexReader.AtomicReaderContext;
+import org.apache.lucene.index.AtomicReaderContext;
 import org.apache.lucene.search.Filter;
 import org.apache.lucene.search.FieldComparator;
 import org.apache.lucene.search.FieldComparatorSource;
@@ -19,7 +19,7 @@ package org.apache.lucene.spatial.tier;
 
 import java.io.IOException;
 
-import org.apache.lucene.index.IndexReader.AtomicReaderContext;
+import org.apache.lucene.index.AtomicReaderContext;
 import org.apache.lucene.search.FilteredDocIdSet;
 import org.apache.lucene.search.FieldCache;
 import org.apache.lucene.search.Filter;
@@ -63,11 +63,11 @@ public class LatLongDistanceFilter extends DistanceFilter {
   @Override
   public DocIdSet getDocIdSet(AtomicReaderContext context, Bits acceptDocs) throws IOException {
 
-    final double[] latIndex = FieldCache.DEFAULT.getDoubles(context.reader, latField, false);
-    final double[] lngIndex = FieldCache.DEFAULT.getDoubles(context.reader, lngField, false);
+    final double[] latIndex = FieldCache.DEFAULT.getDoubles(context.reader(), latField, false);
+    final double[] lngIndex = FieldCache.DEFAULT.getDoubles(context.reader(), lngField, false);
 
     final int docBase = nextDocBase;
-    nextDocBase += context.reader.maxDoc();
+    nextDocBase += context.reader().maxDoc();
 
     return new FilteredDocIdSet(startingFilter.getDocIdSet(context, acceptDocs)) {
       @Override
@@ -23,8 +23,8 @@ import org.apache.lucene.document.Document;
 import org.apache.lucene.document.FieldType;
 import org.apache.lucene.document.NumericField;
 import org.apache.lucene.document.TextField;
+import org.apache.lucene.index.AtomicReaderContext;
 import org.apache.lucene.index.FieldInfo.IndexOptions;
-import org.apache.lucene.index.IndexReader.AtomicReaderContext;
 import org.apache.lucene.index.IndexReader;
 import org.apache.lucene.index.IndexWriter;
 import org.apache.lucene.index.Term;
@@ -119,7 +119,7 @@ public class TestDistance extends LuceneTestCase {
 
     AtomicReaderContext[] leaves = ReaderUtil.leaves(r.getTopReaderContext());
     for (int i = 0; i < leaves.length; i++) {
-      f.getDocIdSet(leaves[i], leaves[i].reader.getLiveDocs());
+      f.getDocIdSet(leaves[i], leaves[i].reader().getLiveDocs());
     }
     r.close();
   }
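
The test above shows the per-leaf execution idiom in full: flatten the reader tree with ReaderUtil.leaves() and hand each AtomicReaderContext, plus its leaf's live docs, to the filter. A standalone sketch of the same loop; the comment marks where real code would consume the DocIdSet:

    import java.io.IOException;
    import org.apache.lucene.index.AtomicReaderContext;
    import org.apache.lucene.index.IndexReader;
    import org.apache.lucene.search.DocIdSet;
    import org.apache.lucene.search.Filter;
    import org.apache.lucene.util.ReaderUtil;

    public class PerLeafFilterRunner {
      public static void run(Filter f, IndexReader top) throws IOException {
        AtomicReaderContext[] leaves = ReaderUtil.leaves(top.getTopReaderContext());
        for (AtomicReaderContext leaf : leaves) {
          DocIdSet set = f.getDocIdSet(leaf, leaf.reader().getLiveDocs());
          // iterate set here; leaf.docBase rebases leaf-local docIDs to top-level ones
        }
      }
    }
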
@@ -20,7 +20,7 @@ import java.io.IOException;
 
 import org.apache.lucene.index.FieldInfo;
 import org.apache.lucene.index.DocValues;
-import org.apache.lucene.index.IndexReader;
+import org.apache.lucene.index.AtomicReader;
 import org.apache.lucene.index.MergeState;
 import org.apache.lucene.index.DocValues.Type;
 
@@ -65,13 +65,13 @@ public abstract class PerDocConsumer implements Closeable {
   /**
    * Returns a {@link DocValues} instance for merging from the given reader for the given
    * {@link FieldInfo}. This method is used for merging and uses
-   * {@link IndexReader#docValues(String)} by default.
+   * {@link AtomicReader#docValues(String)} by default.
    * <p>
    * To enable {@link DocValues} merging for different {@link DocValues} than
    * the default override this method accordingly.
    * <p>
    */
-  protected DocValues getDocValuesForMerge(IndexReader reader, FieldInfo info) throws IOException {
+  protected DocValues getDocValuesForMerge(AtomicReader reader, FieldInfo info) throws IOException {
     return reader.docValues(info.name);
   }
 
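
PerDocConsumer.getDocValuesForMerge() now takes the merged segment's AtomicReader directly. As its javadoc says, codecs that need to merge something other than ordinary doc values override it; the two norms consumers in the next hunks do exactly that. Isolated as a sketch, such an override (inside a PerDocConsumer subclass) looks like:

    // Sketch of a PerDocConsumer subclass redirecting merging at norms,
    // mirroring the Lucene40NormsFormat/SimpleTextNormsConsumer hunks below.
    @Override
    protected DocValues getDocValuesForMerge(AtomicReader reader, FieldInfo info)
        throws IOException {
      return reader.normValues(info.name); // merge norms instead of docValues(...)
    }
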
@@ -26,7 +26,7 @@ import org.apache.lucene.index.DocValues.Type;
 import org.apache.lucene.index.FieldInfo;
 import org.apache.lucene.index.FieldInfos;
 import org.apache.lucene.index.IndexFileNames;
-import org.apache.lucene.index.IndexReader;
+import org.apache.lucene.index.AtomicReader;
 import org.apache.lucene.index.PerDocWriteState;
 import org.apache.lucene.index.SegmentInfo;
 import org.apache.lucene.index.SegmentReadState;
@@ -85,7 +85,7 @@ public class Lucene40NormsFormat extends NormsFormat {
   }
 
   @Override
-  protected DocValues getDocValuesForMerge(IndexReader reader, FieldInfo info)
+  protected DocValues getDocValuesForMerge(AtomicReader reader, FieldInfo info)
       throws IOException {
     return reader.normValues(info.name);
   }
@@ -28,7 +28,7 @@ import org.apache.lucene.index.DocValues;
 import org.apache.lucene.index.FieldInfo;
 import org.apache.lucene.index.FieldInfos;
 import org.apache.lucene.index.IndexFileNames;
-import org.apache.lucene.index.IndexReader;
+import org.apache.lucene.index.AtomicReader;
 import org.apache.lucene.index.IndexableField;
 import org.apache.lucene.index.SegmentInfo;
 import org.apache.lucene.store.Directory;
@@ -87,7 +87,7 @@ public class SimpleTextNormsConsumer extends PerDocConsumer {
   }
 
   @Override
-  protected DocValues getDocValuesForMerge(IndexReader reader, FieldInfo info)
+  protected DocValues getDocValuesForMerge(AtomicReader reader, FieldInfo info)
       throws IOException {
     return reader.normValues(info.name);
   }
@@ -0,0 +1,248 @@
+package org.apache.lucene.index;
+
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import java.io.IOException;
+
+import org.apache.lucene.search.SearcherManager; // javadocs
+import org.apache.lucene.store.*;
+import org.apache.lucene.util.Bits;
+import org.apache.lucene.util.BytesRef;
+import org.apache.lucene.util.ReaderUtil; // for javadocs
+
+/** {@code AtomicReader} is an abstract class, providing an interface for accessing an
+ index.  Search of an index is done entirely through this abstract interface,
+ so that any subclass which implements it is searchable. IndexReaders implemented
+ by this subclass do not consist of several sub-readers;
+ they are atomic. They support retrieval of stored fields, doc values, terms,
+ and postings.
+
+ <p>For efficiency, in this API documents are often referred to via
+ <i>document numbers</i>, non-negative integers which each name a unique
+ document in the index.  These document numbers are ephemeral -- they may change
+ as documents are added to and deleted from an index.  Clients should thus not
+ rely on a given document having the same number between sessions.
+
+ <p>
+ <a name="thread-safety"></a><p><b>NOTE</b>: {@link
+ IndexReader} instances are completely thread
+ safe, meaning multiple threads can call any of its methods,
+ concurrently.  If your application requires external
+ synchronization, you should <b>not</b> synchronize on the
+ <code>IndexReader</code> instance; use your own
+ (non-Lucene) objects instead.
+ */
+public abstract class AtomicReader extends IndexReader {
+
+  private final AtomicReaderContext readerContext = new AtomicReaderContext(this);
+
+  protected AtomicReader() {
+    super();
+  }
+
+  @Override
+  public final AtomicReaderContext getTopReaderContext() {
+    ensureOpen();
+    return readerContext;
+  }
+
+  /** Returns true if there are norms stored for this field. */
+  public boolean hasNorms(String field) throws IOException {
+    // backward compatible implementation.
+    // SegmentReader has an efficient implementation.
+    ensureOpen();
+    return normValues(field) != null;
+  }
+
+  /**
+   * Returns {@link Fields} for this reader.
+   * This method may return null if the reader has no
+   * postings.
+   */
+  public abstract Fields fields() throws IOException;
+
+  @Override
+  public final int docFreq(String field, BytesRef term) throws IOException {
+    final Fields fields = fields();
+    if (fields == null) {
+      return 0;
+    }
+    final Terms terms = fields.terms(field);
+    if (terms == null) {
+      return 0;
+    }
+    final TermsEnum termsEnum = terms.iterator(null);
+    if (termsEnum.seekExact(term, true)) {
+      return termsEnum.docFreq();
+    } else {
+      return 0;
+    }
+  }
+
+  /** Returns the total number of occurrences of the term
+   * <code>t</code>.  This method returns 0 if the term or
+   * field does not exist.  This method does not take into
+   * account deleted documents that have not yet been merged
+   * away. */
+  public final long totalTermFreq(String field, BytesRef term) throws IOException {
+    final Fields fields = fields();
+    if (fields == null) {
+      return 0;
+    }
+    final Terms terms = fields.terms(field);
+    if (terms == null) {
+      return 0;
+    }
+    final TermsEnum termsEnum = terms.iterator(null);
+    if (termsEnum.seekExact(term, true)) {
+      return termsEnum.totalTermFreq();
+    } else {
+      return 0;
+    }
+  }
+
+  /** This may return null if the field does not exist.*/
+  public final Terms terms(String field) throws IOException {
+    final Fields fields = fields();
+    if (fields == null) {
+      return null;
+    }
+    return fields.terms(field);
+  }
+
+  /** Returns {@link DocsEnum} for the specified field &
+   *  term.  This may return null, if either the field or
+   *  term does not exist. */
+  public final DocsEnum termDocsEnum(Bits liveDocs, String field, BytesRef term, boolean needsFreqs) throws IOException {
+    assert field != null;
+    assert term != null;
+    final Fields fields = fields();
+    if (fields != null) {
+      final Terms terms = fields.terms(field);
+      if (terms != null) {
+        final TermsEnum termsEnum = terms.iterator(null);
+        if (termsEnum.seekExact(term, true)) {
+          return termsEnum.docs(liveDocs, null, needsFreqs);
+        }
+      }
+    }
+    return null;
+  }
+
+  /** Returns {@link DocsAndPositionsEnum} for the specified
+   *  field & term.  This may return null, if either the
+   *  field or term does not exist, or needsOffsets is
+   *  true but offsets were not indexed for this field. */
+  public final DocsAndPositionsEnum termPositionsEnum(Bits liveDocs, String field, BytesRef term, boolean needsOffsets) throws IOException {
+    assert field != null;
+    assert term != null;
+    final Fields fields = fields();
+    if (fields != null) {
+      final Terms terms = fields.terms(field);
+      if (terms != null) {
+        final TermsEnum termsEnum = terms.iterator(null);
+        if (termsEnum.seekExact(term, true)) {
+          return termsEnum.docsAndPositions(liveDocs, null, needsOffsets);
+        }
+      }
+    }
+    return null;
+  }
+
+  /**
+   * Returns {@link DocsEnum} for the specified field and
+   * {@link TermState}. This may return null, if either the field or the term
+   * does not exist or the {@link TermState} is invalid for the underlying
+   * implementation.*/
+  public final DocsEnum termDocsEnum(Bits liveDocs, String field, BytesRef term, TermState state, boolean needsFreqs) throws IOException {
+    assert state != null;
+    assert field != null;
+    final Fields fields = fields();
+    if (fields != null) {
+      final Terms terms = fields.terms(field);
+      if (terms != null) {
+        final TermsEnum termsEnum = terms.iterator(null);
+        termsEnum.seekExact(term, state);
+        return termsEnum.docs(liveDocs, null, needsFreqs);
+      }
+    }
+    return null;
+  }
+
+  /**
+   * Returns {@link DocsAndPositionsEnum} for the specified field and
+   * {@link TermState}. This may return null, if either the field or the term
+   * does not exist, the {@link TermState} is invalid for the underlying
+   * implementation, or needsOffsets is true but offsets
+   * were not indexed for this field. */
+  public final DocsAndPositionsEnum termPositionsEnum(Bits liveDocs, String field, BytesRef term, TermState state, boolean needsOffsets) throws IOException {
+    assert state != null;
+    assert field != null;
+    final Fields fields = fields();
+    if (fields != null) {
+      final Terms terms = fields.terms(field);
+      if (terms != null) {
+        final TermsEnum termsEnum = terms.iterator(null);
+        termsEnum.seekExact(term, state);
+        return termsEnum.docsAndPositions(liveDocs, null, needsOffsets);
+      }
+    }
+    return null;
+  }
+
+  /** Returns the number of unique terms (across all fields)
+   *  in this reader.
+   */
+  public final long getUniqueTermCount() throws IOException {
+    final Fields fields = fields();
+    if (fields == null) {
+      return 0;
+    }
+    return fields.getUniqueTermCount();
+  }
+
+  /**
+   * Returns {@link DocValues} for this field.
+   * This method may return null if the reader has no per-document
+   * values stored.
+   */
+  public abstract DocValues docValues(String field) throws IOException;
+
+  public abstract DocValues normValues(String field) throws IOException;
+
+  /**
+   * Get the {@link FieldInfos} describing all fields in
+   * this reader.  NOTE: do not make any changes to the
+   * returned FieldInfos!
+   *
+   * @lucene.experimental
+   */
+  public abstract FieldInfos getFieldInfos();
+
+  /** Returns the {@link Bits} representing live (not
+   *  deleted) docs.  A set bit indicates the doc ID has not
+   *  been deleted.  If this method returns null it means
+   *  there are no deleted documents (all documents are
+   *  live).
+   *
+   *  The returned instance has been safely published for
+   *  use by multiple threads without additional
+   *  synchronization.
+   */
+  public abstract Bits getLiveDocs();
+}
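
All of the convenience methods above funnel through the single abstract fields() accessor: look up the field's Terms, seek a TermsEnum, and return the postings. A short sketch of consuming those postings through the new leaf API; the counting logic is illustrative:

    import java.io.IOException;
    import org.apache.lucene.index.AtomicReader;
    import org.apache.lucene.index.DocsEnum;
    import org.apache.lucene.search.DocIdSetIterator;
    import org.apache.lucene.util.BytesRef;

    public class PostingsWalker {
      /** Counts live documents containing the given term in one leaf. */
      public static int countDocs(AtomicReader reader, String field, BytesRef term) throws IOException {
        DocsEnum docs = reader.termDocsEnum(reader.getLiveDocs(), field, term, false);
        if (docs == null) {
          return 0; // field or term absent in this leaf
        }
        int count = 0;
        while (docs.nextDoc() != DocIdSetIterator.NO_MORE_DOCS) {
          count++;
        }
        return count;
      }
    }
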
@@ -0,0 +1,61 @@
+package org.apache.lucene.index;
+
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/**
+ * {@link IndexReaderContext} for {@link AtomicReader} instances
+ * @lucene.experimental
+ */
+public final class AtomicReaderContext extends IndexReaderContext {
+  /** The reader's ord in the top-level's leaves array */
+  public final int ord;
+  /** The reader's absolute doc base */
+  public final int docBase;
+
+  private final AtomicReader reader;
+
+  /**
+   * Creates a new {@link AtomicReaderContext}
+   */
+  AtomicReaderContext(CompositeReaderContext parent, AtomicReader reader,
+      int ord, int docBase, int leafOrd, int leafDocBase) {
+    super(parent, ord, docBase);
+    this.ord = leafOrd;
+    this.docBase = leafDocBase;
+    this.reader = reader;
+  }
+
+  AtomicReaderContext(AtomicReader atomicReader) {
+    this(null, atomicReader, 0, 0, 0, 0);
+  }
+
+  @Override
+  public AtomicReaderContext[] leaves() {
+    return null;
+  }
+
+  @Override
+  public IndexReaderContext[] children() {
+    return null;
+  }
+
+  @Override
+  public AtomicReader reader() {
+    return reader;
+  }
+}
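
An AtomicReaderContext bundles a leaf reader with its position inside the top-level reader: ord is its slot in the leaves array and docBase its first top-level docID. A tiny sketch of the arithmetic these fields enable; the helper class is illustrative:

    import org.apache.lucene.index.AtomicReaderContext;

    public class LeafDocMath {
      /** Rebases a leaf-local hit to a top-level docID. */
      public static int globalDocID(AtomicReaderContext leaf, int leafDocID) {
        return leaf.docBase + leafDocID;
      }

      /** reader() is strongly typed, so leaf-only calls need no cast. */
      public static int leafMaxDoc(AtomicReaderContext leaf) {
        return leaf.reader().maxDoc();
      }
    }
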
@@ -23,10 +23,9 @@ import org.apache.lucene.util.Bits;
 import org.apache.lucene.util.BytesRef;
 import org.apache.lucene.util.ReaderUtil;
 
-abstract class BaseMultiReader<R extends IndexReader> extends IndexReader {
+abstract class BaseMultiReader<R extends IndexReader> extends CompositeReader {
   protected final R[] subReaders;
   protected final int[] starts; // 1st docno for each segment
-  private final ReaderContext topLevelContext;
   private final int maxDoc;
   private final int numDocs;
   private final boolean hasDeletions;
@@ -40,7 +39,6 @@ abstract class BaseMultiReader<R extends IndexReader> extends IndexReader {
       starts[i] = maxDoc;
       maxDoc += subReaders[i].maxDoc(); // compute maxDocs
       numDocs += subReaders[i].numDocs(); // compute numDocs
-
       if (subReaders[i].hasDeletions()) {
         hasDeletions = true;
       }
@@ -49,25 +47,6 @@ abstract class BaseMultiReader<R extends IndexReader> extends IndexReader {
     this.maxDoc = maxDoc;
     this.numDocs = numDocs;
     this.hasDeletions = hasDeletions;
-    topLevelContext = ReaderUtil.buildReaderContext(this);
   }
-
-  @Override
-  public FieldInfos getFieldInfos() {
-    throw new UnsupportedOperationException("call getFieldInfos() on each sub reader, or use ReaderUtil.getMergedFieldInfos, instead");
-  }
-
-  @Override
-  public Fields fields() throws IOException {
-    throw new UnsupportedOperationException("please use MultiFields.getFields, or wrap your IndexReader with SlowMultiReaderWrapper, if you really need a top level Fields");
-  }
-
-  @Override
-  protected abstract IndexReader doOpenIfChanged() throws CorruptIndexException, IOException;
-
-  @Override
-  public Bits getLiveDocs() {
-    throw new UnsupportedOperationException("please use MultiFields.getLiveDocs, or wrap your IndexReader with SlowMultiReaderWrapper, if you really need a top level Bits liveDocs");
-  }
 
   @Override
@@ -102,23 +81,6 @@ abstract class BaseMultiReader<R extends IndexReader> extends IndexReader {
     return hasDeletions;
   }
 
-  /** Helper method for subclasses to get the corresponding reader for a doc ID */
-  protected final int readerIndex(int docID) {
-    if (docID < 0 || docID >= maxDoc) {
-      throw new IllegalArgumentException("docID must be >= 0 and < maxDoc=" + maxDoc + " (got docID=" + docID + ")");
-    }
-    return ReaderUtil.subIndex(docID, this.starts);
-  }
-
-  @Override
-  public boolean hasNorms(String field) throws IOException {
-    ensureOpen();
-    for (int i = 0; i < subReaders.length; i++) {
-      if (subReaders[i].hasNorms(field)) return true;
-    }
-    return false;
-  }
-
   @Override
   public int docFreq(String field, BytesRef t) throws IOException {
     ensureOpen();
@@ -129,23 +91,16 @@ abstract class BaseMultiReader<R extends IndexReader> extends IndexReader {
     return total;
   }
 
+  /** Helper method for subclasses to get the corresponding reader for a doc ID */
+  protected final int readerIndex(int docID) {
+    if (docID < 0 || docID >= maxDoc) {
+      throw new IllegalArgumentException("docID must be >= 0 and < maxDoc=" + maxDoc + " (got docID=" + docID + ")");
+    }
+    return ReaderUtil.subIndex(docID, this.starts);
+  }
+
   @Override
   public IndexReader[] getSequentialSubReaders() {
     return subReaders;
   }
-
-  @Override
-  public ReaderContext getTopReaderContext() {
-    return topLevelContext;
-  }
-
-  @Override
-  public DocValues docValues(String field) throws IOException {
-    throw new UnsupportedOperationException("please use MultiDocValues#getDocValues, or wrap your IndexReader with SlowMultiReaderWrapper, if you really need a top level DocValues");
-  }
-
-  @Override
-  public DocValues normValues(String field) throws IOException {
-    throw new UnsupportedOperationException("please use MultiDocValues#getNormValues, or wrap your IndexReader with SlowMultiReaderWrapper, if you really need a top level Norm DocValues ");
-  }
 }
@@ -25,7 +25,6 @@ import java.util.Collections;
 import java.util.concurrent.atomic.AtomicInteger;
 import java.util.concurrent.atomic.AtomicLong;
 
-import org.apache.lucene.index.IndexReader.AtomicReaderContext;
 import org.apache.lucene.search.DocIdSet;
 import org.apache.lucene.search.DocIdSetIterator;
 import org.apache.lucene.search.Query;
@@ -435,14 +434,14 @@ class BufferedDeletesStream {
   }
 
   // Delete by query
-  private static long applyQueryDeletes(Iterable<QueryAndLimit> queriesIter, IndexWriter.ReadersAndLiveDocs rld, SegmentReader reader) throws IOException {
+  private static long applyQueryDeletes(Iterable<QueryAndLimit> queriesIter, IndexWriter.ReadersAndLiveDocs rld, final SegmentReader reader) throws IOException {
     long delCount = 0;
-    final AtomicReaderContext readerContext = (AtomicReaderContext) reader.getTopReaderContext();
+    final AtomicReaderContext readerContext = reader.getTopReaderContext();
     boolean any = false;
     for (QueryAndLimit ent : queriesIter) {
       Query query = ent.query;
       int limit = ent.limit;
-      final DocIdSet docs = new QueryWrapperFilter(query).getDocIdSet(readerContext, readerContext.reader.getLiveDocs());
+      final DocIdSet docs = new QueryWrapperFilter(query).getDocIdSet(readerContext, reader.getLiveDocs());
       if (docs != null) {
         final DocIdSetIterator it = docs.iterator();
         if (it != null) {
@@ -537,7 +537,7 @@ public class CheckIndex {
       }
       if (infoStream != null)
         infoStream.print(" test: open reader.........");
-      reader = new SegmentReader(info, IndexReader.DEFAULT_TERMS_INDEX_DIVISOR, IOContext.DEFAULT);
+      reader = new SegmentReader(info, DirectoryReader.DEFAULT_TERMS_INDEX_DIVISOR, IOContext.DEFAULT);
 
       segInfoStat.openReaderPassed = true;
 
@@ -0,0 +1,97 @@
+package org.apache.lucene.index;
+
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import org.apache.lucene.search.SearcherManager; // javadocs
+import org.apache.lucene.store.*;
+
+/** Instances of this reader type can only
+ be used to get stored fields from the underlying AtomicReaders,
+ but it is not possible to directly retrieve postings. To do that, get
+ the sub-readers via {@link #getSequentialSubReaders}.
+ Alternatively, you can mimic an {@link AtomicReader} (with a serious slowdown),
+ by wrapping composite readers with {@link SlowCompositeReaderWrapper}.
+
+ <p>IndexReader instances for indexes on disk are usually constructed
+ with a call to one of the static <code>DirectoryReader.open()</code> methods,
+ e.g. {@link DirectoryReader#open(Directory)}. {@link DirectoryReader} implements
+ the {@code CompositeReader} interface; it is not possible to directly get postings.
+ <p> Concrete subclasses of IndexReader are usually constructed with a call to
+ one of the static <code>open()</code> methods, e.g. {@link
+ #open(Directory)}.
+
+ <p> For efficiency, in this API documents are often referred to via
+ <i>document numbers</i>, non-negative integers which each name a unique
+ document in the index.  These document numbers are ephemeral -- they may change
+ as documents are added to and deleted from an index.  Clients should thus not
+ rely on a given document having the same number between sessions.
+
+ <p>
+ <a name="thread-safety"></a><p><b>NOTE</b>: {@link
+ IndexReader} instances are completely thread
+ safe, meaning multiple threads can call any of its methods,
+ concurrently.  If your application requires external
+ synchronization, you should <b>not</b> synchronize on the
+ <code>IndexReader</code> instance; use your own
+ (non-Lucene) objects instead.
+*/
+public abstract class CompositeReader extends IndexReader {
+
+  private volatile CompositeReaderContext readerContext = null; // lazy init
+
+  protected CompositeReader() {
+    super();
+  }
+
+  @Override
+  public String toString() {
+    final StringBuilder buffer = new StringBuilder();
+    buffer.append(getClass().getSimpleName());
+    buffer.append('(');
+    final IndexReader[] subReaders = getSequentialSubReaders();
+    assert subReaders != null;
+    if (subReaders.length > 0) {
+      buffer.append(subReaders[0]);
+      for (int i = 1; i < subReaders.length; ++i) {
+        buffer.append(" ").append(subReaders[i]);
+      }
+    }
+    buffer.append(')');
+    return buffer.toString();
+  }
+
+  /** Expert: returns the sequential sub readers that this
+   *  reader is logically composed of. In contrast to previous
+   *  Lucene versions this method may not return null.
+   *  If this method returns an empty array, that means this
+   *  reader is a null reader (for example a MultiReader
+   *  that has no sub readers).
+   */
+  public abstract IndexReader[] getSequentialSubReaders();
+
+  @Override
+  public final CompositeReaderContext getTopReaderContext() {
+    ensureOpen();
+    // lazy init without thread safety for perf reasons: Building the readerContext twice does not hurt!
+    if (readerContext == null) {
+      assert getSequentialSubReaders() != null;
+      readerContext = CompositeReaderContext.create(this);
+    }
+    return readerContext;
+  }
+}
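
A hedged sketch of consuming the new composite type; "dir" and the "body" field are assumptions, and the SlowCompositeReaderWrapper path the javadoc mentions is omitted since its exact construction may differ at this revision:

    DirectoryReader reader = DirectoryReader.open(dir);   // a CompositeReader
    for (IndexReader sub : reader.getSequentialSubReaders()) {
      AtomicReader leaf = (AtomicReader) sub;             // DirectoryReader's subs are SegmentReaders
      Fields fields = leaf.fields();                      // per-segment postings access
      if (fields != null) {
        Terms terms = fields.terms("body");               // hypothetical field name
        // ... iterate terms/postings here ...
      }
    }

Note the cast is safe for DirectoryReader specifically; in general a composite's sub-readers may themselves be composite, which is what the context tree below handles.
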
@@ -0,0 +1,136 @@
+package org.apache.lucene.index;
+
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import java.io.IOException;
+import org.apache.lucene.util.ReaderUtil;
+
+/**
+ * {@link IndexReaderContext} for {@link CompositeReader} instances.
+ * @lucene.experimental
+ */
+public final class CompositeReaderContext extends IndexReaderContext {
+  private final IndexReaderContext[] children;
+  private final AtomicReaderContext[] leaves;
+  private final CompositeReader reader;
+
+  static CompositeReaderContext create(CompositeReader reader) {
+    return new Builder(reader).build();
+  }
+
+  /**
+   * Creates a {@link CompositeReaderContext} for intermediate readers that are
+   * not top-level readers in the current context
+   */
+  CompositeReaderContext(CompositeReaderContext parent, CompositeReader reader,
+      int ordInParent, int docbaseInParent, IndexReaderContext[] children) {
+    this(parent, reader, ordInParent, docbaseInParent, children, null);
+  }
+
+  /**
+   * Creates a {@link CompositeReaderContext} for top-level readers with parent set to <code>null</code>
+   */
+  CompositeReaderContext(CompositeReader reader, IndexReaderContext[] children, AtomicReaderContext[] leaves) {
+    this(null, reader, 0, 0, children, leaves);
+  }
+
+  private CompositeReaderContext(CompositeReaderContext parent, CompositeReader reader,
+      int ordInParent, int docbaseInParent, IndexReaderContext[] children,
+      AtomicReaderContext[] leaves) {
+    super(parent, ordInParent, docbaseInParent);
+    this.children = children;
+    this.leaves = leaves;
+    this.reader = reader;
+  }
+
+  @Override
+  public AtomicReaderContext[] leaves() {
+    return leaves;
+  }
+
+  @Override
+  public IndexReaderContext[] children() {
+    return children;
+  }
+
+  @Override
+  public CompositeReader reader() {
+    return reader;
+  }
+
+  private static final class Builder {
+    private final CompositeReader reader;
+    private final AtomicReaderContext[] leaves;
+    private int leafOrd = 0;
+    private int leafDocBase = 0;
+
+    public Builder(CompositeReader reader) {
+      this.reader = reader;
+      leaves = new AtomicReaderContext[numLeaves(reader)];
+    }
+
+    public CompositeReaderContext build() {
+      return (CompositeReaderContext) build(null, reader, 0, 0);
+    }
+
+    private IndexReaderContext build(CompositeReaderContext parent, IndexReader reader, int ord, int docBase) {
+      if (reader instanceof AtomicReader) {
+        final AtomicReader ar = (AtomicReader) reader;
+        final AtomicReaderContext atomic = new AtomicReaderContext(parent, ar, ord, docBase, leafOrd, leafDocBase);
+        leaves[leafOrd++] = atomic;
+        leafDocBase += reader.maxDoc();
+        return atomic;
+      } else {
+        final CompositeReader cr = (CompositeReader) reader;
+        final IndexReader[] sequentialSubReaders = cr.getSequentialSubReaders();
+        final IndexReaderContext[] children = new IndexReaderContext[sequentialSubReaders.length];
+        final CompositeReaderContext newParent;
+        if (parent == null) {
+          newParent = new CompositeReaderContext(cr, children, leaves);
+        } else {
+          newParent = new CompositeReaderContext(parent, cr, ord, docBase, children);
+        }
+        int newDocBase = 0;
+        for (int i = 0; i < sequentialSubReaders.length; i++) {
+          children[i] = build(newParent, sequentialSubReaders[i], i, newDocBase);
+          newDocBase += sequentialSubReaders[i].maxDoc();
+        }
+        return newParent;
+      }
+    }
+
+    private int numLeaves(IndexReader reader) {
+      final int[] numLeaves = new int[1];
+      try {
+        new ReaderUtil.Gather(reader) {
+          @Override
+          protected void add(int base, AtomicReader r) {
+            numLeaves[0]++;
+          }
+        }.run();
+      } catch (IOException ioe) {
+        // won't happen
+        throw new RuntimeException(ioe);
+      }
+      return numLeaves[0];
+    }
+
+  }
+}
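
The Builder assigns each leaf a running document base, which is what makes global docIDs resolvable. A hedged sketch of that resolution, assuming AtomicReaderContext exposes the base as a public docBase field (the Builder's bookkeeping suggests it does); a linear scan is shown for clarity, though the leaves are ordered by docBase so a binary search is the usual approach:

    CompositeReaderContext top = reader.getTopReaderContext();
    int globalDocID = 12345;                          // hypothetical example value
    for (AtomicReaderContext leaf : top.leaves()) {
      int local = globalDocID - leaf.docBase;
      if (local >= 0 && local < leaf.reader().maxDoc()) {
        Document doc = leaf.reader().document(local); // stored fields from the owning segment
        break;
      }
    }
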
@@ -26,20 +26,126 @@ import java.util.HashMap;
 import java.util.List;
 import java.util.Map;

+import org.apache.lucene.search.SearcherManager; // javadocs
 import org.apache.lucene.store.Directory;
 import org.apache.lucene.store.IOContext;
 import org.apache.lucene.util.IOUtils;

-/**
- * An IndexReader which reads indexes with multiple segments.
- */
-final class DirectoryReader extends BaseMultiReader<SegmentReader> {
+/** DirectoryReader is an implementation of {@link CompositeReader}
+ that can read indexes in a {@link Directory}.
+
+ <p>DirectoryReader instances are usually constructed with a call to
+ one of the static <code>open()</code> methods, e.g. {@link
+ #open(Directory)}.
+
+ <p> For efficiency, in this API documents are often referred to via
+ <i>document numbers</i>, non-negative integers which each name a unique
+ document in the index.  These document numbers are ephemeral -- they may change
+ as documents are added to and deleted from an index.  Clients should thus not
+ rely on a given document having the same number between sessions.
+
+ <p>
+ <a name="thread-safety"></a><p><b>NOTE</b>: {@link
+ IndexReader} instances are completely thread
+ safe, meaning multiple threads can call any of its methods,
+ concurrently.  If your application requires external
+ synchronization, you should <b>not</b> synchronize on the
+ <code>IndexReader</code> instance; use your own
+ (non-Lucene) objects instead.
+*/
+public final class DirectoryReader extends BaseMultiReader<SegmentReader> {
+  static int DEFAULT_TERMS_INDEX_DIVISOR = 1;
+
   protected final Directory directory;
   private final IndexWriter writer;
   private final SegmentInfos segmentInfos;
   private final int termInfosIndexDivisor;
   private final boolean applyAllDeletes;
+
+  /** Returns an IndexReader reading the index in the given
+   *  Directory
+   * @param directory the index directory
+   * @throws CorruptIndexException if the index is corrupt
+   * @throws IOException if there is a low-level IO error
+   */
+  public static DirectoryReader open(final Directory directory) throws CorruptIndexException, IOException {
+    return open(directory, null, DEFAULT_TERMS_INDEX_DIVISOR);
+  }
+
+  /** Expert: Returns an IndexReader reading the index in the given
+   *  Directory with the given termInfosIndexDivisor.
+   * @param directory the index directory
+   * @param termInfosIndexDivisor Subsamples which indexed
+   *  terms are loaded into RAM. This has the same effect as {@link
+   *  IndexWriterConfig#setTermIndexInterval} except that setting
+   *  must be done at indexing time while this setting can be
+   *  set per reader.  When set to N, then one in every
+   *  N*termIndexInterval terms in the index is loaded into
+   *  memory.  By setting this to a value > 1 you can reduce
+   *  memory usage, at the expense of higher latency when
+   *  loading a TermInfo.  The default value is 1.  Set this
+   *  to -1 to skip loading the terms index entirely.
+   * @throws CorruptIndexException if the index is corrupt
+   * @throws IOException if there is a low-level IO error
+   */
+  public static DirectoryReader open(final Directory directory, int termInfosIndexDivisor) throws CorruptIndexException, IOException {
+    return open(directory, null, termInfosIndexDivisor);
+  }
+
+  /**
+   * Open a near real time IndexReader from the {@link org.apache.lucene.index.IndexWriter}.
+   *
+   * @param writer The IndexWriter to open from
+   * @param applyAllDeletes If true, all buffered deletes will
+   * be applied (made visible) in the returned reader.  If
+   * false, the deletes are not applied but remain buffered
+   * (in IndexWriter) so that they will be applied in the
+   * future.  Applying deletes can be costly, so if your app
+   * can tolerate deleted documents being returned you might
+   * gain some performance by passing false.
+   * @return The new IndexReader
+   * @throws CorruptIndexException
+   * @throws IOException if there is a low-level IO error
+   *
+   * @see #openIfChanged(DirectoryReader,IndexWriter,boolean)
+   *
+   * @lucene.experimental
+   */
+  public static DirectoryReader open(final IndexWriter writer, boolean applyAllDeletes) throws CorruptIndexException, IOException {
+    return writer.getReader(applyAllDeletes);
+  }
+
+  /** Expert: returns an IndexReader reading the index in the given
+   *  {@link IndexCommit}.
+   * @param commit the commit point to open
+   * @throws CorruptIndexException if the index is corrupt
+   * @throws IOException if there is a low-level IO error
+   */
+  public static DirectoryReader open(final IndexCommit commit) throws CorruptIndexException, IOException {
+    return open(commit.getDirectory(), commit, DEFAULT_TERMS_INDEX_DIVISOR);
+  }
+
+  /** Expert: returns an IndexReader reading the index in the given
+   *  {@link IndexCommit} and termInfosIndexDivisor.
+   * @param commit the commit point to open
+   * @param termInfosIndexDivisor Subsamples which indexed
+   *  terms are loaded into RAM. This has the same effect as {@link
+   *  IndexWriterConfig#setTermIndexInterval} except that setting
+   *  must be done at indexing time while this setting can be
+   *  set per reader.  When set to N, then one in every
+   *  N*termIndexInterval terms in the index is loaded into
+   *  memory.  By setting this to a value > 1 you can reduce
+   *  memory usage, at the expense of higher latency when
+   *  loading a TermInfo.  The default value is 1.  Set this
+   *  to -1 to skip loading the terms index entirely.
+   * @throws CorruptIndexException if the index is corrupt
+   * @throws IOException if there is a low-level IO error
+   */
+  public static DirectoryReader open(final IndexCommit commit, int termInfosIndexDivisor) throws CorruptIndexException, IOException {
+    return open(commit.getDirectory(), commit, termInfosIndexDivisor);
+  }
+
   DirectoryReader(SegmentReader[] readers, Directory directory, IndexWriter writer,
     SegmentInfos sis, int termInfosIndexDivisor, boolean applyAllDeletes) throws IOException {
     super(readers);
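
A hedged usage sketch for the new static openers; "dir" and "writer" are assumed to exist in the application:

    DirectoryReader r1 = DirectoryReader.open(dir);            // latest commit
    DirectoryReader r2 = DirectoryReader.open(dir, 2);         // halve the in-RAM terms index
    DirectoryReader nrt = DirectoryReader.open(writer, true);  // near-real-time, deletes applied
    try {
      IndexSearcher searcher = new IndexSearcher(nrt);
      // ... search ...
    } finally {
      nrt.close(); // NRT readers must be closed like any other reader
    }
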
@@ -50,9 +156,9 @@ final class DirectoryReader extends BaseMultiReader<SegmentReader> {
     this.applyAllDeletes = applyAllDeletes;
   }

-  static IndexReader open(final Directory directory, final IndexCommit commit,
+  private static DirectoryReader open(final Directory directory, final IndexCommit commit,
       final int termInfosIndexDivisor) throws CorruptIndexException, IOException {
-    return (IndexReader) new SegmentInfos.FindSegmentsFile(directory) {
+    return (DirectoryReader) new SegmentInfos.FindSegmentsFile(directory) {
       @Override
       protected Object doBody(String segmentFileName) throws CorruptIndexException, IOException {
         SegmentInfos sis = new SegmentInfos();
@@ -116,7 +222,7 @@ final class DirectoryReader extends BaseMultiReader<SegmentReader> {
   }

   /** This constructor is only used for {@link #doOpenIfChanged()} */
-  static DirectoryReader open(Directory directory, IndexWriter writer, SegmentInfos infos, SegmentReader[] oldReaders,
+  private static DirectoryReader open(Directory directory, IndexWriter writer, SegmentInfos infos, SegmentReader[] oldReaders,
       int termInfosIndexDivisor) throws IOException {
     // we put the old SegmentReaders in a map, that allows us
     // to lookup a reader using its segment name
@@ -202,6 +308,116 @@ final class DirectoryReader extends BaseMultiReader<SegmentReader> {
       infos, termInfosIndexDivisor, false);
   }

+  /**
+   * If the index has changed since the provided reader was
+   * opened, open and return a new reader; else, return
+   * null.  The new reader, if not null, will be the same
+   * type of reader as the previous one, ie an NRT reader
+   * will open a new NRT reader, a MultiReader will open a
+   * new MultiReader, etc.
+   *
+   * <p>This method is typically far less costly than opening a
+   * fully new <code>DirectoryReader</code> as it shares
+   * resources (for example sub-readers) with the provided
+   * <code>DirectoryReader</code>, when possible.
+   *
+   * <p>The provided reader is not closed (you are responsible
+   * for doing so); if a new reader is returned you also
+   * must eventually close it.  Be sure to never close a
+   * reader while other threads are still using it; see
+   * {@link SearcherManager} to simplify managing this.
+   *
+   * @throws CorruptIndexException if the index is corrupt
+   * @throws IOException if there is a low-level IO error
+   * @return null if there are no changes; else, a new
+   * DirectoryReader instance which you must eventually close
+   */
+  public static DirectoryReader openIfChanged(DirectoryReader oldReader) throws IOException {
+    final DirectoryReader newReader = oldReader.doOpenIfChanged();
+    assert newReader != oldReader;
+    return newReader;
+  }
+
+  /**
+   * If the IndexCommit differs from what the
+   * provided reader is searching, open and return a new
+   * reader; else, return null.
+   *
+   * @see #openIfChanged(DirectoryReader)
+   */
+  public static DirectoryReader openIfChanged(DirectoryReader oldReader, IndexCommit commit) throws IOException {
+    final DirectoryReader newReader = oldReader.doOpenIfChanged(commit);
+    assert newReader != oldReader;
+    return newReader;
+  }
+
+  /**
+   * Expert: If there are changes (committed or not) in the
+   * {@link IndexWriter} versus what the provided reader is
+   * searching, then open and return a new
+   * IndexReader searching both committed and uncommitted
+   * changes from the writer; else, return null (though, the
+   * current implementation never returns null).
+   *
+   * <p>This provides "near real-time" searching, in that
+   * changes made during an {@link IndexWriter} session can be
+   * quickly made available for searching without closing
+   * the writer nor calling {@link IndexWriter#commit}.
+   *
+   * <p>It's <i>near</i> real-time because there is no hard
+   * guarantee on how quickly you can get a new reader after
+   * making changes with IndexWriter.  You'll have to
+   * experiment in your situation to determine if it's
+   * fast enough.  As this is a new and experimental
+   * feature, please report back on your findings so we can
+   * learn, improve and iterate.</p>
+   *
+   * <p>The very first time this method is called, this
+   * writer instance will make every effort to pool the
+   * readers that it opens for doing merges, applying
+   * deletes, etc.  This means additional resources (RAM,
+   * file descriptors, CPU time) will be consumed.</p>
+   *
+   * <p>For lower latency on reopening a reader, you should
+   * call {@link IndexWriterConfig#setMergedSegmentWarmer} to
+   * pre-warm a newly merged segment before it's committed
+   * to the index.  This is important for minimizing
+   * index-to-search delay after a large merge.</p>
+   *
+   * <p>If an addIndexes* call is running in another thread,
+   * then this reader will only search those segments from
+   * the foreign index that have been successfully copied
+   * over, so far.</p>
+   *
+   * <p><b>NOTE</b>: Once the writer is closed, any
+   * outstanding readers may continue to be used.  However,
+   * if you attempt to reopen any of those readers, you'll
+   * hit an {@link org.apache.lucene.store.AlreadyClosedException}.</p>
+   *
+   * @return DirectoryReader that covers entire index plus all
+   * changes made so far by this IndexWriter instance, or
+   * null if there are no new changes
+   *
+   * @param writer The IndexWriter to open from
+   *
+   * @param applyAllDeletes If true, all buffered deletes will
+   * be applied (made visible) in the returned reader.  If
+   * false, the deletes are not applied but remain buffered
+   * (in IndexWriter) so that they will be applied in the
+   * future.  Applying deletes can be costly, so if your app
+   * can tolerate deleted documents being returned you might
+   * gain some performance by passing false.
+   *
+   * @throws IOException
+   *
+   * @lucene.experimental
+   */
+  public static DirectoryReader openIfChanged(DirectoryReader oldReader, IndexWriter writer, boolean applyAllDeletes) throws IOException {
+    final DirectoryReader newReader = oldReader.doOpenIfChanged(writer, applyAllDeletes);
+    assert newReader != oldReader;
+    return newReader;
+  }
+
   /** {@inheritDoc} */
   @Override
   public String toString() {
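
A hedged sketch of the reopen pattern the openIfChanged javadoc describes; "reader" is a previously opened DirectoryReader:

    DirectoryReader newReader = DirectoryReader.openIfChanged(reader);
    if (newReader != null) {        // null means nothing changed
      DirectoryReader oldReader = reader;
      reader = newReader;           // route new searches to the fresh reader
      oldReader.close();            // only once no thread still searches it; SearcherManager automates this
    }
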
@@ -223,13 +439,11 @@ final class DirectoryReader extends BaseMultiReader<SegmentReader> {
     return buffer.toString();
   }

-  @Override
-  protected final IndexReader doOpenIfChanged() throws CorruptIndexException, IOException {
+  protected final DirectoryReader doOpenIfChanged() throws CorruptIndexException, IOException {
     return doOpenIfChanged(null);
   }

-  @Override
-  protected final IndexReader doOpenIfChanged(final IndexCommit commit) throws CorruptIndexException, IOException {
+  protected final DirectoryReader doOpenIfChanged(final IndexCommit commit) throws CorruptIndexException, IOException {
     ensureOpen();

     // If we were obtained by writer.getReader(), re-ask the
@@ -241,18 +455,16 @@ final class DirectoryReader extends BaseMultiReader<SegmentReader> {
     }
   }

-  @Override
-  protected final IndexReader doOpenIfChanged(IndexWriter writer, boolean applyAllDeletes) throws CorruptIndexException, IOException {
+  protected final DirectoryReader doOpenIfChanged(IndexWriter writer, boolean applyAllDeletes) throws CorruptIndexException, IOException {
     ensureOpen();
     if (writer == this.writer && applyAllDeletes == this.applyAllDeletes) {
       return doOpenFromWriter(null);
     } else {
-      // fail by calling supers impl throwing UOE
-      return super.doOpenIfChanged(writer, applyAllDeletes);
+      return writer.getReader(applyAllDeletes);
     }
   }

-  private final IndexReader doOpenFromWriter(IndexCommit commit) throws CorruptIndexException, IOException {
+  private final DirectoryReader doOpenFromWriter(IndexCommit commit) throws CorruptIndexException, IOException {
     if (commit != null) {
       throw new IllegalArgumentException("a reader obtained from IndexWriter.getReader() cannot currently accept a commit");
     }
@@ -261,7 +473,7 @@ final class DirectoryReader extends BaseMultiReader<SegmentReader> {
       return null;
     }

-    IndexReader reader = writer.getReader(applyAllDeletes);
+    DirectoryReader reader = writer.getReader(applyAllDeletes);

     // If in fact no changes took place, return null:
     if (reader.getVersion() == segmentInfos.getVersion()) {
@@ -272,7 +484,7 @@ final class DirectoryReader extends BaseMultiReader<SegmentReader> {
     return reader;
   }

-  private synchronized IndexReader doOpenNoWriter(IndexCommit commit) throws CorruptIndexException, IOException {
+  private synchronized DirectoryReader doOpenNoWriter(IndexCommit commit) throws CorruptIndexException, IOException {

     if (commit == null) {
       if (isCurrent()) {
@@ -287,7 +499,7 @@ final class DirectoryReader extends BaseMultiReader<SegmentReader> {
       }
     }

-    return (IndexReader) new SegmentInfos.FindSegmentsFile(directory) {
+    return (DirectoryReader) new SegmentInfos.FindSegmentsFile(directory) {
       @Override
       protected Object doBody(String segmentFileName) throws CorruptIndexException, IOException {
         final SegmentInfos infos = new SegmentInfos();
@@ -301,20 +513,25 @@ final class DirectoryReader extends BaseMultiReader<SegmentReader> {
         return DirectoryReader.open(directory, writer, infos, subReaders, termInfosIndexDivisor);
       }

-  /** Version number when this IndexReader was opened. */
-  @Override
+  /**
+   * Version number when this IndexReader was opened. Not
+   * implemented in the IndexReader base class.
+   *
+   * <p>This method
+   * returns the version recorded in the commit that the
+   * reader opened.  This version is advanced every time
+   * a change is made with {@link IndexWriter}.</p>
+   */
   public long getVersion() {
     ensureOpen();
     return segmentInfos.getVersion();
   }

-  @Override
   public Map<String,String> getCommitUserData() {
     ensureOpen();
     return segmentInfos.getUserData();
   }

-  @Override
   public boolean isCurrent() throws CorruptIndexException, IOException {
     ensureOpen();
     if (writer == null || writer.isClosed()) {
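
With getVersion()/isCurrent() now declared on DirectoryReader rather than the base class, a hedged sketch of the usual staleness check; "reader" is assumed open and single-threaded here:

    if (!reader.isCurrent()) {                      // compares against the latest commit
      DirectoryReader fresh = DirectoryReader.openIfChanged(reader);
      if (fresh != null) {
        reader.close();                             // safe only because no other thread searches it
        reader = fresh;                             // fresh.getVersion() reflects the newer commit
      }
    }
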
@@ -348,7 +565,6 @@ final class DirectoryReader extends BaseMultiReader<SegmentReader> {
   }

   /** Returns the directory this index resides in. */
-  @Override
   public Directory directory() {
     // Don't ensureOpen here -- in certain cases, when a
     // cloned/reopened reader needs to commit, it may call
@@ -356,7 +572,6 @@ final class DirectoryReader extends BaseMultiReader<SegmentReader> {
     return directory;
   }

-  @Override
   public int getTermInfosIndexDivisor() {
     ensureOpen();
     return termInfosIndexDivisor;
@@ -367,13 +582,26 @@ final class DirectoryReader extends BaseMultiReader<SegmentReader> {
   * <p/>
   * @lucene.experimental
   */
-  @Override
   public IndexCommit getIndexCommit() throws IOException {
     ensureOpen();
     return new ReaderCommit(segmentInfos, directory);
   }

-  /** @see org.apache.lucene.index.IndexReader#listCommits */
+  /** Returns all commit points that exist in the Directory.
+   *  Normally, because the default is {@link
+   *  KeepOnlyLastCommitDeletionPolicy}, there would be only
+   *  one commit point.  But if you're using a custom {@link
+   *  IndexDeletionPolicy} then there could be many commits.
+   *  Once you have a given commit, you can open a reader on
+   *  it by calling {@link IndexReader#open(IndexCommit)}.
+   *  There must be at least one commit in
+   *  the Directory, else this method throws {@link
+   *  IndexNotFoundException}.  Note that if a commit is in
+   *  progress while this method is running, that commit
+   *  may or may not be returned.
+   *
+   *  @return a sorted list of {@link IndexCommit}s, from oldest
+   *  to latest. */
   public static List<IndexCommit> listCommits(Directory dir) throws IOException {
     final String[] files = dir.listAll();
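
A hedged sketch of "time travel" with commit points; this only works if the index was written with an IndexDeletionPolicy that keeps past commits, as the javadoc above notes:

    List<IndexCommit> commits = DirectoryReader.listCommits(dir);
    IndexCommit oldest = commits.get(0);                 // list is sorted oldest to latest
    DirectoryReader pointInTime = DirectoryReader.open(oldest);
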
@@ -420,6 +648,53 @@ final class DirectoryReader extends BaseMultiReader<SegmentReader> {
     return commits;
   }

+  /**
+   * Reads version number from segments files. The version number is
+   * initialized with a timestamp and then increased by one for each change of
+   * the index.
+   *
+   * @param directory where the index resides.
+   * @return version number.
+   * @throws CorruptIndexException if the index is corrupt
+   * @throws IOException if there is a low-level IO error
+   */
+  public static long getCurrentVersion(Directory directory) throws CorruptIndexException, IOException {
+    return SegmentInfos.readCurrentVersion(directory);
+  }
+
+  /**
+   * Reads commitUserData, previously passed to {@link
+   * IndexWriter#commit(Map)}, from current index
+   * segments file.  This will return null if {@link
+   * IndexWriter#commit(Map)} has never been called for
+   * this index.
+   *
+   * @param directory where the index resides.
+   * @return commit userData.
+   * @throws CorruptIndexException if the index is corrupt
+   * @throws IOException if there is a low-level IO error
+   *
+   * @see #getCommitUserData()
+   */
+  public static Map<String, String> getCommitUserData(Directory directory) throws CorruptIndexException, IOException {
+    return SegmentInfos.readCurrentUserData(directory);
+  }
+
+  /**
+   * Returns <code>true</code> if an index exists at the specified directory.
+   * @param directory the directory to check for an index
+   * @return <code>true</code> if an index exists; <code>false</code> otherwise
+   * @throws IOException if there is a problem with accessing the index
+   */
+  public static boolean indexExists(Directory directory) throws IOException {
+    try {
+      new SegmentInfos().read(directory);
+      return true;
+    } catch (IOException ioe) {
+      return false;
+    }
+  }
+
   private static final class ReaderCommit extends IndexCommit {
     private String segmentsFileName;
     Collection<String> files;
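
A hedged sketch of the static helpers added above, which inspect the commit without opening a full reader; "dir" is assumed:

    if (DirectoryReader.indexExists(dir)) {
      long version = DirectoryReader.getCurrentVersion(dir);
      Map<String,String> userData = DirectoryReader.getCommitUserData(dir); // null if commit(Map) was never used
    }
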
@@ -68,8 +68,6 @@ import java.util.Comparator;
  *
  * The RAM consumption of this class can be high!
  *
- * <p>NOTE: the provided reader must be an atomic reader
- *
  * @lucene.experimental
  */
@@ -149,19 +147,19 @@ public class DocTermOrds {
   }

   /** Inverts all terms */
-  public DocTermOrds(IndexReader reader, String field) throws IOException {
+  public DocTermOrds(AtomicReader reader, String field) throws IOException {
     this(reader, field, null, Integer.MAX_VALUE);
   }

   /** Inverts only terms starting w/ prefix */
-  public DocTermOrds(IndexReader reader, String field, BytesRef termPrefix) throws IOException {
+  public DocTermOrds(AtomicReader reader, String field, BytesRef termPrefix) throws IOException {
     this(reader, field, termPrefix, Integer.MAX_VALUE);
   }

   /** Inverts only terms starting w/ prefix, and only terms
    *  whose docFreq (not taking deletions into account) is
    *  <= maxTermDocFreq */
-  public DocTermOrds(IndexReader reader, String field, BytesRef termPrefix, int maxTermDocFreq) throws IOException {
+  public DocTermOrds(AtomicReader reader, String field, BytesRef termPrefix, int maxTermDocFreq) throws IOException {
     this(reader, field, termPrefix, maxTermDocFreq, DEFAULT_INDEX_INTERVAL_BITS);
     uninvert(reader, termPrefix);
   }
@@ -170,7 +168,7 @@ public class DocTermOrds {
    *  whose docFreq (not taking deletions into account) is
    *  <= maxTermDocFreq, with a custom indexing interval
    *  (default is every 128th term). */
-  public DocTermOrds(IndexReader reader, String field, BytesRef termPrefix, int maxTermDocFreq, int indexIntervalBits) throws IOException {
+  public DocTermOrds(AtomicReader reader, String field, BytesRef termPrefix, int maxTermDocFreq, int indexIntervalBits) throws IOException {
     this(field, maxTermDocFreq, indexIntervalBits);
     uninvert(reader, termPrefix);
   }
@@ -196,7 +194,7 @@ public class DocTermOrds {
   *
   * <p><b>NOTE</b>: you must pass the same reader that was
   *  used when creating this class */
-  public TermsEnum getOrdTermsEnum(IndexReader reader) throws IOException {
+  public TermsEnum getOrdTermsEnum(AtomicReader reader) throws IOException {
     if (termInstances == 0) {
       return null;
     }
@@ -226,7 +224,7 @@ public class DocTermOrds {
   }

   // Call this only once (if you subclass!)
-  protected void uninvert(final IndexReader reader, final BytesRef termPrefix) throws IOException {
+  protected void uninvert(final AtomicReader reader, final BytesRef termPrefix) throws IOException {
     //System.out.println("DTO uninvert field=" + field + " prefix=" + termPrefix);
     final long startTime = System.currentTimeMillis();
     prefix = termPrefix == null ? null : BytesRef.deepCopyOf(termPrefix);
@@ -644,12 +642,12 @@ public class DocTermOrds {
    *  ord; in this case we "wrap" our own terms index
    *  around it. */
   private final class OrdWrappedTermsEnum extends TermsEnum {
-    private final IndexReader reader;
+    private final AtomicReader reader;
     private final TermsEnum termsEnum;
     private BytesRef term;
     private long ord = -indexInterval-1;        // force "real" seek

-    public OrdWrappedTermsEnum(IndexReader reader) throws IOException {
+    public OrdWrappedTermsEnum(AtomicReader reader) throws IOException {
       this.reader = reader;
       assert indexedTermsArray != null;
       termsEnum = reader.fields().terms(field).iterator(null);
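
The prose NOTE disappears because the type system now enforces it: DocTermOrds only accepts an AtomicReader, so uninversion is explicitly per segment. A hedged sketch; "reader" is an open DirectoryReader and "category" a hypothetical multi-valued field:

    for (AtomicReaderContext leaf : reader.getTopReaderContext().leaves()) {
      DocTermOrds ords = new DocTermOrds(leaf.reader(), "category");
      TermsEnum te = ords.getOrdTermsEnum(leaf.reader()); // must be the same reader, per the javadoc
      // ... look up per-document term ordinals against this segment ...
    }
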
@@ -33,7 +33,7 @@ import org.apache.lucene.util.packed.PackedInts;
  * <li>via {@link #getSource()} providing RAM resident random access</li>
  * <li>via {@link #getDirectSource()} providing on disk random access</li>
  * </ul> {@link DocValues} are exposed via
- * {@link IndexReader#docValues(String)} on a per-segment basis. For best
+ * {@link AtomicReader#docValues(String)} on a per-segment basis. For best
  * performance {@link DocValues} should be consumed per-segment just like
  * IndexReader.
  * <p>
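
A hedged sketch matching the corrected link: doc values are fetched per segment from each AtomicReader. "popularity" is a hypothetical field, and the exact Source getters vary by value type at this revision:

    for (AtomicReaderContext leaf : reader.getTopReaderContext().leaves()) {
      DocValues dv = leaf.reader().docValues("popularity"); // null if the segment has none
      if (dv != null) {
        DocValues.Source source = dv.getSource();           // RAM-resident random access
        // ... read values for this segment's docIDs ...
      }
    }
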
@@ -17,12 +17,10 @@ package org.apache.lucene.index;
  * limitations under the License.
  */

-import org.apache.lucene.store.Directory;
 import org.apache.lucene.util.Bits;
 import org.apache.lucene.util.BytesRef;

 import java.io.IOException;
-import java.util.Map;
 import java.util.Comparator;

 /** A <code>FilterIndexReader</code> contains another IndexReader, which it
@@ -33,13 +31,8 @@ import java.util.Comparator;
  * contained index reader. Subclasses of <code>FilterIndexReader</code> may
  * further override some of these methods and may also provide additional
  * methods and fields.
- * <p><b>Note:</b> The default implementation of {@link FilterIndexReader#doOpenIfChanged}
- * throws {@link UnsupportedOperationException} (like the base class),
- * so it's not possible to reopen a <code>FilterIndexReader</code>.
- * To reopen, you have to first reopen the underlying reader
- * and wrap it again with the custom filter.
  */
-public class FilterIndexReader extends IndexReader {
+public class FilterIndexReader extends AtomicReader {

 /** Base class for filtering {@link Fields}
  *  implementations. */
@@ -279,24 +272,18 @@ public class FilterIndexReader extends IndexReader {
     }
   }

-  protected IndexReader in;
+  protected AtomicReader in;

   /**
   * <p>Construct a FilterIndexReader based on the specified base reader.
   * <p>Note that base reader is closed if this FilterIndexReader is closed.</p>
   * @param in specified base reader.
   */
-  public FilterIndexReader(IndexReader in) {
+  public FilterIndexReader(AtomicReader in) {
     super();
     this.in = in;
   }

-  @Override
-  public Directory directory() {
-    ensureOpen();
-    return in.directory();
-  }
-
   @Override
   public Bits getLiveDocs() {
     ensureOpen();
@@ -345,45 +332,11 @@ public class FilterIndexReader extends IndexReader {
     return in.hasNorms(field);
   }

-  @Override
-  public int docFreq(String field, BytesRef t) throws IOException {
-    ensureOpen();
-    return in.docFreq(field, t);
-  }
-
   @Override
   protected void doClose() throws IOException {
     in.close();
   }

-  @Override
-  public long getVersion() {
-    ensureOpen();
-    return in.getVersion();
-  }
-
-  @Override
-  public boolean isCurrent() throws CorruptIndexException, IOException {
-    ensureOpen();
-    return in.isCurrent();
-  }
-
-  @Override
-  public IndexReader[] getSequentialSubReaders() {
-    return in.getSequentialSubReaders();
-  }
-
-  @Override
-  public ReaderContext getTopReaderContext() {
-    ensureOpen();
-    return in.getTopReaderContext();
-  }
-
-  @Override
-  public Map<String, String> getCommitUserData() {
-    return in.getCommitUserData();
-  }
-
   @Override
   public Fields fields() throws IOException {
     ensureOpen();
@@ -410,7 +363,7 @@ public class FilterIndexReader extends IndexReader {

   @Override
   public String toString() {
-    final StringBuilder buffer = new StringBuilder("FilterReader(");
+    final StringBuilder buffer = new StringBuilder("FilterIndexReader(");
     buffer.append(in);
     buffer.append(')');
     return buffer.toString();
@@ -427,14 +380,4 @@ public class FilterIndexReader extends IndexReader {
     ensureOpen();
     return in.normValues(field);
   }
-
-  @Override
-  public IndexCommit getIndexCommit() throws IOException {
-    return in.getIndexCommit();
-  }
-
-  @Override
-  public int getTermInfosIndexDivisor() {
-    return in.getTermInfosIndexDivisor();
-  }
 }
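
Since FilterIndexReader now extends AtomicReader, a filter wraps exactly one segment-level reader, and the composite/commit methods above simply no longer apply. A hedged sketch of a subclass; a real filter hiding deletions would also need to override numDocs() and hasDeletions() consistently, which is omitted here:

    class AllDocsLiveReader extends FilterIndexReader {
      AllDocsLiveReader(AtomicReader in) {
        super(in);
      }
      @Override
      public Bits getLiveDocs() {
        return null; // null means "all documents are live"
      }
    }
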
@@ -21,8 +21,6 @@ import java.io.Closeable;
 import java.io.IOException;
 import java.util.Collections;
 import java.util.LinkedHashSet;
-import java.util.List;
-import java.util.Map;
 import java.util.Set;
 import java.util.concurrent.atomic.AtomicInteger;

@@ -38,26 +36,32 @@ import org.apache.lucene.util.ReaderUtil; // for javadocs
  index.  Search of an index is done entirely through this abstract interface,
  so that any subclass which implements it is searchable.

- <p> Concrete subclasses of IndexReader are usually constructed with a call to
- one of the static <code>open()</code> methods, e.g. {@link
- #open(Directory)}.
+ <p>There are two different types of IndexReaders:
+ <ul>
+  <li>{@link AtomicReader}: These readers do not consist of several sub-readers;
+  they are atomic. They support retrieval of stored fields, doc values, terms,
+  and postings.
+  <li>{@link CompositeReader}: Instances (like {@link DirectoryReader})
+  of this reader can only
+  be used to get stored fields from the underlying AtomicReaders,
+  but it is not possible to directly retrieve postings. To do that, get
+  the sub-readers via {@link CompositeReader#getSequentialSubReaders}.
+  Alternatively, you can mimic an {@link AtomicReader} (with a serious slowdown),
+  by wrapping composite readers with {@link SlowCompositeReaderWrapper}.
+ </ul>
+
+ <p>IndexReader instances for indexes on disk are usually constructed
+ with a call to one of the static <code>DirectoryReader.open()</code> methods,
+ e.g. {@link DirectoryReader#open(Directory)}. {@link DirectoryReader} implements
+ the {@link CompositeReader} interface; it is not possible to directly get postings.

  <p> For efficiency, in this API documents are often referred to via
  <i>document numbers</i>, non-negative integers which each name a unique
- document in the index.  These document numbers are ephemeral--they may change
+ document in the index.  These document numbers are ephemeral -- they may change
  as documents are added to and deleted from an index.  Clients should thus not
  rely on a given document having the same number between sessions.

  <p>
- <b>NOTE</b>: for backwards API compatibility, several methods are not listed
- as abstract, but have no useful implementations in this base class and
- instead always throw UnsupportedOperationException.  Subclasses are
- strongly encouraged to override these methods, but in many cases may not
- need to.
- </p>
-
- <p>
-
  <a name="thread-safety"></a><p><b>NOTE</b>: {@link
  IndexReader} instances are completely thread
  safe, meaning multiple threads can call any of its methods,
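
A hedged sketch of the dichotomy this javadoc introduces (and which the constructor below enforces): every reader is either atomic or composite, so a full traversal is a simple recursion.

    static void visitLeaves(IndexReader r) {
      if (r instanceof AtomicReader) {
        // postings, norms and doc values are available here
      } else {
        for (IndexReader sub : ((CompositeReader) r).getSequentialSubReaders()) {
          visitLeaves(sub); // sub-readers may themselves be composite
        }
      }
    }
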
@ -68,6 +72,12 @@ import org.apache.lucene.util.ReaderUtil; // for javadocs
|
||||||
*/
|
*/
|
||||||
public abstract class IndexReader implements Closeable {
|
public abstract class IndexReader implements Closeable {
|
||||||
|
|
||||||
|
IndexReader() {
|
||||||
|
if (!(this instanceof CompositeReader || this instanceof AtomicReader))
|
||||||
|
throw new Error("This class should never be directly extended, subclass AtomicReader or CompositeReader instead!");
|
||||||
|
refCount.set(1);
|
||||||
|
}
|
||||||
|
|
||||||
/**
|
/**
|
||||||
* A custom listener that's invoked when the IndexReader
|
* A custom listener that's invoked when the IndexReader
|
||||||
* is closed.
|
* is closed.
|
||||||
|
@ -110,8 +120,6 @@ public abstract class IndexReader implements Closeable {
|
||||||
|
|
||||||
private final AtomicInteger refCount = new AtomicInteger();
|
private final AtomicInteger refCount = new AtomicInteger();
|
||||||
|
|
||||||
static int DEFAULT_TERMS_INDEX_DIVISOR = 1;
|
|
||||||
|
|
||||||
/** Expert: returns the current refCount for this reader */
|
/** Expert: returns the current refCount for this reader */
|
||||||
public final int getRefCount() {
|
public final int getRefCount() {
|
||||||
// NOTE: don't ensureOpen, so that callers can see
|
// NOTE: don't ensureOpen, so that callers can see
|
||||||
|
@ -172,23 +180,6 @@ public abstract class IndexReader implements Closeable {
|
||||||
return false;
|
return false;
|
||||||
}
|
}
|
||||||
|
|
||||||
/** {@inheritDoc} */
|
|
||||||
@Override
|
|
||||||
public String toString() {
|
|
||||||
final StringBuilder buffer = new StringBuilder();
|
|
||||||
buffer.append(getClass().getSimpleName());
|
|
||||||
buffer.append('(');
|
|
||||||
final IndexReader[] subReaders = getSequentialSubReaders();
|
|
||||||
if ((subReaders != null) && (subReaders.length > 0)) {
|
|
||||||
buffer.append(subReaders[0]);
|
|
||||||
for (int i = 1; i < subReaders.length; ++i) {
|
|
||||||
buffer.append(" ").append(subReaders[i]);
|
|
||||||
}
|
|
||||||
}
|
|
||||||
buffer.append(')');
|
|
||||||
return buffer.toString();
|
|
||||||
}
|
|
||||||
|
|
||||||
/**
|
/**
|
||||||
* Expert: decreases the refCount of this IndexReader
|
* Expert: decreases the refCount of this IndexReader
|
||||||
* instance. If the refCount drops to 0, then this
|
* instance. If the refCount drops to 0, then this
|
||||||
|
@ -219,10 +210,6 @@ public abstract class IndexReader implements Closeable {
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
protected IndexReader() {
|
|
||||||
refCount.set(1);
|
|
||||||
}
|
|
||||||
|
|
||||||
/**
|
/**
|
||||||
* @throws AlreadyClosedException if this IndexReader is closed
|
* @throws AlreadyClosedException if this IndexReader is closed
|
||||||
*/
|
*/
|
||||||
|
@ -237,9 +224,11 @@ public abstract class IndexReader implements Closeable {
|
||||||
* @param directory the index directory
|
* @param directory the index directory
|
||||||
* @throws CorruptIndexException if the index is corrupt
|
* @throws CorruptIndexException if the index is corrupt
|
||||||
* @throws IOException if there is a low-level IO error
|
* @throws IOException if there is a low-level IO error
|
||||||
|
* @deprecated Use {@link DirectoryReader#open(Directory)}
|
||||||
*/
|
*/
|
||||||
public static IndexReader open(final Directory directory) throws CorruptIndexException, IOException {
|
@Deprecated
|
||||||
return DirectoryReader.open(directory, null, DEFAULT_TERMS_INDEX_DIVISOR);
|
public static DirectoryReader open(final Directory directory) throws CorruptIndexException, IOException {
|
||||||
|
return DirectoryReader.open(directory);
|
||||||
}
|
}
|
||||||
|
|
||||||
/** Expert: Returns a IndexReader reading the index in the given
|
/** Expert: Returns a IndexReader reading the index in the given
|
||||||
|
@ -257,9 +246,11 @@ public abstract class IndexReader implements Closeable {
|
||||||
* to -1 to skip loading the terms index entirely.
|
* to -1 to skip loading the terms index entirely.
|
||||||
* @throws CorruptIndexException if the index is corrupt
|
* @throws CorruptIndexException if the index is corrupt
|
||||||
* @throws IOException if there is a low-level IO error
|
* @throws IOException if there is a low-level IO error
|
||||||
|
* @deprecated Use {@link DirectoryReader#open(Directory,int)}
|
||||||
*/
|
*/
|
||||||
public static IndexReader open(final Directory directory, int termInfosIndexDivisor) throws CorruptIndexException, IOException {
|
@Deprecated
|
||||||
return DirectoryReader.open(directory, null, termInfosIndexDivisor);
|
public static DirectoryReader open(final Directory directory, int termInfosIndexDivisor) throws CorruptIndexException, IOException {
|
||||||
|
return DirectoryReader.open(directory, termInfosIndexDivisor);
|
||||||
}
|
}
|
||||||
|
|
||||||
/**
|
/**
|
||||||
|
@@ -277,12 +268,14 @@ public abstract class IndexReader implements Closeable {
   * @throws CorruptIndexException
   * @throws IOException if there is a low-level IO error
   *
-  * @see #openIfChanged(IndexReader,IndexWriter,boolean)
+  * @see DirectoryReader#openIfChanged(DirectoryReader,IndexWriter,boolean)
   *
   * @lucene.experimental
+  * @deprecated Use {@link DirectoryReader#open(IndexWriter,boolean)}
   */
-  public static IndexReader open(final IndexWriter writer, boolean applyAllDeletes) throws CorruptIndexException, IOException {
-    return writer.getReader(applyAllDeletes);
+  @Deprecated
+  public static DirectoryReader open(final IndexWriter writer, boolean applyAllDeletes) throws CorruptIndexException, IOException {
+    return DirectoryReader.open(writer, applyAllDeletes);
  }
 
  /** Expert: returns an IndexReader reading the index in the given
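
open(IndexWriter,boolean) follows the same pattern and now routes through DirectoryReader. A sketch of the near-real-time cycle it enables, assuming an already-open IndexWriter named writer (variable names are illustrative):

    // NRT reader straight from the writer; true = apply buffered deletes
    DirectoryReader nrt = DirectoryReader.open(writer, true);

    // ... more documents are indexed via writer ...

    // cheap reopen; null means nothing has changed
    DirectoryReader newer = DirectoryReader.openIfChanged(nrt, writer, true);
    if (newer != null) {
      nrt.close();
      nrt = newer;
    }
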
@@ -290,9 +283,11 @@ public abstract class IndexReader implements Closeable {
   * @param commit the commit point to open
   * @throws CorruptIndexException if the index is corrupt
   * @throws IOException if there is a low-level IO error
+  * @deprecated Use {@link DirectoryReader#open(IndexCommit)}
   */
-  public static IndexReader open(final IndexCommit commit) throws CorruptIndexException, IOException {
-    return DirectoryReader.open(commit.getDirectory(), commit, DEFAULT_TERMS_INDEX_DIVISOR);
+  @Deprecated
+  public static DirectoryReader open(final IndexCommit commit) throws CorruptIndexException, IOException {
+    return DirectoryReader.open(commit);
  }
 
 
@@ -311,240 +306,11 @@ public abstract class IndexReader implements Closeable {
   * to -1 to skip loading the terms index entirely.
   * @throws CorruptIndexException if the index is corrupt
   * @throws IOException if there is a low-level IO error
+  * @deprecated Use {@link DirectoryReader#open(IndexCommit,int)}
   */
-  public static IndexReader open(final IndexCommit commit, int termInfosIndexDivisor) throws CorruptIndexException, IOException {
-    return DirectoryReader.open(commit.getDirectory(), commit, termInfosIndexDivisor);
+  @Deprecated
+  public static DirectoryReader open(final IndexCommit commit, int termInfosIndexDivisor) throws CorruptIndexException, IOException {
+    return DirectoryReader.open(commit, termInfosIndexDivisor);
  }
-
-  /**
-   * If the index has changed since the provided reader was
-   * opened, open and return a new reader; else, return
-   * null.  The new reader, if not null, will be the same
-   * type of reader as the previous one, ie an NRT reader
-   * will open a new NRT reader, a MultiReader will open a
-   * new MultiReader,  etc.
-   *
-   * <p>This method is typically far less costly than opening a
-   * fully new <code>IndexReader</code> as it shares
-   * resources (for example sub-readers) with the provided
-   * <code>IndexReader</code>, when possible.
-   *
-   * <p>The provided reader is not closed (you are responsible
-   * for doing so); if a new reader is returned you also
-   * must eventually close it.  Be sure to never close a
-   * reader while other threads are still using it; see
-   * {@link SearcherManager} to simplify managing this.
-   *
-   * @throws CorruptIndexException if the index is corrupt
-   * @throws IOException if there is a low-level IO error
-   * @return null if there are no changes; else, a new
-   * IndexReader instance which you must eventually close
-   */
-  public static IndexReader openIfChanged(IndexReader oldReader) throws IOException {
-    final IndexReader newReader = oldReader.doOpenIfChanged();
-    assert newReader != oldReader;
-    return newReader;
-  }
-
-  /**
-   * If the IndexCommit differs from what the
-   * provided reader is searching, open and return a new
-   * reader; else, return null.
-   *
-   * @see #openIfChanged(IndexReader)
-   */
-  public static IndexReader openIfChanged(IndexReader oldReader, IndexCommit commit) throws IOException {
-    final IndexReader newReader = oldReader.doOpenIfChanged(commit);
-    assert newReader != oldReader;
-    return newReader;
-  }
-
-  /**
-   * Expert: If there changes (committed or not) in the
-   * {@link IndexWriter} versus what the provided reader is
-   * searching, then open and return a new
-   * IndexReader searching both committed and uncommitted
-   * changes from the writer; else, return null (though, the
-   * current implementation never returns null).
-   *
-   * <p>This provides "near real-time" searching, in that
-   * changes made during an {@link IndexWriter} session can be
-   * quickly made available for searching without closing
-   * the writer nor calling {@link IndexWriter#commit}.
-   *
-   * <p>It's <i>near</i> real-time because there is no hard
-   * guarantee on how quickly you can get a new reader after
-   * making changes with IndexWriter.  You'll have to
-   * experiment in your situation to determine if it's
-   * fast enough.  As this is a new and experimental
-   * feature, please report back on your findings so we can
-   * learn, improve and iterate.</p>
-   *
-   * <p>The very first time this method is called, this
-   * writer instance will make every effort to pool the
-   * readers that it opens for doing merges, applying
-   * deletes, etc.  This means additional resources (RAM,
-   * file descriptors, CPU time) will be consumed.</p>
-   *
-   * <p>For lower latency on reopening a reader, you should
-   * call {@link IndexWriterConfig#setMergedSegmentWarmer} to
-   * pre-warm a newly merged segment before it's committed
-   * to the index.  This is important for minimizing
-   * index-to-search delay after a large merge.  </p>
-   *
-   * <p>If an addIndexes* call is running in another thread,
-   * then this reader will only search those segments from
-   * the foreign index that have been successfully copied
-   * over, so far.</p>
-   *
-   * <p><b>NOTE</b>: Once the writer is closed, any
-   * outstanding readers may continue to be used.  However,
-   * if you attempt to reopen any of those readers, you'll
-   * hit an {@link AlreadyClosedException}.</p>
-   *
-   * @return IndexReader that covers entire index plus all
-   * changes made so far by this IndexWriter instance, or
-   * null if there are no new changes
-   *
-   * @param writer The IndexWriter to open from
-   *
-   * @param applyAllDeletes If true, all buffered deletes will
-   * be applied (made visible) in the returned reader.  If
-   * false, the deletes are not applied but remain buffered
-   * (in IndexWriter) so that they will be applied in the
-   * future.  Applying deletes can be costly, so if your app
-   * can tolerate deleted documents being returned you might
-   * gain some performance by passing false.
-   *
-   * @throws IOException
-   *
-   * @lucene.experimental
-   */
-  public static IndexReader openIfChanged(IndexReader oldReader, IndexWriter writer, boolean applyAllDeletes) throws IOException {
-    final IndexReader newReader = oldReader.doOpenIfChanged(writer, applyAllDeletes);
-    assert newReader != oldReader;
-    return newReader;
-  }
-
-  /**
-   * If the index has changed since it was opened, open and return a new reader;
-   * else, return {@code null}.
-   *
-   * @see #openIfChanged(IndexReader)
-   */
-  protected IndexReader doOpenIfChanged() throws CorruptIndexException, IOException {
-    throw new UnsupportedOperationException("This reader does not support reopen().");
-  }
-
-  /**
-   * If the index has changed since it was opened, open and return a new reader;
-   * else, return {@code null}.
-   *
-   * @see #openIfChanged(IndexReader, IndexCommit)
-   */
-  protected IndexReader doOpenIfChanged(final IndexCommit commit) throws CorruptIndexException, IOException {
-    throw new UnsupportedOperationException("This reader does not support reopen(IndexCommit).");
-  }
-
-  /**
-   * If the index has changed since it was opened, open and return a new reader;
-   * else, return {@code null}.
-   *
-   * @see #openIfChanged(IndexReader, IndexWriter, boolean)
-   */
-  protected IndexReader doOpenIfChanged(IndexWriter writer, boolean applyAllDeletes) throws CorruptIndexException, IOException {
-    return writer.getReader(applyAllDeletes);
-  }
-
-  /**
-   * Returns the directory associated with this index.  The Default
-   * implementation returns the directory specified by subclasses when
-   * delegating to the IndexReader(Directory) constructor, or throws an
-   * UnsupportedOperationException if one was not specified.
-   * @throws UnsupportedOperationException if no directory
-   */
-  public Directory directory() {
-    ensureOpen();
-    throw new UnsupportedOperationException("This reader does not support this method.");
-  }
-
-  /**
-   * Reads commitUserData, previously passed to {@link
-   * IndexWriter#commit(Map)}, from current index
-   * segments file.  This will return null if {@link
-   * IndexWriter#commit(Map)} has never been called for
-   * this index.
-   *
-   * @param directory where the index resides.
-   * @return commit userData.
-   * @throws CorruptIndexException if the index is corrupt
-   * @throws IOException if there is a low-level IO error
-   *
-   * @see #getCommitUserData()
-   */
-  public static Map<String, String> getCommitUserData(Directory directory) throws CorruptIndexException, IOException {
-    return SegmentInfos.readCurrentUserData(directory);
-  }
-
-  /**
-   * Version number when this IndexReader was opened. Not
-   * implemented in the IndexReader base class.
-   *
-   * <p>If this reader is based on a Directory (ie, was
-   * created by calling {@link #open}, or {@link #openIfChanged} on
-   * a reader based on a Directory), then this method
-   * returns the version recorded in the commit that the
-   * reader opened.  This version is advanced every time
-   * a change is made with {@link IndexWriter}.</p>
-   *
-   * @throws UnsupportedOperationException unless overridden in subclass
-   */
-  public long getVersion() {
-    throw new UnsupportedOperationException("This reader does not support this method.");
-  }
-
-  /**
-   * Retrieve the String userData optionally passed to
-   * IndexWriter#commit.  This will return null if {@link
-   * IndexWriter#commit(Map)} has never been called for
-   * this index.
-   *
-   * @see #getCommitUserData(Directory)
-   */
-  public Map<String,String> getCommitUserData() {
-    throw new UnsupportedOperationException("This reader does not support this method.");
-  }
-
-
-  /**
-   * Check whether any new changes have occurred to the
-   * index since this reader was opened.
-   *
-   * <p>If this reader is based on a Directory (ie, was
-   * created by calling {@link #open}, or {@link #openIfChanged} on
-   * a reader based on a Directory), then this method checks
-   * if any further commits (see {@link IndexWriter#commit}
-   * have occurred in that directory).</p>
-   *
-   * <p>If instead this reader is a near real-time reader
-   * (ie, obtained by a call to {@link
-   * IndexWriter#getReader}, or by calling {@link #openIfChanged}
-   * on a near real-time reader), then this method checks if
-   * either a new commit has occurred, or any new
-   * uncommitted changes have taken place via the writer.
-   * Note that even if the writer has only performed
-   * merging, this method will still return false.</p>
-   *
-   * <p>In any event, if this returns false, you should call
-   * {@link #openIfChanged} to get a new reader that sees the
-   * changes.</p>
-   *
-   * @throws CorruptIndexException if the index is corrupt
-   * @throws IOException if there is a low-level IO error
-   * @throws UnsupportedOperationException unless overridden in subclass
-   */
-  public boolean isCurrent() throws CorruptIndexException, IOException {
-    throw new UnsupportedOperationException("This reader does not support this method.");
-  }
 
  /** Retrieve term vectors for this document, or null if
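
With the whole openIfChanged family removed from IndexReader, the reopen idiom survives unchanged on DirectoryReader (per the deprecation pointers above). A refresh sketch:

    static DirectoryReader refresh(DirectoryReader current) throws IOException {
      DirectoryReader changed = DirectoryReader.openIfChanged(current);
      if (changed == null) {
        return current;   // index unchanged; keep using the old reader
      }
      current.close();    // only safe once no other thread still searches it
      return changed;
    }

As the removed javadoc notes, SearcherManager remains the convenient way to manage this hand-off across threads.
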
@@ -567,21 +333,6 @@ public abstract class IndexReader implements Closeable {
     return vectors.terms(field);
   }
 
-  /**
-   * Returns <code>true</code> if an index exists at the specified directory.
-   * @param directory the directory to check for an index
-   * @return <code>true</code> if an index exists; <code>false</code> otherwise
-   * @throws IOException if there is a problem with accessing the index
-   */
-  public static boolean indexExists(Directory directory) throws IOException {
-    try {
-      new SegmentInfos().read(directory);
-      return true;
-    } catch (IOException ioe) {
-      return false;
-    }
-  }
-
  /** Returns the number of documents in this index. */
  public abstract int numDocs();
 
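
indexExists likewise moves to DirectoryReader; the IndexUpgrader and IndexWriter hunks further down show the updated call sites. A guard sketch matching them:

    static void requireIndex(Directory dir) throws IOException {
      if (!DirectoryReader.indexExists(dir)) {
        throw new IndexNotFoundException(dir.toString());
      }
    }
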
@@ -646,166 +397,6 @@ public abstract class IndexReader implements Closeable {
   /** Returns true if any documents have been deleted */
   public abstract boolean hasDeletions();
 
-  /** Returns true if there are norms stored for this field. */
-  public boolean hasNorms(String field) throws IOException {
-    // backward compatible implementation.
-    // SegmentReader has an efficient implementation.
-    ensureOpen();
-    return normValues(field) != null;
-  }
-
-  /**
-   * Returns {@link Fields} for this reader.
-   * This method may return null if the reader has no
-   * postings.
-   *
-   * <p><b>NOTE</b>: if this is a multi reader ({@link
-   * #getSequentialSubReaders} is not null) then this
-   * method will throw UnsupportedOperationException.  If
-   * you really need a {@link Fields} for such a reader,
-   * use {@link MultiFields#getFields}.  However, for
-   * performance reasons, it's best to get all sub-readers
-   * using {@link ReaderUtil#gatherSubReaders} and iterate
-   * through them yourself. */
-  public abstract Fields fields() throws IOException;
-
-  public final int docFreq(Term term) throws IOException {
-    return docFreq(term.field(), term.bytes());
-  }
-
-  /** Returns the number of documents containing the term
-   * <code>t</code>.  This method returns 0 if the term or
-   * field does not exists.  This method does not take into
-   * account deleted documents that have not yet been merged
-   * away. */
-  public int docFreq(String field, BytesRef term) throws IOException {
-    final Fields fields = fields();
-    if (fields == null) {
-      return 0;
-    }
-    final Terms terms = fields.terms(field);
-    if (terms == null) {
-      return 0;
-    }
-    final TermsEnum termsEnum = terms.iterator(null);
-    if (termsEnum.seekExact(term, true)) {
-      return termsEnum.docFreq();
-    } else {
-      return 0;
-    }
-  }
-
-  /** Returns the number of documents containing the term
-   * <code>t</code>.  This method returns 0 if the term or
-   * field does not exists.  This method does not take into
-   * account deleted documents that have not yet been merged
-   * away. */
-  public final long totalTermFreq(String field, BytesRef term) throws IOException {
-    final Fields fields = fields();
-    if (fields == null) {
-      return 0;
-    }
-    final Terms terms = fields.terms(field);
-    if (terms == null) {
-      return 0;
-    }
-    final TermsEnum termsEnum = terms.iterator(null);
-    if (termsEnum.seekExact(term, true)) {
-      return termsEnum.totalTermFreq();
-    } else {
-      return 0;
-    }
-  }
-
-  /** This may return null if the field does not exist.*/
-  public final Terms terms(String field) throws IOException {
-    final Fields fields = fields();
-    if (fields == null) {
-      return null;
-    }
-    return fields.terms(field);
-  }
-
-  /** Returns {@link DocsEnum} for the specified field &
-   *  term.  This may return null, if either the field or
-   *  term does not exist. */
-  public final DocsEnum termDocsEnum(Bits liveDocs, String field, BytesRef term, boolean needsFreqs) throws IOException {
-    assert field != null;
-    assert term != null;
-    final Fields fields = fields();
-    if (fields != null) {
-      final Terms terms = fields.terms(field);
-      if (terms != null) {
-        final TermsEnum termsEnum = terms.iterator(null);
-        if (termsEnum.seekExact(term, true)) {
-          return termsEnum.docs(liveDocs, null, needsFreqs);
-        }
-      }
-    }
-    return null;
-  }
-
-  /** Returns {@link DocsAndPositionsEnum} for the specified
-   *  field & term.  This may return null, if either the
-   *  field or term does not exist, or needsOffsets is
-   *  true but offsets were not indexed for this field. */
-  public final DocsAndPositionsEnum termPositionsEnum(Bits liveDocs, String field, BytesRef term, boolean needsOffsets) throws IOException {
-    assert field != null;
-    assert term != null;
-    final Fields fields = fields();
-    if (fields != null) {
-      final Terms terms = fields.terms(field);
-      if (terms != null) {
-        final TermsEnum termsEnum = terms.iterator(null);
-        if (termsEnum.seekExact(term, true)) {
-          return termsEnum.docsAndPositions(liveDocs, null, needsOffsets);
-        }
-      }
-    }
-    return null;
-  }
-
-  /**
-   * Returns {@link DocsEnum} for the specified field and
-   * {@link TermState}. This may return null, if either the field or the term
-   * does not exists or the {@link TermState} is invalid for the underlying
-   * implementation.*/
-  public final DocsEnum termDocsEnum(Bits liveDocs, String field, BytesRef term, TermState state, boolean needsFreqs) throws IOException {
-    assert state != null;
-    assert field != null;
-    final Fields fields = fields();
-    if (fields != null) {
-      final Terms terms = fields.terms(field);
-      if (terms != null) {
-        final TermsEnum termsEnum = terms.iterator(null);
-        termsEnum.seekExact(term, state);
-        return termsEnum.docs(liveDocs, null, needsFreqs);
-      }
-    }
-    return null;
-  }
-
-  /**
-   * Returns {@link DocsAndPositionsEnum} for the specified field and
-   * {@link TermState}. This may return null, if either the field or the term
-   * does not exists, the {@link TermState} is invalid for the underlying
-   * implementation, or needsOffsets is true but offsets
-   * were not indexed for this field. */
-  public final DocsAndPositionsEnum termPositionsEnum(Bits liveDocs, String field, BytesRef term, TermState state, boolean needsOffsets) throws IOException {
-    assert state != null;
-    assert field != null;
-    final Fields fields = fields();
-    if (fields != null) {
-      final Terms terms = fields.terms(field);
-      if (terms != null) {
-        final TermsEnum termsEnum = terms.iterator(null);
-        termsEnum.seekExact(term, state);
-        return termsEnum.docsAndPositions(liveDocs, null, needsOffsets);
-      }
-    }
-    return null;
-  }
-
  /**
   * Closes files associated with this index.
   * Also saves any new deletions to disk.
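
These term- and postings-access helpers move to AtomicReader; the deleted bodies above show the canonical access sequence, which still applies per leaf. A sketch mirroring the removed docFreq implementation (field and term values are illustrative):

    static int docFreq(AtomicReader leaf, String field, BytesRef text) throws IOException {
      final Fields fields = leaf.fields();
      if (fields == null) return 0;             // leaf has no postings at all
      final Terms terms = fields.terms(field);
      if (terms == null) return 0;              // field not indexed in this leaf
      final TermsEnum termsEnum = terms.iterator(null);
      return termsEnum.seekExact(text, true) ? termsEnum.docFreq() : 0;
    }
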
@@ -823,76 +414,12 @@ public abstract class IndexReader implements Closeable {
   protected abstract void doClose() throws IOException;
 
   /**
-   * Get the {@link FieldInfos} describing all fields in
-   * this reader.  NOTE: do not make any changes to the
-   * returned FieldInfos!
-   *
-   * @lucene.experimental
-   */
-  public abstract FieldInfos getFieldInfos();
-
-  /** Returns the {@link Bits} representing live (not
-   *  deleted) docs.  A set bit indicates the doc ID has not
-   *  been deleted.  If this method returns null it means
-   *  there are no deleted documents (all documents are
-   *  live).
-   *
-   *  The returned instance has been safely published for
-   *  use by multiple threads without additional
-   *  synchronization.
-   *  @lucene.experimental */
-  public abstract Bits getLiveDocs();
-
-  /**
-   * Expert: return the IndexCommit that this reader has
-   * opened.  This method is only implemented by those
-   * readers that correspond to a Directory with its own
-   * segments_N file.
-   *
-   * @lucene.experimental
-   */
-  public IndexCommit getIndexCommit() throws IOException {
-    throw new UnsupportedOperationException("This reader does not support this method.");
-  }
-
-  /** Returns all commit points that exist in the Directory.
-   *  Normally, because the default is {@link
-   *  KeepOnlyLastCommitDeletionPolicy}, there would be only
-   *  one commit point.  But if you're using a custom {@link
-   *  IndexDeletionPolicy} then there could be many commits.
-   *  Once you have a given commit, you can open a reader on
-   *  it by calling {@link IndexReader#open(IndexCommit)}
-   *  There must be at least one commit in
-   *  the Directory, else this method throws {@link
-   *  IndexNotFoundException}.  Note that if a commit is in
-   *  progress while this method is running, that commit
-   *  may or may not be returned.
-   *
-   *  @return a sorted list of {@link IndexCommit}s, from oldest
-   *  to latest. */
-  public static List<IndexCommit> listCommits(Directory dir) throws IOException {
-    return DirectoryReader.listCommits(dir);
-  }
-
-  /** Expert: returns the sequential sub readers that this
-   *  reader is logically composed of. If this reader is not composed
-   *  of sequential child readers, it should return null.
-   *  If this method returns an empty array, that means this
-   *  reader is a null reader (for example a MultiReader
-   *  that has no sub readers).
-   */
-  public IndexReader[] getSequentialSubReaders() {
-    ensureOpen();
-    return null;
-  }
-
-  /**
-   * Expert: Returns a the root {@link ReaderContext} for this
+   * Expert: Returns a the root {@link IndexReaderContext} for this
   * {@link IndexReader}'s sub-reader tree. Iff this reader is composed of sub
   * readers ,ie. this reader being a composite reader, this method returns a
   * {@link CompositeReaderContext} holding the reader's direct children as well as a
   * view of the reader tree's atomic leaf contexts. All sub-
-   * {@link ReaderContext} instances referenced from this readers top-level
+   * {@link IndexReaderContext} instances referenced from this readers top-level
   * context are private to this reader and are not shared with another context
   * tree. For example, IndexSearcher uses this API to drive searching by one
   * atomic leaf reader at a time. If this reader is not composed of child
@@ -905,7 +432,7 @@ public abstract class IndexReader implements Closeable {
   *
   * @lucene.experimental
   */
-  public abstract ReaderContext getTopReaderContext();
+  public abstract IndexReaderContext getTopReaderContext();
 
  /** Expert: Returns a key for this IndexReader, so FieldCache/CachingWrapperFilter can find
   *  it again.
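
getTopReaderContext keeps its shape but now returns the new top-level IndexReaderContext type (introduced as its own file below). A leaf-walk sketch, assuming a composite (Directory-backed) reader so that leaves() is non-null, and assuming the new AtomicReaderContext keeps the docBase field of the inner class it replaces:

    IndexReaderContext top = reader.getTopReaderContext();
    for (AtomicReaderContext leafCtx : top.leaves()) {
      IndexReader leaf = leafCtx.reader();      // accessor replaces the old public 'reader' field
      int docBase = leafCtx.docBase;            // leaf-local id + docBase = top-level id
    }
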
@@ -925,190 +452,14 @@ public abstract class IndexReader implements Closeable {
     return this;
   }
 
-  /** Returns the number of unique terms (across all fields)
-   *  in this reader.
-   *
-   *  @return number of unique terms or -1 if this count
-   *  cannot be easily determined (eg Multi*Readers).
-   *  Instead, you should call {@link
-   *  #getSequentialSubReaders} and ask each sub reader for
-   *  its unique term count. */
-  public final long getUniqueTermCount() throws IOException {
-    if (!getTopReaderContext().isAtomic) {
-      return -1;
-    }
-    final Fields fields = fields();
-    if (fields == null) {
-      return 0;
-    }
-    return fields.getUniqueTermCount();
+  public final int docFreq(Term term) throws IOException {
+    return docFreq(term.field(), term.bytes());
  }
 
-  /** For IndexReader implementations that use
-   *  TermInfosReader to read terms, this returns the
-   *  current indexDivisor as specified when the reader was
-   *  opened.
-   */
-  public int getTermInfosIndexDivisor() {
-    throw new UnsupportedOperationException("This reader does not support this method.");
-  }
-
-  /**
-   * Returns {@link DocValues} for this field.
-   * This method may return null if the reader has no per-document
-   * values stored.
-   *
-   * <p><b>NOTE</b>: if this is a multi reader ({@link
-   * #getSequentialSubReaders} is not null) then this
-   * method will throw UnsupportedOperationException.  If
-   * you really need {@link DocValues} for such a reader,
-   * use {@link MultiDocValues#getDocValues(IndexReader,String)}.  However, for
-   * performance reasons, it's best to get all sub-readers
-   * using {@link ReaderUtil#gatherSubReaders} and iterate
-   * through them yourself. */
-  public abstract DocValues docValues(String field) throws IOException;
-
-  public abstract DocValues normValues(String field) throws IOException;
-
-  private volatile Fields fields;
-
-  /** @lucene.internal */
-  void storeFields(Fields fields) {
-    ensureOpen();
-    this.fields = fields;
-  }
-
-  /** @lucene.internal */
-  Fields retrieveFields() {
-    ensureOpen();
-    return fields;
-  }
-
-  /**
-   * A struct like class that represents a hierarchical relationship between
-   * {@link IndexReader} instances.
-   * @lucene.experimental
-   */
-  public static abstract class ReaderContext {
-    /** The reader context for this reader's immediate parent, or null if none */
-    public final ReaderContext parent;
-    /** The actual reader */
-    public final IndexReader reader;
-    /** <code>true</code> iff the reader is an atomic reader */
-    public final boolean isAtomic;
-    /** <code>true</code> if this context struct represents the top level reader within the hierarchical context */
-    public final boolean isTopLevel;
-    /** the doc base for this reader in the parent, <tt>0</tt> if parent is null */
-    public final int docBaseInParent;
-    /** the ord for this reader in the parent, <tt>0</tt> if parent is null */
-    public final int ordInParent;
-
-    ReaderContext(ReaderContext parent, IndexReader reader,
-        boolean isAtomic, int ordInParent, int docBaseInParent) {
-      this.parent = parent;
-      this.reader = reader;
-      this.isAtomic = isAtomic;
-      this.docBaseInParent = docBaseInParent;
-      this.ordInParent = ordInParent;
-      this.isTopLevel = parent==null;
-    }
-
-    /**
-     * Returns the context's leaves if this context is a top-level context
-     * otherwise <code>null</code>.
-     * <p>
-     * Note: this is convenience method since leaves can always be obtained by
-     * walking the context tree.
-     */
-    public AtomicReaderContext[] leaves() {
-      return null;
-    }
-
-    /**
-     * Returns the context's children iff this context is a composite context
-     * otherwise <code>null</code>.
-     * <p>
-     * Note: this method is a convenience method to prevent
-     * <code>instanceof</code> checks and type-casts to
-     * {@link CompositeReaderContext}.
-     */
-    public ReaderContext[] children() {
-      return null;
-    }
-  }
-
-  /**
-   * {@link ReaderContext} for composite {@link IndexReader} instance.
-   * @lucene.experimental
-   */
-  public static final class CompositeReaderContext extends ReaderContext {
-    /** the composite readers immediate children */
-    public final ReaderContext[] children;
-    /** the composite readers leaf reader contexts if this is the top level reader in this context */
-    public final AtomicReaderContext[] leaves;
-
-    /**
-     * Creates a {@link CompositeReaderContext} for intermediate readers that aren't
-     * not top-level readers in the current context
-     */
-    public CompositeReaderContext(ReaderContext parent, IndexReader reader,
-        int ordInParent, int docbaseInParent, ReaderContext[] children) {
-      this(parent, reader, ordInParent, docbaseInParent, children, null);
-    }
-
-    /**
-     * Creates a {@link CompositeReaderContext} for top-level readers with parent set to <code>null</code>
-     */
-    public CompositeReaderContext(IndexReader reader, ReaderContext[] children, AtomicReaderContext[] leaves) {
-      this(null, reader, 0, 0, children, leaves);
-    }
-
-    private CompositeReaderContext(ReaderContext parent, IndexReader reader,
-        int ordInParent, int docbaseInParent, ReaderContext[] children,
-        AtomicReaderContext[] leaves) {
-      super(parent, reader, false, ordInParent, docbaseInParent);
-      this.children = children;
-      this.leaves = leaves;
-    }
-
-    @Override
-    public AtomicReaderContext[] leaves() {
-      return leaves;
-    }
-
-
-    @Override
-    public ReaderContext[] children() {
-      return children;
-    }
-  }
-
-  /**
-   * {@link ReaderContext} for atomic {@link IndexReader} instances
-   * @lucene.experimental
-   */
-  public static final class AtomicReaderContext extends ReaderContext {
-    /** The readers ord in the top-level's leaves array */
-    public final int ord;
-    /** The readers absolute doc base */
-    public final int docBase;
-    /**
-     * Creates a new {@link AtomicReaderContext}
-     */
-    public AtomicReaderContext(ReaderContext parent, IndexReader reader,
-        int ord, int docBase, int leafOrd, int leafDocBase) {
-      super(parent, reader, true, ord, docBase);
-      assert reader.getSequentialSubReaders() == null : "Atomic readers must not have subreaders";
-      this.ord = leafOrd;
-      this.docBase = leafDocBase;
-    }
-
-    /**
-     * Creates a new {@link AtomicReaderContext} for a atomic reader without an immediate
-     * parent.
-     */
-    public AtomicReaderContext(IndexReader atomicReader) {
-      this(null, atomicReader, 0, 0, 0, 0);
-    }
-  }
+  /** Returns the number of documents containing the term
+   * <code>t</code>.  This method returns 0 if the term or
+   * field does not exists.  This method does not take into
+   * account deleted documents that have not yet been merged
+   * away. */
+  public abstract int docFreq(String field, BytesRef term) throws IOException;
 }
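
After this hunk IndexReader keeps only the term-statistics entry points: docFreq(Term) delegates to the now-abstract docFreq(String, BytesRef), which an atomic reader answers from its one segment and a composite reader typically answers by summing over its children. For example:

    Term term = new Term("title", "lucene");   // illustrative field and term
    int df = reader.docFreq(term);             // == reader.docFreq("title", new BytesRef("lucene"))

The ReaderContext inner classes deleted above are promoted to top-level classes; the base class follows as a new file.
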
@@ -0,0 +1,64 @@
+package org.apache.lucene.index;
+
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/**
+ * A struct like class that represents a hierarchical relationship between
+ * {@link IndexReader} instances.
+ * @lucene.experimental
+ */
+public abstract class IndexReaderContext {
+  /** The reader context for this reader's immediate parent, or null if none */
+  public final CompositeReaderContext parent;
+  /** <code>true</code> if this context struct represents the top level reader within the hierarchical context */
+  public final boolean isTopLevel;
+  /** the doc base for this reader in the parent, <tt>0</tt> if parent is null */
+  public final int docBaseInParent;
+  /** the ord for this reader in the parent, <tt>0</tt> if parent is null */
+  public final int ordInParent;
+
+  IndexReaderContext(CompositeReaderContext parent, int ordInParent, int docBaseInParent) {
+    if (!(this instanceof CompositeReaderContext || this instanceof AtomicReaderContext))
+      throw new Error("This class should never be extended by custom code!");
+    this.parent = parent;
+    this.docBaseInParent = docBaseInParent;
+    this.ordInParent = ordInParent;
+    this.isTopLevel = parent==null;
+  }
+
+  public abstract IndexReader reader();
+
+  /**
+   * Returns the context's leaves if this context is a top-level context
+   * otherwise <code>null</code>.
+   * <p>
+   * Note: this is convenience method since leaves can always be obtained by
+   * walking the context tree.
+   */
+  public abstract AtomicReaderContext[] leaves();
+
+  /**
+   * Returns the context's children iff this context is a composite context
+   * otherwise <code>null</code>.
+   * <p>
+   * Note: this method is a convenience method to prevent
+   * <code>instanceof</code> checks and type-casts to
+   * {@link CompositeReaderContext}.
+   */
+  public abstract IndexReaderContext[] children();
+}
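
Because children() is defined to return null exactly for atomic contexts, callers can branch on the context without instanceof checks on the reader itself. A recursive traversal sketch:

    static void visit(IndexReaderContext ctx) {
      IndexReaderContext[] children = ctx.children();
      if (children == null) {
        // the constructor guarantees only two subclasses exist, so this cast is safe
        AtomicReaderContext leaf = (AtomicReaderContext) ctx;
        // ... use leaf.reader() ...
      } else {
        for (IndexReaderContext child : children) {
          visit(child);
        }
      }
    }

Note the old public reader field is replaced by the reader() accessor, which the two subclasses can covariantly narrow.
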
@@ -134,7 +134,7 @@ public final class IndexUpgrader {
   }
 
   public void upgrade() throws IOException {
-    if (!IndexReader.indexExists(dir)) {
+    if (!DirectoryReader.indexExists(dir)) {
      throw new IndexNotFoundException(dir.toString());
    }
 
@@ -264,7 +264,7 @@ public class IndexWriter implements Closeable, TwoPhaseCommit {
   // The PayloadProcessorProvider to use when segments are merged
   private PayloadProcessorProvider payloadProcessorProvider;
 
-  IndexReader getReader() throws IOException {
+  DirectoryReader getReader() throws IOException {
     return getReader(true);
   }
 

@@ -327,7 +327,7 @@ public class IndexWriter implements Closeable, TwoPhaseCommit {
   *
   * @throws IOException
   */
-  IndexReader getReader(boolean applyAllDeletes) throws IOException {
+  DirectoryReader getReader(boolean applyAllDeletes) throws IOException {
    ensureOpen();
 
    final long tStart = System.currentTimeMillis();

@@ -339,7 +339,7 @@ public class IndexWriter implements Closeable, TwoPhaseCommit {
    // obtained during this flush are pooled, the first time
    // this method is called:
    poolReaders = true;
-    final IndexReader r;
+    final DirectoryReader r;
    doBeforeFlush();
    boolean anySegmentFlushed = false;
    /*

@@ -871,7 +871,7 @@ public class IndexWriter implements Closeable, TwoPhaseCommit {
      create = false;
    } else {
      // CREATE_OR_APPEND - create only if an index does not exist
-      create = !IndexReader.indexExists(directory);
+      create = !DirectoryReader.indexExists(directory);
    }
 
    // If index is too old, reading the segments will throw

@@ -2631,7 +2631,7 @@ public class IndexWriter implements Closeable, TwoPhaseCommit {
   * @param commitUserData Opaque Map (String->String)
   *  that's recorded into the segments file in the index,
   *  and retrievable by {@link
-  *  IndexReader#getCommitUserData}.  Note that when
+  *  DirectoryReader#getCommitUserData}.  Note that when
   *  IndexWriter commits itself during {@link #close}, the
   *  commitUserData is unchanged (just carried over from
   *  the prior commit).  If this is null then the previous

@@ -3954,7 +3954,7 @@ public class IndexWriter implements Closeable, TwoPhaseCommit {
   * <p><b>NOTE</b>: warm is called before any deletes have
   *  been carried over to the merged segment. */
  public static abstract class IndexReaderWarmer {
-    public abstract void warm(IndexReader reader) throws IOException;
+    public abstract void warm(AtomicReader reader) throws IOException;
  }
 
  private void handleOOM(OutOfMemoryError oom, String location) {
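
The warmer callback narrowing to AtomicReader matches what IndexWriter actually hands it: a single newly merged segment. A configuration sketch; the Version constant and analyzer variable are assumptions about the surrounding setup, and the fields() call is just an illustrative way to touch the segment:

    IndexWriterConfig conf = new IndexWriterConfig(Version.LUCENE_40, analyzer);
    conf.setMergedSegmentWarmer(new IndexWriter.IndexReaderWarmer() {
      @Override
      public void warm(AtomicReader reader) throws IOException {
        reader.fields();   // pre-load postings so the first NRT search after the merge is warm
      }
    });
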
@@ -90,7 +90,7 @@ public final class IndexWriterConfig implements Cloneable {
   public final static boolean DEFAULT_READER_POOLING = false;
 
   /** Default value is 1. Change using {@link #setReaderTermsIndexDivisor(int)}. */
-  public static final int DEFAULT_READER_TERMS_INDEX_DIVISOR = IndexReader.DEFAULT_TERMS_INDEX_DIVISOR;
+  public static final int DEFAULT_READER_TERMS_INDEX_DIVISOR = DirectoryReader.DEFAULT_TERMS_INDEX_DIVISOR;
 
   /** Default value is 1945. Change using {@link #setRAMPerThreadHardLimitMB(int)} */
   public static final int DEFAULT_RAM_PER_THREAD_HARD_LIMIT_MB = 1945;
@@ -31,10 +31,10 @@ import org.apache.lucene.util.InfoStream;
 public class MergeState {
 
   public static class IndexReaderAndLiveDocs {
-    public final IndexReader reader;
+    public final AtomicReader reader;
     public final Bits liveDocs;
 
-    public IndexReaderAndLiveDocs(IndexReader reader, Bits liveDocs) {
+    public IndexReaderAndLiveDocs(AtomicReader reader, Bits liveDocs) {
       this.reader = reader;
       this.liveDocs = liveDocs;
     }
@@ -33,7 +33,7 @@ import org.apache.lucene.util.ReaderUtil.Gather;
 import org.apache.lucene.util.packed.PackedInts.Reader;
 
 /**
- * A wrapper for compound IndexReader providing access to per segment
+ * A wrapper for CompositeIndexReader providing access to per segment
  * {@link DocValues}
  *
  * @lucene.experimental

@@ -43,11 +43,11 @@ public class MultiDocValues extends DocValues {
 
   private static DocValuesPuller DEFAULT_PULLER = new DocValuesPuller();
   private static final DocValuesPuller NORMS_PULLER = new DocValuesPuller() {
-    public DocValues pull(IndexReader reader, String field) throws IOException {
+    public DocValues pull(AtomicReader reader, String field) throws IOException {
       return reader.normValues(field);
     }
 
-    public boolean stopLoadingOnNull(IndexReader reader, String field) throws IOException {
+    public boolean stopLoadingOnNull(AtomicReader reader, String field) throws IOException {
       // for norms we drop all norms if one leaf reader has no norms and the field is present
       FieldInfos fieldInfos = reader.getFieldInfos();
       FieldInfo fieldInfo = fieldInfos.fieldInfo(field);

@@ -69,11 +69,11 @@ public class MultiDocValues extends DocValues {
   }
 
   private static class DocValuesPuller {
-    public DocValues pull(IndexReader reader, String field) throws IOException {
+    public DocValues pull(AtomicReader reader, String field) throws IOException {
       return reader.docValues(field);
     }
 
-    public boolean stopLoadingOnNull(IndexReader reader, String field) throws IOException {
+    public boolean stopLoadingOnNull(AtomicReader reader, String field) throws IOException {
       return false;
     }
   }

@@ -115,11 +115,13 @@ public class MultiDocValues extends DocValues {
 
 
   private static DocValues getDocValues(IndexReader r, final String field, final DocValuesPuller puller) throws IOException {
-    final IndexReader[] subs = r.getSequentialSubReaders();
-    if (subs == null) {
+    if (r instanceof AtomicReader) {
       // already an atomic reader
-      return puller.pull(r, field);
-    } else if (subs.length == 0) {
+      return puller.pull((AtomicReader) r, field);
+    }
+    assert r instanceof CompositeReader;
+    final IndexReader[] subs = ((CompositeReader) r).getSequentialSubReaders();
+    if (subs.length == 0) {
       // no fields
       return null;
     } else if (subs.length == 1) {

@@ -136,7 +138,7 @@ public class MultiDocValues extends DocValues {
       new ReaderUtil.Gather(r) {
         boolean stop = false;
         @Override
-        protected void add(int base, IndexReader r) throws IOException {
+        protected void add(int base, AtomicReader r) throws IOException {
           if (stop) {
             return;
           }
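
The puller now receives each AtomicReader directly, and composite readers are unwrapped once up front instead of relying on getSequentialSubReaders() returning null for leaves. Calling code is unchanged; a sketch (the field name is illustrative):

    DocValues merged = MultiDocValues.getDocValues(reader, "popularity");
    if (merged != null) {
      // an atomic reader hands back its own DocValues; a composite gets a merged view
    }
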
@@ -21,6 +21,8 @@ import java.io.IOException;
 import java.util.ArrayList;
 import java.util.List;
 import java.util.Map;
+import java.util.Collection;
+import java.util.HashSet;
 import java.util.concurrent.ConcurrentHashMap;
 
 import org.apache.lucene.util.Bits;

@@ -59,59 +61,50 @@ public final class MultiFields extends Fields {
   *  Gather}) and iterate through them
   *  yourself. */
  public static Fields getFields(IndexReader r) throws IOException {
-    final IndexReader[] subs = r.getSequentialSubReaders();
-    if (subs == null) {
+    if (r instanceof AtomicReader) {
      // already an atomic reader
-      return r.fields();
-    } else if (subs.length == 0) {
+      return ((AtomicReader) r).fields();
+    }
+    assert r instanceof CompositeReader;
+    final IndexReader[] subs = ((CompositeReader) r).getSequentialSubReaders();
+    if (subs.length == 0) {
      // no fields
      return null;
-    } else if (subs.length == 1) {
-      return getFields(subs[0]);
    } else {
+      final List<Fields> fields = new ArrayList<Fields>();
+      final List<ReaderUtil.Slice> slices = new ArrayList<ReaderUtil.Slice>();
 
-      Fields currentFields = r.retrieveFields();
-      if (currentFields == null) {
-
-        final List<Fields> fields = new ArrayList<Fields>();
-        final List<ReaderUtil.Slice> slices = new ArrayList<ReaderUtil.Slice>();
-
-        new ReaderUtil.Gather(r) {
-          @Override
-          protected void add(int base, IndexReader r) throws IOException {
-            final Fields f = r.fields();
-            if (f != null) {
-              fields.add(f);
-              slices.add(new ReaderUtil.Slice(base, r.maxDoc(), fields.size()-1));
-            }
+      new ReaderUtil.Gather(r) {
+        @Override
+        protected void add(int base, AtomicReader r) throws IOException {
+          final Fields f = r.fields();
+          if (f != null) {
+            fields.add(f);
+            slices.add(new ReaderUtil.Slice(base, r.maxDoc(), fields.size()-1));
          }
-        }.run();
-
-        if (fields.size() == 0) {
-          return null;
-        } else if (fields.size() == 1) {
-          currentFields = fields.get(0);
-        } else {
-          currentFields = new MultiFields(fields.toArray(Fields.EMPTY_ARRAY),
-                                          slices.toArray(ReaderUtil.Slice.EMPTY_ARRAY));
        }
-        r.storeFields(currentFields);
+      }.run();
+
+      if (fields.isEmpty()) {
+        return null;
+      } else if (fields.size() == 1) {
+        return fields.get(0);
+      } else {
+        return new MultiFields(fields.toArray(Fields.EMPTY_ARRAY),
+                               slices.toArray(ReaderUtil.Slice.EMPTY_ARRAY));
      }
-      return currentFields;
    }
  }
 
  public static Bits getLiveDocs(IndexReader r) {
-    Bits result;
    if (r.hasDeletions()) {
 
      final List<Bits> liveDocs = new ArrayList<Bits>();
      final List<Integer> starts = new ArrayList<Integer>();
 
      try {
        final int maxDoc = new ReaderUtil.Gather(r) {
          @Override
-          protected void add(int base, IndexReader r) throws IOException {
+          protected void add(int base, AtomicReader r) throws IOException {
            // record all liveDocs, even if they are null
            liveDocs.add(r.getLiveDocs());
            starts.add(base);
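
getFields keeps its contract (null when no field has postings) while the per-reader cache (storeFields/retrieveFields) disappears; each call now rebuilds the merged view, as the rewritten body shows. Typical composite-wide access, matching the getLiveDocs cleanup continued in the next hunk:

    Fields fields = MultiFields.getFields(reader);    // null => no postings anywhere
    if (fields != null) {
      Terms terms = fields.terms("body");             // illustrative field name
      // ... iterate terms/postings across all leaves ...
    }
    Bits liveDocs = MultiFields.getLiveDocs(reader);  // null => no deletions
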
@@ -126,16 +119,13 @@ public final class MultiFields extends Fields {
       assert liveDocs.size() > 0;
       if (liveDocs.size() == 1) {
         // Only one actual sub reader -- optimize this case
-        result = liveDocs.get(0);
+        return liveDocs.get(0);
       } else {
-        result = new MultiBits(liveDocs, starts, true);
+        return new MultiBits(liveDocs, starts, true);
       }
-
     } else {
-      result = null;
+      return null;
     }
-
-    return result;
   }
 
   /** This method may return null if the field does not exist.*/
@@ -237,6 +227,11 @@ public final class MultiFields extends Fields {
     return result;
   }
 
+  @Override
+  public int getUniqueFieldCount() {
+    return -1;
+  }
+
   public static long totalTermFreq(IndexReader r, String field, BytesRef text) throws IOException {
     final Terms terms = getTerms(r, field);
     if (terms != null) {
@@ -248,9 +243,26 @@ public final class MultiFields extends Fields {
     return 0;
   }
 
-  @Override
-  public int getUniqueFieldCount() {
-    return -1;
+  /** Call this to get the (merged) FieldInfos for a
+   *  composite reader */
+  public static FieldInfos getMergedFieldInfos(IndexReader reader) {
+    final List<AtomicReader> subReaders = new ArrayList<AtomicReader>();
+    ReaderUtil.gatherSubReaders(subReaders, reader);
+    final FieldInfos fieldInfos = new FieldInfos();
+    for(AtomicReader subReader : subReaders) {
+      fieldInfos.add(subReader.getFieldInfos());
+    }
+    return fieldInfos;
+  }
+
+  public static Collection<String> getIndexedFields(IndexReader reader) {
+    final Collection<String> fields = new HashSet<String>();
+    for(FieldInfo fieldInfo : getMergedFieldInfos(reader)) {
+      if (fieldInfo.isIndexed) {
+        fields.add(fieldInfo.name);
+      }
+    }
+    return fields;
   }
 }
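
The two new helpers replace ad-hoc walks over getSequentialSubReaders() for schema questions (getFieldInfos itself now lives on AtomicReader). For example:

    FieldInfos merged = MultiFields.getMergedFieldInfos(reader);
    for (FieldInfo fi : merged) {
      // fi.name, fi.isIndexed, ... aggregated across all leaves
    }
    Collection<String> indexed = MultiFields.getIndexedFields(reader);
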
@@ -22,7 +22,7 @@ import java.io.IOException;
 /** An IndexReader which reads multiple indexes, appending
  *  their content. */
 public class MultiReader extends BaseMultiReader<IndexReader> {
-  private final boolean[] decrefOnClose; // remember which subreaders to decRef on close
+  private final boolean closeSubReaders;
 
   /**
    * <p>Construct a MultiReader aggregating the named set of (sub)readers.
@@ -41,80 +41,23 @@ public class MultiReader extends BaseMultiReader<IndexReader> {
    */
   public MultiReader(IndexReader[] subReaders, boolean closeSubReaders) throws IOException {
     super(subReaders.clone());
-    decrefOnClose = new boolean[subReaders.length];
-    for (int i = 0; i < subReaders.length; i++) {
-      if (!closeSubReaders) {
+    this.closeSubReaders = closeSubReaders;
+    if (!closeSubReaders) {
+      for (int i = 0; i < subReaders.length; i++) {
         subReaders[i].incRef();
-        decrefOnClose[i] = true;
-      } else {
-        decrefOnClose[i] = false;
       }
     }
   }
 
-  // used only by openIfChaged
-  private MultiReader(IndexReader[] subReaders, boolean[] decrefOnClose)
-                     throws IOException {
-    super(subReaders);
-    this.decrefOnClose = decrefOnClose;
-  }
-
-  @Override
-  protected synchronized IndexReader doOpenIfChanged() throws CorruptIndexException, IOException {
-    ensureOpen();
-
-    boolean changed = false;
-    IndexReader[] newSubReaders = new IndexReader[subReaders.length];
-
-    boolean success = false;
-    try {
-      for (int i = 0; i < subReaders.length; i++) {
-        final IndexReader newSubReader = IndexReader.openIfChanged(subReaders[i]);
-        if (newSubReader != null) {
-          newSubReaders[i] = newSubReader;
-          changed = true;
-        } else {
-          newSubReaders[i] = subReaders[i];
-        }
-      }
-      success = true;
-    } finally {
-      if (!success && changed) {
-        for (int i = 0; i < newSubReaders.length; i++) {
-          if (newSubReaders[i] != subReaders[i]) {
-            try {
-              newSubReaders[i].close();
-            } catch (IOException ignore) {
-              // keep going - we want to clean up as much as possible
-            }
-          }
-        }
-      }
-    }
-
-    if (changed) {
-      boolean[] newDecrefOnClose = new boolean[subReaders.length];
-      for (int i = 0; i < subReaders.length; i++) {
-        if (newSubReaders[i] == subReaders[i]) {
-          newSubReaders[i].incRef();
-          newDecrefOnClose[i] = true;
-        }
-      }
-      return new MultiReader(newSubReaders, newDecrefOnClose);
-    } else {
-      return null;
-    }
-  }
-
   @Override
   protected synchronized void doClose() throws IOException {
     IOException ioe = null;
     for (int i = 0; i < subReaders.length; i++) {
       try {
-        if (decrefOnClose[i]) {
-          subReaders[i].decRef();
-        } else {
+        if (closeSubReaders) {
           subReaders[i].close();
+        } else {
+          subReaders[i].decRef();
         }
       } catch (IOException e) {
         if (ioe == null) ioe = e;
@@ -123,25 +66,4 @@ public class MultiReader extends BaseMultiReader<IndexReader> {
     // throw the first exception
     if (ioe != null) throw ioe;
   }
-
-  @Override
-  public boolean isCurrent() throws CorruptIndexException, IOException {
-    ensureOpen();
-    for (int i = 0; i < subReaders.length; i++) {
-      if (!subReaders[i].isCurrent()) {
-        return false;
-      }
-    }
-
-    // all subreaders are up to date
-    return true;
-  }
-
-  /** Not implemented.
-   * @throws UnsupportedOperationException
-   */
-  @Override
-  public long getVersion() {
-    throw new UnsupportedOperationException("MultiReader does not support this method.");
-  }
 }
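Editor's note: the per-subreader decrefOnClose[] array collapses into a single closeSubReaders flag, since the array could only ever be uniformly true or uniformly false. A sketch of the ownership contract, assuming two already-open readers r1 and r2:

    // closeSubReaders=false: the MultiReader incRefs each child and decRefs
    // them on close, so the caller retains ownership of r1/r2.
    MultiReader multi = new MultiReader(new IndexReader[] { r1, r2 }, false);
    try {
      // ... search against multi ...
    } finally {
      multi.close(); // decRefs the children; the caller still closes r1/r2
    }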
@@ -22,10 +22,9 @@ import java.util.*;
 
 import org.apache.lucene.util.Bits;
 import org.apache.lucene.util.BytesRef;
-import org.apache.lucene.util.ReaderUtil;
 
 
-/** An IndexReader which reads multiple, parallel indexes.  Each index added
+/** An AtomicIndexReader which reads multiple, parallel indexes.  Each index added
  * must have the same number of documents, but typically each contains
  * different fields.  Each document contains the union of the fields of all
  * documents with the same document number.  When searching, matches for a
@@ -42,15 +41,14 @@ import org.apache.lucene.util.ReaderUtil;
  * same order to the other indexes. <em>Failure to do so will result in
  * undefined behavior</em>.
 */
-public class ParallelReader extends IndexReader {
-  private List<IndexReader> readers = new ArrayList<IndexReader>();
+public class ParallelReader extends AtomicReader {
+  private List<AtomicReader> readers = new ArrayList<AtomicReader>();
   private List<Boolean> decrefOnClose = new ArrayList<Boolean>(); // remember which subreaders to decRef on close
   boolean incRefReaders = false;
-  private SortedMap<String,IndexReader> fieldToReader = new TreeMap<String,IndexReader>();
-  private Map<IndexReader,Collection<String>> readerToFields = new HashMap<IndexReader,Collection<String>>();
-  private List<IndexReader> storedFieldReaders = new ArrayList<IndexReader>();
+  private SortedMap<String,AtomicReader> fieldToReader = new TreeMap<String,AtomicReader>();
+  private Map<AtomicReader,Collection<String>> readerToFields = new HashMap<AtomicReader,Collection<String>>();
+  private List<AtomicReader> storedFieldReaders = new ArrayList<AtomicReader>();
   private Map<String, DocValues> normsCache = new HashMap<String,DocValues>();
-  private final ReaderContext topLevelReaderContext = new AtomicReaderContext(this);
   private int maxDoc;
   private int numDocs;
   private boolean hasDeletions;
@@ -77,7 +75,7 @@ public class ParallelReader extends IndexReader {
   @Override
   public String toString() {
     final StringBuilder buffer = new StringBuilder("ParallelReader(");
-    final Iterator<IndexReader> iter = readers.iterator();
+    final Iterator<AtomicReader> iter = readers.iterator();
     if (iter.hasNext()) {
       buffer.append(iter.next());
     }
@@ -88,25 +86,25 @@ public class ParallelReader extends IndexReader {
     return buffer.toString();
   }
 
-  /** Add an IndexReader.
+  /** Add an AtomicIndexReader.
    * @throws IOException if there is a low-level IO error
    */
-  public void add(IndexReader reader) throws IOException {
+  public void add(AtomicReader reader) throws IOException {
     ensureOpen();
     add(reader, false);
   }
 
-  /** Add an IndexReader whose stored fields will not be returned.  This can
+  /** Add an AtomicIndexReader whose stored fields will not be returned.  This can
    * accelerate search when stored fields are only needed from a subset of
    * the IndexReaders.
    *
    * @throws IllegalArgumentException if not all indexes contain the same number
    *     of documents
    * @throws IllegalArgumentException if not all indexes have the same value
-   *     of {@link IndexReader#maxDoc()}
+   *     of {@link AtomicReader#maxDoc()}
    * @throws IOException if there is a low-level IO error
    */
-  public void add(IndexReader reader, boolean ignoreStoredFields)
+  public void add(AtomicReader reader, boolean ignoreStoredFields)
     throws IOException {
 
     ensureOpen();
@@ -123,13 +121,13 @@ public class ParallelReader extends IndexReader {
       throw new IllegalArgumentException
         ("All readers must have same numDocs: "+numDocs+"!="+reader.numDocs());
 
-    final FieldInfos readerFieldInfos = ReaderUtil.getMergedFieldInfos(reader);
+    final FieldInfos readerFieldInfos = MultiFields.getMergedFieldInfos(reader);
     for(FieldInfo fieldInfo : readerFieldInfos) { // update fieldToReader map
       // NOTE: first reader having a given field "wins":
       if (fieldToReader.get(fieldInfo.name) == null) {
         fieldInfos.add(fieldInfo);
         fieldToReader.put(fieldInfo.name, reader);
-        this.fields.addField(fieldInfo.name, MultiFields.getFields(reader).terms(fieldInfo.name));
+        this.fields.addField(fieldInfo.name, reader.terms(fieldInfo.name));
       }
     }
 
@@ -205,7 +203,7 @@ public class ParallelReader extends IndexReader {
   @Override
   public Bits getLiveDocs() {
     ensureOpen();
-    return MultiFields.getLiveDocs(readers.get(0));
+    return readers.get(0).getLiveDocs();
   }
 
   @Override
@@ -214,88 +212,6 @@ public class ParallelReader extends IndexReader {
     return fields;
   }
 
-  /**
-   * Tries to reopen the subreaders.
-   * <br>
-   * If one or more subreaders could be re-opened (i. e. subReader.reopen()
-   * returned a new instance != subReader), then a new ParallelReader instance
-   * is returned, otherwise null is returned.
-   * <p>
-   * A re-opened instance might share one or more subreaders with the old
-   * instance. Index modification operations result in undefined behavior
-   * when performed before the old instance is closed.
-   * (see {@link IndexReader#openIfChanged}).
-   * <p>
-   * If subreaders are shared, then the reference count of those
-   * readers is increased to ensure that the subreaders remain open
-   * until the last referring reader is closed.
-   *
-   * @throws CorruptIndexException if the index is corrupt
-   * @throws IOException if there is a low-level IO error
-   */
-  @Override
-  protected synchronized IndexReader doOpenIfChanged() throws CorruptIndexException, IOException {
-    ensureOpen();
-
-    boolean reopened = false;
-    List<IndexReader> newReaders = new ArrayList<IndexReader>();
-
-    boolean success = false;
-
-    try {
-      for (final IndexReader oldReader : readers) {
-        IndexReader newReader = null;
-        newReader = IndexReader.openIfChanged(oldReader);
-        if (newReader != null) {
-          reopened = true;
-        } else {
-          newReader = oldReader;
-        }
-        newReaders.add(newReader);
-      }
-      success = true;
-    } finally {
-      if (!success && reopened) {
-        for (int i = 0; i < newReaders.size(); i++) {
-          IndexReader r = newReaders.get(i);
-          if (r != readers.get(i)) {
-            try {
-              r.close();
-            } catch (IOException ignore) {
-              // keep going - we want to clean up as much as possible
-            }
-          }
-        }
-      }
-    }
-
-    if (reopened) {
-      List<Boolean> newDecrefOnClose = new ArrayList<Boolean>();
-      // TODO: maybe add a special reopen-ctor for norm-copying?
-      ParallelReader pr = new ParallelReader();
-      for (int i = 0; i < readers.size(); i++) {
-        IndexReader oldReader = readers.get(i);
-        IndexReader newReader = newReaders.get(i);
-        if (newReader == oldReader) {
-          newDecrefOnClose.add(Boolean.TRUE);
-          newReader.incRef();
-        } else {
-          // this is a new subreader instance, so on close() we don't
-          // decRef but close it
-          newDecrefOnClose.add(Boolean.FALSE);
-        }
-        pr.add(newReader, !storedFieldReaders.contains(oldReader));
-      }
-      pr.decrefOnClose = newDecrefOnClose;
-      pr.incRefReaders = incRefReaders;
-      return pr;
-    } else {
-      // No subreader was refreshed
-      return null;
-    }
-  }
-
-
   @Override
   public int numDocs() {
     // Don't call ensureOpen() here (it could affect performance)
@@ -317,7 +233,7 @@ public class ParallelReader extends IndexReader {
   @Override
   public void document(int docID, StoredFieldVisitor visitor) throws CorruptIndexException, IOException {
     ensureOpen();
-    for (final IndexReader reader: storedFieldReaders) {
+    for (final AtomicReader reader: storedFieldReaders) {
       reader.document(docID, visitor);
     }
   }
@@ -327,7 +243,7 @@ public class ParallelReader extends IndexReader {
   public Fields getTermVectors(int docID) throws IOException {
     ensureOpen();
     ParallelFields fields = new ParallelFields();
-    for (Map.Entry<String,IndexReader> ent : fieldToReader.entrySet()) {
+    for (Map.Entry<String,AtomicReader> ent : fieldToReader.entrySet()) {
       String fieldName = ent.getKey();
       Terms vector = ent.getValue().getTermVector(docID, fieldName);
       if (vector != null) {
@@ -341,44 +257,13 @@ public class ParallelReader extends IndexReader {
   @Override
   public boolean hasNorms(String field) throws IOException {
     ensureOpen();
-    IndexReader reader = fieldToReader.get(field);
+    AtomicReader reader = fieldToReader.get(field);
     return reader==null ? false : reader.hasNorms(field);
   }
 
-  @Override
-  public int docFreq(String field, BytesRef term) throws IOException {
-    ensureOpen();
-    IndexReader reader = fieldToReader.get(field);
-    return reader == null? 0 : reader.docFreq(field, term);
-  }
-
-  /**
-   * Checks recursively if all subreaders are up to date.
-   */
-  @Override
-  public boolean isCurrent() throws CorruptIndexException, IOException {
-    ensureOpen();
-    for (final IndexReader reader : readers) {
-      if (!reader.isCurrent()) {
-        return false;
-      }
-    }
-
-    // all subreaders are up to date
-    return true;
-  }
-
-  /** Not implemented.
-   * @throws UnsupportedOperationException
-   */
-  @Override
-  public long getVersion() {
-    throw new UnsupportedOperationException("ParallelReader does not support this method.");
-  }
-
   // for testing
-  IndexReader[] getSubReaders() {
-    return readers.toArray(new IndexReader[readers.size()]);
+  AtomicReader[] getSubReaders() {
+    return readers.toArray(new AtomicReader[readers.size()]);
   }
 
   @Override
@@ -392,17 +277,11 @@ public class ParallelReader extends IndexReader {
     }
   }
 
-  @Override
-  public ReaderContext getTopReaderContext() {
-    ensureOpen();
-    return topLevelReaderContext;
-  }
-
   // TODO: I suspect this is completely untested!!!!!
   @Override
   public DocValues docValues(String field) throws IOException {
-    IndexReader reader = fieldToReader.get(field);
-    return reader == null ? null : MultiDocValues.getDocValues(reader, field);
+    AtomicReader reader = fieldToReader.get(field);
+    return reader == null ? null : reader.docValues(field);
  }
 
   // TODO: I suspect this is completely untested!!!!!
@@ -410,8 +289,8 @@ public class ParallelReader extends IndexReader {
   public synchronized DocValues normValues(String field) throws IOException {
     DocValues values = normsCache.get(field);
     if (values == null) {
-      IndexReader reader = fieldToReader.get(field);
-      values = reader == null ? null : MultiDocValues.getNormDocValues(reader, field);
+      AtomicReader reader = fieldToReader.get(field);
+      values = reader == null ? null : reader.normValues(field);
       normsCache.put(field, values);
     }
     return values;
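Editor's note: ParallelReader now extends AtomicReader and only accepts atomic sub-readers; doOpenIfChanged/isCurrent/getVersion go away with the rest of the composite plumbing. A sketch, assuming dirA and dirB are Directory instances holding parallel indexes with matching docIDs (SlowCompositeReaderWrapper is introduced later in this commit):

    ParallelReader pr = new ParallelReader();
    pr.add(SlowCompositeReaderWrapper.wrap(DirectoryReader.open(dirA)));
    pr.add(SlowCompositeReaderWrapper.wrap(DirectoryReader.open(dirB)));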
@@ -62,7 +62,7 @@ public class PersistentSnapshotDeletionPolicy extends SnapshotDeletionPolicy {
    * keeps a lock on the snapshots directory).
    */
  public static Map<String, String> readSnapshotsInfo(Directory dir) throws IOException {
-    IndexReader r = IndexReader.open(dir);
+    IndexReader r = DirectoryReader.open(dir);
     Map<String, String> snapshots = new HashMap<String, String>();
     try {
       int numDocs = r.numDocs();
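Editor's note: directory-backed readers are now opened through DirectoryReader rather than the removed IndexReader.open. A minimal sketch, assuming `dir` already contains an index:

    IndexReader r = DirectoryReader.open(dir);
    try {
      System.out.println("numDocs: " + r.numDocs());
    } finally {
      r.close();
    }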
@@ -76,7 +76,7 @@ final class SegmentMerger {
     try {
       new ReaderUtil.Gather(reader) {
         @Override
-        protected void add(int base, IndexReader r) {
+        protected void add(int base, AtomicReader r) {
           mergeState.readers.add(new MergeState.IndexReaderAndLiveDocs(r, r.getLiveDocs()));
         }
       }.run();
@@ -201,7 +201,7 @@ final class SegmentMerger {
     Map<FieldInfo,TypePromoter> normValuesTypes = new HashMap<FieldInfo,TypePromoter>();
 
     for (MergeState.IndexReaderAndLiveDocs readerAndLiveDocs : mergeState.readers) {
-      final IndexReader reader = readerAndLiveDocs.reader;
+      final AtomicReader reader = readerAndLiveDocs.reader;
       FieldInfos readerFieldInfos = reader.getFieldInfos();
       for (FieldInfo fi : readerFieldInfos) {
         FieldInfo merged = mergeState.fieldInfos.add(fi);
@@ -323,7 +323,12 @@ final class SegmentMerger {
       docBase += docCount;
 
       if (mergeState.payloadProcessorProvider != null) {
-        mergeState.dirPayloadProcessor[i] = mergeState.payloadProcessorProvider.getDirProcessor(reader.reader.directory());
+        // TODO: the PayloadProcessorProvider should take AtomicReader as parameter
+        // and find out by itself if it can provide a processor:
+        if (!(reader.reader instanceof SegmentReader))
+          throw new UnsupportedOperationException("Payload processing currently requires exclusively SegmentReaders to be merged.");
+        final Directory dir = ((SegmentReader) reader.reader).directory();
+        mergeState.dirPayloadProcessor[i] = mergeState.payloadProcessorProvider.getDirProcessor(dir);
       }
 
       i++;
@@ -30,11 +30,9 @@ import org.apache.lucene.util.Bits;
 /**
  * @lucene.experimental
  */
-public final class SegmentReader extends IndexReader {
+public final class SegmentReader extends AtomicReader {
 
   private final SegmentInfo si;
-  private final ReaderContext readerContext = new AtomicReaderContext(this);
 
   private final Bits liveDocs;
 
   // Normally set to si.docCount - si.delDocCount, unless we
@@ -186,12 +184,6 @@ public final class SegmentReader extends IndexReader {
     return si.toString(si.dir, si.docCount - numDocs - si.getDelCount());
   }
 
-  @Override
-  public ReaderContext getTopReaderContext() {
-    ensureOpen();
-    return readerContext;
-  }
-
   /**
    * Return the name of the segment this reader is reading.
    */
@@ -207,7 +199,6 @@ public final class SegmentReader extends IndexReader {
   }
 
   /** Returns the directory this index resides in. */
-  @Override
   public Directory directory() {
     // Don't ensureOpen here -- in certain cases, when a
     // cloned/reopened reader needs to commit, it may call
@@ -228,7 +219,6 @@ public final class SegmentReader extends IndexReader {
     return this;
   }
 
-  @Override
   public int getTermInfosIndexDivisor() {
     return core.termsIndexDivisor;
   }
@@ -31,7 +31,7 @@ import org.apache.lucene.index.MultiReader; // javadoc
  * This class forces a composite reader (eg a {@link
  * MultiReader} or {@link DirectoryReader} or any other
  * IndexReader subclass that returns non-null from {@link
- * IndexReader#getSequentialSubReaders}) to emulate an
+ * CompositeReader#getSequentialSubReaders}) to emulate an
  * atomic reader.  This requires implementing the postings
  * APIs on-the-fly, using the static methods in {@link
  * MultiFields}, {@link MultiDocValues},
@@ -50,25 +50,42 @@ import org.apache.lucene.index.MultiReader; // javadoc
  * yourself.</p>
  */
 
-public final class SlowMultiReaderWrapper extends FilterIndexReader {
+public final class SlowCompositeReaderWrapper extends AtomicReader {
 
-  private final ReaderContext readerContext;
+  private final CompositeReader in;
   private final Map<String, DocValues> normsCache = new HashMap<String, DocValues>();
+  private final Fields fields;
+  private final Bits liveDocs;
 
-  public SlowMultiReaderWrapper(IndexReader other) {
-    super(other);
-    readerContext = new AtomicReaderContext(this); // emulate atomic reader!
+  /** This method is sugar for getting an {@link AtomicReader} from
+   * an {@link IndexReader} of any kind. If the reader is already atomic,
+   * it is returned unchanged, otherwise wrapped by this class.
+   */
+  public static AtomicReader wrap(IndexReader reader) throws IOException {
+    if (reader instanceof CompositeReader) {
+      return new SlowCompositeReaderWrapper((CompositeReader) reader);
+    } else {
+      assert reader instanceof AtomicReader;
+      return (AtomicReader) reader;
+    }
+  }
+
+  public SlowCompositeReaderWrapper(CompositeReader reader) throws IOException {
+    super();
+    in = reader;
+    fields = MultiFields.getFields(in);
+    liveDocs = MultiFields.getLiveDocs(in);
   }
 
   @Override
   public String toString() {
-    return "SlowMultiReaderWrapper(" + in + ")";
+    return "SlowCompositeReaderWrapper(" + in + ")";
   }
 
   @Override
   public Fields fields() throws IOException {
     ensureOpen();
-    return MultiFields.getFields(in);
+    return fields;
   }
 
   @Override
@@ -87,25 +104,63 @@ public final class SlowMultiReaderWrapper extends FilterIndexReader {
     }
     return values;
   }
 
+  @Override
+  public Fields getTermVectors(int docID)
+          throws IOException {
+    ensureOpen();
+    return in.getTermVectors(docID);
+  }
+
+  @Override
+  public int numDocs() {
+    // Don't call ensureOpen() here (it could affect performance)
+    return in.numDocs();
+  }
+
+  @Override
+  public int maxDoc() {
+    // Don't call ensureOpen() here (it could affect performance)
+    return in.maxDoc();
+  }
+
+  @Override
+  public void document(int docID, StoredFieldVisitor visitor) throws CorruptIndexException, IOException {
+    ensureOpen();
+    in.document(docID, visitor);
+  }
+
   @Override
   public Bits getLiveDocs() {
     ensureOpen();
-    return MultiFields.getLiveDocs(in);
-  }
-
-  @Override
-  public IndexReader[] getSequentialSubReaders() {
-    return null;
-  }
-
-  @Override
-  public ReaderContext getTopReaderContext() {
-    ensureOpen();
-    return readerContext;
+    return liveDocs;
   }
 
   @Override
   public FieldInfos getFieldInfos() {
-    return ReaderUtil.getMergedFieldInfos(in);
+    ensureOpen();
+    return MultiFields.getMergedFieldInfos(in);
+  }
+
+  @Override
+  public boolean hasDeletions() {
+    ensureOpen();
+    return liveDocs != null;
+  }
+
+  @Override
+  public Object getCoreCacheKey() {
+    return in.getCoreCacheKey();
+  }
+
+  @Override
+  public Object getCombinedCoreAndDeletesKey() {
+    return in.getCombinedCoreAndDeletesKey();
+  }
+
+  @Override
+  protected void doClose() throws IOException {
+    // TODO: as this is a wrapper, should we really close the delegate?
+    in.close();
   }
 }
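Editor's note: wrap() is the new entry point and is a no-op for readers that are already atomic. A sketch, assuming an index in `dir`:

    IndexReader composite = DirectoryReader.open(dir);
    AtomicReader atomic = SlowCompositeReaderWrapper.wrap(composite);
    Fields fields = atomic.fields(); // merged postings view over all segments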
@@ -20,8 +20,8 @@ package org.apache.lucene.search;
 import java.io.IOException;
 import java.util.*;
 
+import org.apache.lucene.index.AtomicReaderContext;
 import org.apache.lucene.index.DocsEnum;
-import org.apache.lucene.index.IndexReader.AtomicReaderContext;
 import org.apache.lucene.index.IndexReader;
 import org.apache.lucene.index.Term;
 import org.apache.lucene.index.TermsEnum;
@@ -240,7 +240,7 @@ public class BooleanQuery extends Query implements Iterable<BooleanClause> {
       for (Iterator<Weight> wIter = weights.iterator(); wIter.hasNext();) {
         Weight w = wIter.next();
         BooleanClause c = cIter.next();
-        if (w.scorer(context, true, true, context.reader.getLiveDocs()) == null) {
+        if (w.scorer(context, true, true, context.reader().getLiveDocs()) == null) {
           if (c.isRequired()) {
             fail = true;
             Explanation r = new Explanation(0.0f, "no match on required clause (" + c.getQuery().toString() + ")");
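Editor's note: AtomicReaderContext's reader is now reached through a method call, and it returns an AtomicReader; the same two-character change recurs throughout the search package:

    AtomicReader reader = context.reader(); // was: context.reader
    Bits liveDocs = reader.getLiveDocs();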
@@ -22,7 +22,7 @@ import java.util.ArrayList;
 import java.util.Collection;
 import java.util.List;
 
-import org.apache.lucene.index.IndexReader.AtomicReaderContext;
+import org.apache.lucene.index.AtomicReaderContext;
 import org.apache.lucene.search.BooleanClause.Occur;
 import org.apache.lucene.search.BooleanQuery.BooleanWeight;
 
@@ -17,7 +17,7 @@ package org.apache.lucene.search;
  * limitations under the License.
  */
 
-import org.apache.lucene.index.IndexReader.AtomicReaderContext;
+import org.apache.lucene.index.AtomicReaderContext;
 import org.apache.lucene.util.RamUsageEstimator;
 
 import java.io.IOException;
@@ -22,8 +22,9 @@ import java.util.Collections;
 import java.util.Map;
 import java.util.WeakHashMap;
 
-import org.apache.lucene.index.IndexReader;
-import org.apache.lucene.index.IndexReader.AtomicReaderContext;
+import org.apache.lucene.index.AtomicReader;
+import org.apache.lucene.index.AtomicReaderContext;
+import org.apache.lucene.index.DirectoryReader; // javadocs
 import org.apache.lucene.util.FixedBitSet;
 import org.apache.lucene.util.Bits;
 
@@ -53,13 +54,13 @@ public class CachingWrapperFilter extends Filter {
 
   /** Wraps another filter's result and caches it.  If
    * {@code recacheDeletes} is {@code true}, then new deletes (for example
-   * after {@link IndexReader#openIfChanged}) will cause the filter
+   * after {@link DirectoryReader#openIfChanged}) will cause the filter
    * {@link DocIdSet} to be recached.
    *
    * <p>If your index changes seldom, it is recommended to use {@code recacheDeletes=true},
   * as recaching will only occur when the index is reopened.
   * For near-real-time indexes or indexes that are often
-   * reopened with (e.g., {@link IndexReader#openIfChanged} is used), you should
+   * reopened with (e.g., {@link DirectoryReader#openIfChanged} is used), you should
   * pass {@code recacheDeletes=false}. This will cache the filter results omitting
   * deletions and will AND them in while scoring.
   * @param filter Filter to cache results of
@@ -76,7 +77,7 @@ public class CachingWrapperFilter extends Filter {
    * returns <code>true</code>, else it copies the {@link DocIdSetIterator} into
    * a {@link FixedBitSet}.
    */
-  protected DocIdSet docIdSetToCache(DocIdSet docIdSet, IndexReader reader) throws IOException {
+  protected DocIdSet docIdSetToCache(DocIdSet docIdSet, AtomicReader reader) throws IOException {
     if (docIdSet == null) {
       // this is better than returning null, as the nonnull result can be cached
       return DocIdSet.EMPTY_DOCIDSET;
@@ -102,7 +103,7 @@ public class CachingWrapperFilter extends Filter {
 
   @Override
   public DocIdSet getDocIdSet(AtomicReaderContext context, final Bits acceptDocs) throws IOException {
-    final IndexReader reader = context.reader;
+    final AtomicReader reader = context.reader();
 
     // Only cache if incoming acceptDocs is == live docs;
     // if Lucene passes in more interesting acceptDocs in
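Editor's note: a sketch of the constructor this javadoc documents, assuming an existing Filter `inner` (the signature is inferred from the javadoc, not shown in the hunk):

    // recacheDeletes=true rebuilds the cached DocIdSet when the reader is
    // reopened with new deletes; false caches without deletions and ANDs
    // live docs in at scoring time.
    CachingWrapperFilter cached = new CachingWrapperFilter(inner, true);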
@@ -19,8 +19,8 @@ package org.apache.lucene.search;
 
 import java.io.IOException;
 
-import org.apache.lucene.index.IndexReader.AtomicReaderContext;
-import org.apache.lucene.index.IndexReader.ReaderContext;
+import org.apache.lucene.index.AtomicReaderContext;
+import org.apache.lucene.index.IndexReaderContext;
 
 /**
  * <p>Expert: Collectors are primarily meant to be used to
@@ -145,9 +145,9 @@ public abstract class Collector {
 
   /**
    * Called before collecting from each {@link AtomicReaderContext}. All doc ids in
-   * {@link #collect(int)} will correspond to {@link ReaderContext#reader}.
+   * {@link #collect(int)} will correspond to {@link IndexReaderContext#reader}.
    *
-   * Add {@link AtomicReaderContext#docBase} to the current {@link ReaderContext#reader}'s
+   * Add {@link AtomicReaderContext#docBase} to the current {@link IndexReaderContext#reader}'s
    * internal document id to re-base ids in {@link #collect(int)}.
    *
    * @param context
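Editor's note: a minimal Collector sketch of the docBase re-basing the javadoc above describes; the println is a stand-in for real per-hit work:

    public class GlobalDocIdCollector extends Collector {
      private int docBase;

      @Override
      public void setScorer(Scorer scorer) {
        // scores are not needed here
      }

      @Override
      public void setNextReader(AtomicReaderContext context) {
        docBase = context.docBase; // offset of this segment's doc ids
      }

      @Override
      public void collect(int doc) {
        System.out.println(docBase + doc); // top-level doc id
      }

      @Override
      public boolean acceptsDocsOutOfOrder() {
        return true;
      }
    }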
@@ -17,8 +17,8 @@ package org.apache.lucene.search;
  * limitations under the License.
  */
 
+import org.apache.lucene.index.AtomicReaderContext;
 import org.apache.lucene.index.IndexReader;
-import org.apache.lucene.index.IndexReader.AtomicReaderContext;
 import org.apache.lucene.index.Term;
 import org.apache.lucene.util.Bits;
 import org.apache.lucene.util.ToStringUtils;
@@ -149,7 +149,7 @@ public class ConstantScoreQuery extends Query {
 
   @Override
   public Explanation explain(AtomicReaderContext context, int doc) throws IOException {
-    final Scorer cs = scorer(context, true, false, context.reader.getLiveDocs());
+    final Scorer cs = scorer(context, true, false, context.reader().getLiveDocs());
     final boolean exists = (cs != null && cs.advance(doc) == doc);
 
     final ComplexExplanation result = new ComplexExplanation();
@@ -22,8 +22,8 @@ import java.util.Collection;
 import java.util.Iterator;
 import java.util.Set;
 
+import org.apache.lucene.index.AtomicReaderContext;
 import org.apache.lucene.index.IndexReader;
-import org.apache.lucene.index.IndexReader.AtomicReaderContext;
 import org.apache.lucene.index.Term;
 import org.apache.lucene.util.Bits;
 
@ -24,7 +24,7 @@ import java.text.DecimalFormat;
|
||||||
import org.apache.lucene.analysis.NumericTokenStream; // for javadocs
|
import org.apache.lucene.analysis.NumericTokenStream; // for javadocs
|
||||||
import org.apache.lucene.document.NumericField; // for javadocs
|
import org.apache.lucene.document.NumericField; // for javadocs
|
||||||
import org.apache.lucene.index.DocTermOrds;
|
import org.apache.lucene.index.DocTermOrds;
|
||||||
import org.apache.lucene.index.IndexReader;
|
import org.apache.lucene.index.AtomicReader;
|
||||||
import org.apache.lucene.index.TermsEnum;
|
import org.apache.lucene.index.TermsEnum;
|
||||||
import org.apache.lucene.util.Bits;
|
import org.apache.lucene.util.Bits;
|
||||||
import org.apache.lucene.util.BytesRef;
|
import org.apache.lucene.util.BytesRef;
|
||||||
|
@ -63,7 +63,7 @@ public interface FieldCache {
|
||||||
}
|
}
|
||||||
|
|
||||||
/** Interface to parse bytes from document fields.
|
/** Interface to parse bytes from document fields.
|
||||||
* @see FieldCache#getBytes(IndexReader, String, FieldCache.ByteParser, boolean)
|
* @see FieldCache#getBytes(AtomicReader, String, FieldCache.ByteParser, boolean)
|
||||||
*/
|
*/
|
||||||
public interface ByteParser extends Parser {
|
public interface ByteParser extends Parser {
|
||||||
/** Return a single Byte representation of this field's value. */
|
/** Return a single Byte representation of this field's value. */
|
||||||
|
@ -71,7 +71,7 @@ public interface FieldCache {
|
||||||
}
|
}
|
||||||
|
|
||||||
/** Interface to parse shorts from document fields.
|
/** Interface to parse shorts from document fields.
|
||||||
* @see FieldCache#getShorts(IndexReader, String, FieldCache.ShortParser, boolean)
|
* @see FieldCache#getShorts(AtomicReader, String, FieldCache.ShortParser, boolean)
|
||||||
*/
|
*/
|
||||||
public interface ShortParser extends Parser {
|
public interface ShortParser extends Parser {
|
||||||
/** Return a short representation of this field's value. */
|
/** Return a short representation of this field's value. */
|
||||||
|
@ -79,7 +79,7 @@ public interface FieldCache {
|
||||||
}
|
}
|
||||||
|
|
||||||
/** Interface to parse ints from document fields.
|
/** Interface to parse ints from document fields.
|
||||||
* @see FieldCache#getInts(IndexReader, String, FieldCache.IntParser, boolean)
|
* @see FieldCache#getInts(AtomicReader, String, FieldCache.IntParser, boolean)
|
||||||
*/
|
*/
|
||||||
public interface IntParser extends Parser {
|
public interface IntParser extends Parser {
|
||||||
/** Return an integer representation of this field's value. */
|
/** Return an integer representation of this field's value. */
|
||||||
|
@ -87,7 +87,7 @@ public interface FieldCache {
|
||||||
}
|
}
|
||||||
|
|
||||||
/** Interface to parse floats from document fields.
|
/** Interface to parse floats from document fields.
|
||||||
* @see FieldCache#getFloats(IndexReader, String, FieldCache.FloatParser, boolean)
|
* @see FieldCache#getFloats(AtomicReader, String, FieldCache.FloatParser, boolean)
|
||||||
*/
|
*/
|
||||||
public interface FloatParser extends Parser {
|
public interface FloatParser extends Parser {
|
||||||
/** Return an float representation of this field's value. */
|
/** Return an float representation of this field's value. */
|
||||||
|
@ -95,7 +95,7 @@ public interface FieldCache {
|
||||||
}
|
}
|
||||||
|
|
||||||
/** Interface to parse long from document fields.
|
/** Interface to parse long from document fields.
|
||||||
* @see FieldCache#getLongs(IndexReader, String, FieldCache.LongParser, boolean)
|
* @see FieldCache#getLongs(AtomicReader, String, FieldCache.LongParser, boolean)
|
||||||
*/
|
*/
|
||||||
public interface LongParser extends Parser {
|
public interface LongParser extends Parser {
|
||||||
/** Return an long representation of this field's value. */
|
/** Return an long representation of this field's value. */
|
||||||
|
@ -103,7 +103,7 @@ public interface FieldCache {
|
||||||
}
|
}
|
||||||
|
|
||||||
/** Interface to parse doubles from document fields.
|
/** Interface to parse doubles from document fields.
|
||||||
* @see FieldCache#getDoubles(IndexReader, String, FieldCache.DoubleParser, boolean)
|
* @see FieldCache#getDoubles(AtomicReader, String, FieldCache.DoubleParser, boolean)
|
||||||
*/
|
*/
|
||||||
public interface DoubleParser extends Parser {
|
public interface DoubleParser extends Parser {
|
||||||
/** Return an long representation of this field's value. */
|
/** Return an long representation of this field's value. */
|
||||||
|
@ -303,7 +303,7 @@ public interface FieldCache {
|
||||||
* <code>reader.maxDoc()</code>, with turned on bits for each docid that
|
* <code>reader.maxDoc()</code>, with turned on bits for each docid that
|
||||||
* does have a value for this field.
|
* does have a value for this field.
|
||||||
*/
|
*/
|
||||||
public Bits getDocsWithField(IndexReader reader, String field)
|
public Bits getDocsWithField(AtomicReader reader, String field)
|
||||||
throws IOException;
|
throws IOException;
|
||||||
|
|
||||||
/** Checks the internal cache for an appropriate entry, and if none is
|
/** Checks the internal cache for an appropriate entry, and if none is
|
||||||
|
@ -317,7 +317,7 @@ public interface FieldCache {
|
||||||
* @return The values in the given field for each document.
|
* @return The values in the given field for each document.
|
||||||
* @throws IOException If any error occurs.
|
* @throws IOException If any error occurs.
|
||||||
*/
|
*/
|
||||||
public byte[] getBytes (IndexReader reader, String field, boolean setDocsWithField)
|
public byte[] getBytes (AtomicReader reader, String field, boolean setDocsWithField)
|
||||||
throws IOException;
|
throws IOException;
|
||||||
|
|
||||||
/** Checks the internal cache for an appropriate entry, and if none is found,
|
/** Checks the internal cache for an appropriate entry, and if none is found,
|
||||||
|
@ -332,7 +332,7 @@ public interface FieldCache {
|
||||||
* @return The values in the given field for each document.
|
* @return The values in the given field for each document.
|
||||||
* @throws IOException If any error occurs.
|
* @throws IOException If any error occurs.
|
||||||
*/
|
*/
|
||||||
public byte[] getBytes (IndexReader reader, String field, ByteParser parser, boolean setDocsWithField)
|
public byte[] getBytes (AtomicReader reader, String field, ByteParser parser, boolean setDocsWithField)
|
||||||
throws IOException;
|
throws IOException;
|
||||||
|
|
||||||
/** Checks the internal cache for an appropriate entry, and if none is
|
/** Checks the internal cache for an appropriate entry, and if none is
|
||||||
|
@ -346,7 +346,7 @@ public interface FieldCache {
|
||||||
* @return The values in the given field for each document.
|
* @return The values in the given field for each document.
|
||||||
* @throws IOException If any error occurs.
|
* @throws IOException If any error occurs.
|
||||||
*/
|
*/
|
||||||
public short[] getShorts (IndexReader reader, String field, boolean setDocsWithField)
|
public short[] getShorts (AtomicReader reader, String field, boolean setDocsWithField)
|
||||||
throws IOException;
|
throws IOException;
|
||||||
|
|
||||||
/** Checks the internal cache for an appropriate entry, and if none is found,
|
/** Checks the internal cache for an appropriate entry, and if none is found,
|
||||||
|
@ -361,7 +361,7 @@ public interface FieldCache {
|
||||||
* @return The values in the given field for each document.
|
* @return The values in the given field for each document.
|
||||||
* @throws IOException If any error occurs.
|
* @throws IOException If any error occurs.
|
||||||
*/
|
*/
|
||||||
public short[] getShorts (IndexReader reader, String field, ShortParser parser, boolean setDocsWithField)
|
public short[] getShorts (AtomicReader reader, String field, ShortParser parser, boolean setDocsWithField)
|
||||||
throws IOException;
|
throws IOException;
|
||||||
|
|
||||||
/** Checks the internal cache for an appropriate entry, and if none is
|
/** Checks the internal cache for an appropriate entry, and if none is
|
||||||
|
@ -375,7 +375,7 @@ public interface FieldCache {
|
||||||
* @return The values in the given field for each document.
|
* @return The values in the given field for each document.
|
||||||
* @throws IOException If any error occurs.
|
* @throws IOException If any error occurs.
|
||||||
*/
|
*/
|
||||||
public int[] getInts (IndexReader reader, String field, boolean setDocsWithField)
|
public int[] getInts (AtomicReader reader, String field, boolean setDocsWithField)
|
||||||
throws IOException;
|
throws IOException;
|
||||||
|
|
||||||
/** Checks the internal cache for an appropriate entry, and if none is found,
|
/** Checks the internal cache for an appropriate entry, and if none is found,
|
||||||
|
@ -390,7 +390,7 @@ public interface FieldCache {
|
||||||
* @return The values in the given field for each document.
|
* @return The values in the given field for each document.
|
||||||
* @throws IOException If any error occurs.
|
* @throws IOException If any error occurs.
|
||||||
*/
|
*/
|
||||||
public int[] getInts (IndexReader reader, String field, IntParser parser, boolean setDocsWithField)
|
public int[] getInts (AtomicReader reader, String field, IntParser parser, boolean setDocsWithField)
|
||||||
throws IOException;
|
throws IOException;
|
||||||
|
|
||||||
/** Checks the internal cache for an appropriate entry, and if
|
/** Checks the internal cache for an appropriate entry, and if
|
||||||
|
@ -404,7 +404,7 @@ public interface FieldCache {
|
||||||
* @return The values in the given field for each document.
|
* @return The values in the given field for each document.
|
||||||
* @throws IOException If any error occurs.
|
* @throws IOException If any error occurs.
|
||||||
*/
|
*/
|
||||||
public float[] getFloats (IndexReader reader, String field, boolean setDocsWithField)
|
public float[] getFloats (AtomicReader reader, String field, boolean setDocsWithField)
|
||||||
throws IOException;
|
throws IOException;
|
||||||
|
|
||||||
/** Checks the internal cache for an appropriate entry, and if
|
/** Checks the internal cache for an appropriate entry, and if
|
||||||
|
@ -419,7 +419,7 @@ public interface FieldCache {
|
||||||
* @return The values in the given field for each document.
|
* @return The values in the given field for each document.
|
||||||
* @throws IOException If any error occurs.
|
* @throws IOException If any error occurs.
|
||||||
*/
|
*/
|
||||||
public float[] getFloats (IndexReader reader, String field,
|
public float[] getFloats (AtomicReader reader, String field,
|
||||||
FloatParser parser, boolean setDocsWithField) throws IOException;
|
FloatParser parser, boolean setDocsWithField) throws IOException;
|
||||||
|
|
||||||
/**
|
/**
|
||||||
|
@ -435,7 +435,7 @@ public interface FieldCache {
|
||||||
* @return The values in the given field for each document.
|
* @return The values in the given field for each document.
|
||||||
* @throws java.io.IOException If any error occurs.
|
* @throws java.io.IOException If any error occurs.
|
||||||
*/
|
*/
|
||||||
public long[] getLongs(IndexReader reader, String field, boolean setDocsWithField)
|
public long[] getLongs(AtomicReader reader, String field, boolean setDocsWithField)
|
||||||
throws IOException;
|
throws IOException;
|
||||||
|
|
||||||
/**
|
/**
|
||||||
|
@ -452,7 +452,7 @@ public interface FieldCache {
|
||||||
* @return The values in the given field for each document.
|
* @return The values in the given field for each document.
|
||||||
* @throws IOException If any error occurs.
|
* @throws IOException If any error occurs.
|
||||||
*/
|
*/
|
||||||
public long[] getLongs(IndexReader reader, String field, LongParser parser, boolean setDocsWithField)
|
public long[] getLongs(AtomicReader reader, String field, LongParser parser, boolean setDocsWithField)
|
||||||
throws IOException;
|
throws IOException;
|
||||||
|
|
||||||
/**
|
/**
|
||||||
|
@ -468,7 +468,7 @@ public interface FieldCache {
|
||||||
* @return The values in the given field for each document.
|
* @return The values in the given field for each document.
|
||||||
* @throws IOException If any error occurs.
|
* @throws IOException If any error occurs.
|
||||||
*/
|
*/
|
||||||
public double[] getDoubles(IndexReader reader, String field, boolean setDocsWithField)
|
public double[] getDoubles(AtomicReader reader, String field, boolean setDocsWithField)
|
||||||
throws IOException;
|
throws IOException;
|
||||||
|
|
||||||
/**
|
/**
|
||||||
|
@ -485,7 +485,7 @@ public interface FieldCache {
|
||||||
* @return The values in the given field for each document.
|
* @return The values in the given field for each document.
|
||||||
* @throws IOException If any error occurs.
|
* @throws IOException If any error occurs.
|
||||||
*/
|
*/
|
||||||
public double[] getDoubles(IndexReader reader, String field, DoubleParser parser, boolean setDocsWithField)
|
public double[] getDoubles(AtomicReader reader, String field, DoubleParser parser, boolean setDocsWithField)
|
||||||
throws IOException;
|
throws IOException;
|
||||||
|
|
||||||
/** Returned by {@link #getTerms} */
|
/** Returned by {@link #getTerms} */
|
||||||
|
@ -513,15 +513,15 @@ public interface FieldCache {
|
||||||
* @return The values in the given field for each document.
|
* @return The values in the given field for each document.
|
||||||
* @throws IOException If any error occurs.
|
* @throws IOException If any error occurs.
|
||||||
*/
|
*/
|
||||||
public DocTerms getTerms (IndexReader reader, String field)
|
public DocTerms getTerms (AtomicReader reader, String field)
|
||||||
throws IOException;
|
throws IOException;
|
||||||
|
|
||||||
/** Expert: just like {@link #getTerms(IndexReader,String)},
|
/** Expert: just like {@link #getTerms(AtomicReader,String)},
|
||||||
* but you can specify whether more RAM should be consumed in exchange for
|
* but you can specify whether more RAM should be consumed in exchange for
|
||||||
* faster lookups (default is "true"). Note that the
|
* faster lookups (default is "true"). Note that the
|
||||||
* first call for a given reader and field "wins",
* subsequent calls will share the same cache entry. */
-public DocTerms getTerms (IndexReader reader, String field, boolean fasterButMoreRAM)
+public DocTerms getTerms (AtomicReader reader, String field, boolean fasterButMoreRAM)
throws IOException;

/** Returned by {@link #getTermsIndex} */
@@ -589,16 +589,16 @@ public interface FieldCache {
* @return The values in the given field for each document.
* @throws IOException If any error occurs.
*/
-public DocTermsIndex getTermsIndex (IndexReader reader, String field)
+public DocTermsIndex getTermsIndex (AtomicReader reader, String field)
throws IOException;

/** Expert: just like {@link
-* #getTermsIndex(IndexReader,String)}, but you can specify
+* #getTermsIndex(AtomicReader,String)}, but you can specify
* whether more RAM should be consumed in exchange for
* faster lookups (default is "true"). Note that the
* first call for a given reader and field "wins",
* subsequent calls will share the same cache entry. */
-public DocTermsIndex getTermsIndex (IndexReader reader, String field, boolean fasterButMoreRAM)
+public DocTermsIndex getTermsIndex (AtomicReader reader, String field, boolean fasterButMoreRAM)
throws IOException;

/**
@@ -611,7 +611,7 @@ public interface FieldCache {
* @return a {@link DocTermOrds} instance
* @throws IOException If any error occurs.
*/
-public DocTermOrds getDocTermOrds(IndexReader reader, String field) throws IOException;
+public DocTermOrds getDocTermOrds(AtomicReader reader, String field) throws IOException;

/**
* EXPERT: A unique Identifier/Description for each item in the FieldCache.
@@ -677,7 +677,7 @@ public interface FieldCache {
* currently in the FieldCache.
* <p>
* NOTE: These CacheEntry objects maintain a strong reference to the
-* Cached Values. Maintaining references to a CacheEntry the IndexReader
+* Cached Values. Maintaining references to a CacheEntry the AtomicIndexReader
* associated with it has garbage collected will prevent the Value itself
* from being garbage collected when the Cache drops the WeakReference.
* </p>
@@ -705,7 +705,7 @@ public interface FieldCache {
* top-level reader, it usually will have no effect as
* Lucene now caches at the segment reader level.
*/
-public abstract void purge(IndexReader r);
+public abstract void purge(AtomicReader r);

/**
* If non-null, FieldCacheImpl will warn whenever
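
The hunks above narrow every FieldCache accessor from IndexReader to AtomicReader, so cache entries can only be keyed on single-segment readers. A minimal caller-side sketch of the new signature; the helper class and the "title" field are illustrative, not part of the commit:

    import java.io.IOException;
    import org.apache.lucene.index.AtomicReaderContext;
    import org.apache.lucene.search.FieldCache;

    class TermsIndexExample {
      // Per-segment lookup: the context's reader() is an AtomicReader,
      // which is now the only type getTermsIndex() accepts.
      static FieldCache.DocTermsIndex titleIndex(AtomicReaderContext ctx) throws IOException {
        return FieldCache.DEFAULT.getTermsIndex(ctx.reader(), "title");
      }
    }
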
@@ -29,6 +29,7 @@ import java.util.WeakHashMap;
import org.apache.lucene.index.DocTermOrds;
import org.apache.lucene.index.DocsAndPositionsEnum;
import org.apache.lucene.index.DocsEnum;
+import org.apache.lucene.index.AtomicReader;
import org.apache.lucene.index.IndexReader;
import org.apache.lucene.index.OrdTermState;
import org.apache.lucene.index.SegmentReader;
@@ -48,8 +49,6 @@ import org.apache.lucene.util.packed.PackedInts;
* Expert: The default cache implementation, storing all values in memory.
* A WeakHashMap is used for storage.
*
-* <p>Created: May 19, 2004 4:40:36 PM
-*
* @since lucene 1.4
*/
class FieldCacheImpl implements FieldCache {
@@ -76,7 +75,7 @@ class FieldCacheImpl implements FieldCache {
init();
}

-public synchronized void purge(IndexReader r) {
+public synchronized void purge(AtomicReader r) {
for(Cache c : caches.values()) {
c.purge(r);
}
@@ -158,21 +157,20 @@ class FieldCacheImpl implements FieldCache {
final IndexReader.ReaderClosedListener purgeReader = new IndexReader.ReaderClosedListener() {
@Override
public void onClose(IndexReader owner) {
-FieldCacheImpl.this.purge(owner);
+assert owner instanceof AtomicReader;
+FieldCacheImpl.this.purge((AtomicReader) owner);
}
};

-private void initReader(IndexReader reader) {
+private void initReader(AtomicReader reader) {
if (reader instanceof SegmentReader) {
((SegmentReader) reader).addCoreClosedListener(purgeCore);
-} else if (reader.getSequentialSubReaders() != null) {
-throw new UnsupportedOperationException("Please use SlowMultiReaderWrapper, if you really need a top level FieldCache");
} else {
// we have a slow reader of some sort, try to register a purge event
// rather than relying on gc:
Object key = reader.getCoreCacheKey();
-if (key instanceof IndexReader) {
-((IndexReader)key).addReaderClosedListener(purgeReader);
+if (key instanceof AtomicReader) {
+((AtomicReader)key).addReaderClosedListener(purgeReader);
} else {
// last chance
reader.addReaderClosedListener(purgeReader);
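
With top-level readers rejected at compile time, the UnsupportedOperationException path above becomes dead code and is dropped; eviction is wired through ReaderClosedListener instead. A hedged sketch of explicit eviction, assuming a segment reader obtained elsewhere:

    import org.apache.lucene.index.AtomicReader;
    import org.apache.lucene.search.FieldCache;

    class PurgeExample {
      // purge() now takes an AtomicReader; composite readers never own
      // cache entries, so there is nothing top-level to purge.
      static void evict(AtomicReader segmentReader) {
        FieldCache.DEFAULT.purge(segmentReader);
      }
    }
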
@@ -191,11 +189,11 @@ class FieldCacheImpl implements FieldCache {

final Map<Object,Map<Entry,Object>> readerCache = new WeakHashMap<Object,Map<Entry,Object>>();

-protected abstract Object createValue(IndexReader reader, Entry key, boolean setDocsWithField)
+protected abstract Object createValue(AtomicReader reader, Entry key, boolean setDocsWithField)
throws IOException;

/** Remove this reader from the cache, if present. */
-public void purge(IndexReader r) {
+public void purge(AtomicReader r) {
Object readerKey = r.getCoreCacheKey();
synchronized(readerCache) {
readerCache.remove(readerKey);
@@ -204,7 +202,7 @@ class FieldCacheImpl implements FieldCache {

/** Sets the key to the value for the provided reader;
* if the key is already set then this doesn't change it. */
-public void put(IndexReader reader, Entry key, Object value) {
+public void put(AtomicReader reader, Entry key, Object value) {
final Object readerKey = reader.getCoreCacheKey();
synchronized (readerCache) {
Map<Entry,Object> innerCache = readerCache.get(readerKey);
@@ -223,7 +221,7 @@ class FieldCacheImpl implements FieldCache {
}
}

-public Object get(IndexReader reader, Entry key, boolean setDocsWithField) throws IOException {
+public Object get(AtomicReader reader, Entry key, boolean setDocsWithField) throws IOException {
Map<Entry,Object> innerCache;
Object value;
final Object readerKey = reader.getCoreCacheKey();
@@ -321,12 +319,12 @@ class FieldCacheImpl implements FieldCache {
}

// inherit javadocs
-public byte[] getBytes (IndexReader reader, String field, boolean setDocsWithField) throws IOException {
+public byte[] getBytes (AtomicReader reader, String field, boolean setDocsWithField) throws IOException {
return getBytes(reader, field, null, setDocsWithField);
}

// inherit javadocs
-public byte[] getBytes(IndexReader reader, String field, ByteParser parser, boolean setDocsWithField)
+public byte[] getBytes(AtomicReader reader, String field, ByteParser parser, boolean setDocsWithField)
throws IOException {
return (byte[]) caches.get(Byte.TYPE).get(reader, new Entry(field, parser), setDocsWithField);
}
@@ -336,7 +334,7 @@ class FieldCacheImpl implements FieldCache {
super(wrapper);
}
@Override
-protected Object createValue(IndexReader reader, Entry entryKey, boolean setDocsWithField)
+protected Object createValue(AtomicReader reader, Entry entryKey, boolean setDocsWithField)
throws IOException {
String field = entryKey.field;
ByteParser parser = (ByteParser) entryKey.custom;
@@ -393,12 +391,12 @@ class FieldCacheImpl implements FieldCache {
}

// inherit javadocs
-public short[] getShorts (IndexReader reader, String field, boolean setDocsWithField) throws IOException {
+public short[] getShorts (AtomicReader reader, String field, boolean setDocsWithField) throws IOException {
return getShorts(reader, field, null, setDocsWithField);
}

// inherit javadocs
-public short[] getShorts(IndexReader reader, String field, ShortParser parser, boolean setDocsWithField)
+public short[] getShorts(AtomicReader reader, String field, ShortParser parser, boolean setDocsWithField)
throws IOException {
return (short[]) caches.get(Short.TYPE).get(reader, new Entry(field, parser), setDocsWithField);
}
@@ -409,7 +407,7 @@ class FieldCacheImpl implements FieldCache {
}

@Override
-protected Object createValue(IndexReader reader, Entry entryKey, boolean setDocsWithField)
+protected Object createValue(AtomicReader reader, Entry entryKey, boolean setDocsWithField)
throws IOException {
String field = entryKey.field;
ShortParser parser = (ShortParser) entryKey.custom;
@@ -466,7 +464,7 @@ class FieldCacheImpl implements FieldCache {
}

// null Bits means no docs matched
-void setDocsWithField(IndexReader reader, String field, Bits docsWithField) {
+void setDocsWithField(AtomicReader reader, String field, Bits docsWithField) {
final int maxDoc = reader.maxDoc();
final Bits bits;
if (docsWithField == null) {
@@ -487,12 +485,12 @@ class FieldCacheImpl implements FieldCache {
}

// inherit javadocs
-public int[] getInts (IndexReader reader, String field, boolean setDocsWithField) throws IOException {
+public int[] getInts (AtomicReader reader, String field, boolean setDocsWithField) throws IOException {
return getInts(reader, field, null, setDocsWithField);
}

// inherit javadocs
-public int[] getInts(IndexReader reader, String field, IntParser parser, boolean setDocsWithField)
+public int[] getInts(AtomicReader reader, String field, IntParser parser, boolean setDocsWithField)
throws IOException {
return (int[]) caches.get(Integer.TYPE).get(reader, new Entry(field, parser), setDocsWithField);
}
@@ -503,7 +501,7 @@ class FieldCacheImpl implements FieldCache {
}

@Override
-protected Object createValue(IndexReader reader, Entry entryKey, boolean setDocsWithField)
+protected Object createValue(AtomicReader reader, Entry entryKey, boolean setDocsWithField)
throws IOException {
String field = entryKey.field;
IntParser parser = (IntParser) entryKey.custom;
@@ -574,7 +572,7 @@ class FieldCacheImpl implements FieldCache {
}
}

-public Bits getDocsWithField(IndexReader reader, String field)
+public Bits getDocsWithField(AtomicReader reader, String field)
throws IOException {
return (Bits) caches.get(DocsWithFieldCache.class).get(reader, new Entry(field, null), false);
}
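
All of the primitive getXXX accessors follow the same pattern: look up the per-type Cache, key it on reader.getCoreCacheKey(), and create the value array on a miss. An illustrative sketch of the caller side (the "price" field name is an assumption):

    import java.io.IOException;
    import org.apache.lucene.index.AtomicReaderContext;
    import org.apache.lucene.search.FieldCache;
    import org.apache.lucene.util.Bits;

    class NumericCacheExample {
      // A repeat call with the same segment reader and field returns
      // the cached int[] rather than re-uninverting the field.
      static int[] prices(AtomicReaderContext ctx) throws IOException {
        return FieldCache.DEFAULT.getInts(ctx.reader(), "price", false);
      }

      // Which documents actually have a value in the field:
      static Bits pricedDocs(AtomicReaderContext ctx) throws IOException {
        return FieldCache.DEFAULT.getDocsWithField(ctx.reader(), "price");
      }
    }
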
@@ -585,7 +583,7 @@ class FieldCacheImpl implements FieldCache {
}

@Override
-protected Object createValue(IndexReader reader, Entry entryKey, boolean setDocsWithField /* ignored */)
+protected Object createValue(AtomicReader reader, Entry entryKey, boolean setDocsWithField /* ignored */)
throws IOException {
final String field = entryKey.field;
FixedBitSet res = null;
@@ -635,13 +633,13 @@ class FieldCacheImpl implements FieldCache {
}

// inherit javadocs
-public float[] getFloats (IndexReader reader, String field, boolean setDocsWithField)
+public float[] getFloats (AtomicReader reader, String field, boolean setDocsWithField)
throws IOException {
return getFloats(reader, field, null, setDocsWithField);
}

// inherit javadocs
-public float[] getFloats(IndexReader reader, String field, FloatParser parser, boolean setDocsWithField)
+public float[] getFloats(AtomicReader reader, String field, FloatParser parser, boolean setDocsWithField)
throws IOException {

return (float[]) caches.get(Float.TYPE).get(reader, new Entry(field, parser), setDocsWithField);
@@ -653,7 +651,7 @@ class FieldCacheImpl implements FieldCache {
}

@Override
-protected Object createValue(IndexReader reader, Entry entryKey, boolean setDocsWithField)
+protected Object createValue(AtomicReader reader, Entry entryKey, boolean setDocsWithField)
throws IOException {
String field = entryKey.field;
FloatParser parser = (FloatParser) entryKey.custom;
@@ -725,12 +723,12 @@ class FieldCacheImpl implements FieldCache {
}


-public long[] getLongs(IndexReader reader, String field, boolean setDocsWithField) throws IOException {
+public long[] getLongs(AtomicReader reader, String field, boolean setDocsWithField) throws IOException {
return getLongs(reader, field, null, setDocsWithField);
}

// inherit javadocs
-public long[] getLongs(IndexReader reader, String field, FieldCache.LongParser parser, boolean setDocsWithField)
+public long[] getLongs(AtomicReader reader, String field, FieldCache.LongParser parser, boolean setDocsWithField)
throws IOException {
return (long[]) caches.get(Long.TYPE).get(reader, new Entry(field, parser), setDocsWithField);
}
@@ -741,7 +739,7 @@ class FieldCacheImpl implements FieldCache {
}

@Override
-protected Object createValue(IndexReader reader, Entry entryKey, boolean setDocsWithField)
+protected Object createValue(AtomicReader reader, Entry entryKey, boolean setDocsWithField)
throws IOException {
String field = entryKey.field;
FieldCache.LongParser parser = (FieldCache.LongParser) entryKey.custom;
@@ -813,13 +811,13 @@ class FieldCacheImpl implements FieldCache {
}

// inherit javadocs
-public double[] getDoubles(IndexReader reader, String field, boolean setDocsWithField)
+public double[] getDoubles(AtomicReader reader, String field, boolean setDocsWithField)
throws IOException {
return getDoubles(reader, field, null, setDocsWithField);
}

// inherit javadocs
-public double[] getDoubles(IndexReader reader, String field, FieldCache.DoubleParser parser, boolean setDocsWithField)
+public double[] getDoubles(AtomicReader reader, String field, FieldCache.DoubleParser parser, boolean setDocsWithField)
throws IOException {
return (double[]) caches.get(Double.TYPE).get(reader, new Entry(field, parser), setDocsWithField);
}
@@ -830,7 +828,7 @@ class FieldCacheImpl implements FieldCache {
}

@Override
-protected Object createValue(IndexReader reader, Entry entryKey, boolean setDocsWithField)
+protected Object createValue(AtomicReader reader, Entry entryKey, boolean setDocsWithField)
throws IOException {
String field = entryKey.field;
FieldCache.DoubleParser parser = (FieldCache.DoubleParser) entryKey.custom;
@@ -1075,11 +1073,11 @@ class FieldCacheImpl implements FieldCache {

private static boolean DEFAULT_FASTER_BUT_MORE_RAM = true;

-public DocTermsIndex getTermsIndex(IndexReader reader, String field) throws IOException {
+public DocTermsIndex getTermsIndex(AtomicReader reader, String field) throws IOException {
return getTermsIndex(reader, field, DEFAULT_FASTER_BUT_MORE_RAM);
}

-public DocTermsIndex getTermsIndex(IndexReader reader, String field, boolean fasterButMoreRAM) throws IOException {
+public DocTermsIndex getTermsIndex(AtomicReader reader, String field, boolean fasterButMoreRAM) throws IOException {
return (DocTermsIndex) caches.get(DocTermsIndex.class).get(reader, new Entry(field, Boolean.valueOf(fasterButMoreRAM)), false);
}

@@ -1089,7 +1087,7 @@ class FieldCacheImpl implements FieldCache {
}

@Override
-protected Object createValue(IndexReader reader, Entry entryKey, boolean setDocsWithField /* ignored */)
+protected Object createValue(AtomicReader reader, Entry entryKey, boolean setDocsWithField /* ignored */)
throws IOException {

Terms terms = reader.terms(entryKey.field);
@@ -1220,11 +1218,11 @@ class FieldCacheImpl implements FieldCache {

// TODO: this if DocTermsIndex was already created, we
// should share it...
-public DocTerms getTerms(IndexReader reader, String field) throws IOException {
+public DocTerms getTerms(AtomicReader reader, String field) throws IOException {
return getTerms(reader, field, DEFAULT_FASTER_BUT_MORE_RAM);
}

-public DocTerms getTerms(IndexReader reader, String field, boolean fasterButMoreRAM) throws IOException {
+public DocTerms getTerms(AtomicReader reader, String field, boolean fasterButMoreRAM) throws IOException {
return (DocTerms) caches.get(DocTerms.class).get(reader, new Entry(field, Boolean.valueOf(fasterButMoreRAM)), false);
}

@@ -1234,7 +1232,7 @@ class FieldCacheImpl implements FieldCache {
}

@Override
-protected Object createValue(IndexReader reader, Entry entryKey, boolean setDocsWithField /* ignored */)
+protected Object createValue(AtomicReader reader, Entry entryKey, boolean setDocsWithField /* ignored */)
throws IOException {

Terms terms = reader.terms(entryKey.field);
@@ -1308,7 +1306,7 @@ class FieldCacheImpl implements FieldCache {
}
}

-public DocTermOrds getDocTermOrds(IndexReader reader, String field) throws IOException {
+public DocTermOrds getDocTermOrds(AtomicReader reader, String field) throws IOException {
return (DocTermOrds) caches.get(DocTermOrds.class).get(reader, new Entry(field, null), false);
}

@@ -1318,7 +1316,7 @@ class FieldCacheImpl implements FieldCache {
}

@Override
-protected Object createValue(IndexReader reader, Entry entryKey, boolean setDocsWithField /* ignored */)
+protected Object createValue(AtomicReader reader, Entry entryKey, boolean setDocsWithField /* ignored */)
throws IOException {
return new DocTermOrds(reader, entryKey.field);
}
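
getDocTermOrds() rounds out the cache: it uninverts a multi-valued field into per-document term ordinals, again strictly per segment. A hypothetical usage sketch (the "tags" field is assumed):

    import java.io.IOException;
    import org.apache.lucene.index.AtomicReaderContext;
    import org.apache.lucene.index.DocTermOrds;
    import org.apache.lucene.search.FieldCache;

    class DocTermOrdsExample {
      static DocTermOrds tagOrds(AtomicReaderContext ctx) throws IOException {
        // Cached per segment, like the single-valued accessors above.
        return FieldCache.DEFAULT.getDocTermOrds(ctx.reader(), "tags");
      }
    }
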
@@ -18,8 +18,8 @@ package org.apache.lucene.search;

import java.io.IOException;

-import org.apache.lucene.index.IndexReader;
-import org.apache.lucene.index.IndexReader.AtomicReaderContext;
+import org.apache.lucene.index.AtomicReader; // for javadocs
+import org.apache.lucene.index.AtomicReaderContext;
import org.apache.lucene.util.NumericUtils;
import org.apache.lucene.util.Bits;
import org.apache.lucene.util.BytesRef;
@@ -84,7 +84,7 @@ public abstract class FieldCacheRangeFilter<T> extends Filter {
return new FieldCacheRangeFilter<String>(field, null, lowerVal, upperVal, includeLower, includeUpper) {
@Override
public DocIdSet getDocIdSet(AtomicReaderContext context, Bits acceptDocs) throws IOException {
-final FieldCache.DocTermsIndex fcsi = FieldCache.DEFAULT.getTermsIndex(context.reader, field);
+final FieldCache.DocTermsIndex fcsi = FieldCache.DEFAULT.getTermsIndex(context.reader(), field);
final BytesRef spare = new BytesRef();
final int lowerPoint = fcsi.binarySearchLookup(lowerVal == null ? null : new BytesRef(lowerVal), spare);
final int upperPoint = fcsi.binarySearchLookup(upperVal == null ? null : new BytesRef(upperVal), spare);
@@ -122,7 +122,7 @@ public abstract class FieldCacheRangeFilter<T> extends Filter {

assert inclusiveLowerPoint > 0 && inclusiveUpperPoint > 0;

-return new FieldCacheDocIdSet(context.reader.maxDoc(), acceptDocs) {
+return new FieldCacheDocIdSet(context.reader().maxDoc(), acceptDocs) {
@Override
protected final boolean matchDoc(int doc) {
final int docOrd = fcsi.getOrd(doc);
@@ -134,7 +134,7 @@ public abstract class FieldCacheRangeFilter<T> extends Filter {
}

/**
-* Creates a numeric range filter using {@link FieldCache#getBytes(IndexReader,String,boolean)}. This works with all
+* Creates a numeric range filter using {@link FieldCache#getBytes(AtomicReader,String,boolean)}. This works with all
* byte fields containing exactly one numeric term in the field. The range can be half-open by setting one
* of the values to <code>null</code>.
*/
@@ -143,7 +143,7 @@ public abstract class FieldCacheRangeFilter<T> extends Filter {
}

/**
-* Creates a numeric range filter using {@link FieldCache#getBytes(IndexReader,String,FieldCache.ByteParser,boolean)}. This works with all
+* Creates a numeric range filter using {@link FieldCache#getBytes(AtomicReader,String,FieldCache.ByteParser,boolean)}. This works with all
* byte fields containing exactly one numeric term in the field. The range can be half-open by setting one
* of the values to <code>null</code>.
*/
@@ -172,8 +172,8 @@ public abstract class FieldCacheRangeFilter<T> extends Filter {
if (inclusiveLowerPoint > inclusiveUpperPoint)
return DocIdSet.EMPTY_DOCIDSET;

-final byte[] values = FieldCache.DEFAULT.getBytes(context.reader, field, (FieldCache.ByteParser) parser, false);
-return new FieldCacheDocIdSet(context.reader.maxDoc(), acceptDocs) {
+final byte[] values = FieldCache.DEFAULT.getBytes(context.reader(), field, (FieldCache.ByteParser) parser, false);
+return new FieldCacheDocIdSet(context.reader().maxDoc(), acceptDocs) {
@Override
protected boolean matchDoc(int doc) {
return values[doc] >= inclusiveLowerPoint && values[doc] <= inclusiveUpperPoint;
@@ -184,7 +184,7 @@ public abstract class FieldCacheRangeFilter<T> extends Filter {
}

/**
-* Creates a numeric range filter using {@link FieldCache#getShorts(IndexReader,String,boolean)}. This works with all
+* Creates a numeric range filter using {@link FieldCache#getShorts(AtomicReader,String,boolean)}. This works with all
* short fields containing exactly one numeric term in the field. The range can be half-open by setting one
* of the values to <code>null</code>.
*/
@@ -193,7 +193,7 @@ public abstract class FieldCacheRangeFilter<T> extends Filter {
}

/**
-* Creates a numeric range filter using {@link FieldCache#getShorts(IndexReader,String,FieldCache.ShortParser,boolean)}. This works with all
+* Creates a numeric range filter using {@link FieldCache#getShorts(AtomicReader,String,FieldCache.ShortParser,boolean)}. This works with all
* short fields containing exactly one numeric term in the field. The range can be half-open by setting one
* of the values to <code>null</code>.
*/
@@ -222,8 +222,8 @@ public abstract class FieldCacheRangeFilter<T> extends Filter {
if (inclusiveLowerPoint > inclusiveUpperPoint)
return DocIdSet.EMPTY_DOCIDSET;

-final short[] values = FieldCache.DEFAULT.getShorts(context.reader, field, (FieldCache.ShortParser) parser, false);
-return new FieldCacheDocIdSet(context.reader.maxDoc(), acceptDocs) {
+final short[] values = FieldCache.DEFAULT.getShorts(context.reader(), field, (FieldCache.ShortParser) parser, false);
+return new FieldCacheDocIdSet(context.reader().maxDoc(), acceptDocs) {
@Override
protected boolean matchDoc(int doc) {
return values[doc] >= inclusiveLowerPoint && values[doc] <= inclusiveUpperPoint;
@@ -234,7 +234,7 @@ public abstract class FieldCacheRangeFilter<T> extends Filter {
}

/**
-* Creates a numeric range filter using {@link FieldCache#getInts(IndexReader,String,boolean)}. This works with all
+* Creates a numeric range filter using {@link FieldCache#getInts(AtomicReader,String,boolean)}. This works with all
* int fields containing exactly one numeric term in the field. The range can be half-open by setting one
* of the values to <code>null</code>.
*/
@@ -243,7 +243,7 @@ public abstract class FieldCacheRangeFilter<T> extends Filter {
}

/**
-* Creates a numeric range filter using {@link FieldCache#getInts(IndexReader,String,FieldCache.IntParser,boolean)}. This works with all
+* Creates a numeric range filter using {@link FieldCache#getInts(AtomicReader,String,FieldCache.IntParser,boolean)}. This works with all
* int fields containing exactly one numeric term in the field. The range can be half-open by setting one
* of the values to <code>null</code>.
*/
@@ -272,8 +272,8 @@ public abstract class FieldCacheRangeFilter<T> extends Filter {
if (inclusiveLowerPoint > inclusiveUpperPoint)
return DocIdSet.EMPTY_DOCIDSET;

-final int[] values = FieldCache.DEFAULT.getInts(context.reader, field, (FieldCache.IntParser) parser, false);
-return new FieldCacheDocIdSet(context.reader.maxDoc(), acceptDocs) {
+final int[] values = FieldCache.DEFAULT.getInts(context.reader(), field, (FieldCache.IntParser) parser, false);
+return new FieldCacheDocIdSet(context.reader().maxDoc(), acceptDocs) {
@Override
protected boolean matchDoc(int doc) {
return values[doc] >= inclusiveLowerPoint && values[doc] <= inclusiveUpperPoint;
@@ -284,7 +284,7 @@ public abstract class FieldCacheRangeFilter<T> extends Filter {
}

/**
-* Creates a numeric range filter using {@link FieldCache#getLongs(IndexReader,String,boolean)}. This works with all
+* Creates a numeric range filter using {@link FieldCache#getLongs(AtomicReader,String,boolean)}. This works with all
* long fields containing exactly one numeric term in the field. The range can be half-open by setting one
* of the values to <code>null</code>.
*/
@@ -293,7 +293,7 @@ public abstract class FieldCacheRangeFilter<T> extends Filter {
}

/**
-* Creates a numeric range filter using {@link FieldCache#getLongs(IndexReader,String,FieldCache.LongParser,boolean)}. This works with all
+* Creates a numeric range filter using {@link FieldCache#getLongs(AtomicReader,String,FieldCache.LongParser,boolean)}. This works with all
* long fields containing exactly one numeric term in the field. The range can be half-open by setting one
* of the values to <code>null</code>.
*/
@@ -322,8 +322,8 @@ public abstract class FieldCacheRangeFilter<T> extends Filter {
if (inclusiveLowerPoint > inclusiveUpperPoint)
return DocIdSet.EMPTY_DOCIDSET;

-final long[] values = FieldCache.DEFAULT.getLongs(context.reader, field, (FieldCache.LongParser) parser, false);
-return new FieldCacheDocIdSet(context.reader.maxDoc(), acceptDocs) {
+final long[] values = FieldCache.DEFAULT.getLongs(context.reader(), field, (FieldCache.LongParser) parser, false);
+return new FieldCacheDocIdSet(context.reader().maxDoc(), acceptDocs) {
@Override
protected boolean matchDoc(int doc) {
return values[doc] >= inclusiveLowerPoint && values[doc] <= inclusiveUpperPoint;
@@ -334,7 +334,7 @@ public abstract class FieldCacheRangeFilter<T> extends Filter {
}

/**
-* Creates a numeric range filter using {@link FieldCache#getFloats(IndexReader,String,boolean)}. This works with all
+* Creates a numeric range filter using {@link FieldCache#getFloats(AtomicReader,String,boolean)}. This works with all
* float fields containing exactly one numeric term in the field. The range can be half-open by setting one
* of the values to <code>null</code>.
*/
@@ -343,7 +343,7 @@ public abstract class FieldCacheRangeFilter<T> extends Filter {
}

/**
-* Creates a numeric range filter using {@link FieldCache#getFloats(IndexReader,String,FieldCache.FloatParser,boolean)}. This works with all
+* Creates a numeric range filter using {@link FieldCache#getFloats(AtomicReader,String,FieldCache.FloatParser,boolean)}. This works with all
* float fields containing exactly one numeric term in the field. The range can be half-open by setting one
* of the values to <code>null</code>.
*/
@@ -376,8 +376,8 @@ public abstract class FieldCacheRangeFilter<T> extends Filter {
if (inclusiveLowerPoint > inclusiveUpperPoint)
return DocIdSet.EMPTY_DOCIDSET;

-final float[] values = FieldCache.DEFAULT.getFloats(context.reader, field, (FieldCache.FloatParser) parser, false);
-return new FieldCacheDocIdSet(context.reader.maxDoc(), acceptDocs) {
+final float[] values = FieldCache.DEFAULT.getFloats(context.reader(), field, (FieldCache.FloatParser) parser, false);
+return new FieldCacheDocIdSet(context.reader().maxDoc(), acceptDocs) {
@Override
protected boolean matchDoc(int doc) {
return values[doc] >= inclusiveLowerPoint && values[doc] <= inclusiveUpperPoint;
@@ -388,7 +388,7 @@ public abstract class FieldCacheRangeFilter<T> extends Filter {
}

/**
-* Creates a numeric range filter using {@link FieldCache#getDoubles(IndexReader,String,boolean)}. This works with all
+* Creates a numeric range filter using {@link FieldCache#getDoubles(AtomicReader,String,boolean)}. This works with all
* double fields containing exactly one numeric term in the field. The range can be half-open by setting one
* of the values to <code>null</code>.
*/
@@ -397,7 +397,7 @@ public abstract class FieldCacheRangeFilter<T> extends Filter {
}

/**
-* Creates a numeric range filter using {@link FieldCache#getDoubles(IndexReader,String,FieldCache.DoubleParser,boolean)}. This works with all
+* Creates a numeric range filter using {@link FieldCache#getDoubles(AtomicReader,String,FieldCache.DoubleParser,boolean)}. This works with all
* double fields containing exactly one numeric term in the field. The range can be half-open by setting one
* of the values to <code>null</code>.
*/
@@ -430,9 +430,9 @@ public abstract class FieldCacheRangeFilter<T> extends Filter {
if (inclusiveLowerPoint > inclusiveUpperPoint)
return DocIdSet.EMPTY_DOCIDSET;

-final double[] values = FieldCache.DEFAULT.getDoubles(context.reader, field, (FieldCache.DoubleParser) parser, false);
+final double[] values = FieldCache.DEFAULT.getDoubles(context.reader(), field, (FieldCache.DoubleParser) parser, false);
// ignore deleted docs if range doesn't contain 0
-return new FieldCacheDocIdSet(context.reader.maxDoc(), acceptDocs) {
+return new FieldCacheDocIdSet(context.reader().maxDoc(), acceptDocs) {
@Override
protected boolean matchDoc(int doc) {
return values[doc] >= inclusiveLowerPoint && values[doc] <= inclusiveUpperPoint;
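
Every factory in FieldCacheRangeFilter now resolves its values through context.reader() inside getDocIdSet(), so the filter object itself stays segment-agnostic. A sketch of typical construction via the public factories (the "price" field and bounds are illustrative):

    import org.apache.lucene.search.FieldCacheRangeFilter;
    import org.apache.lucene.search.Filter;

    class RangeFilterExample {
      // Half-open range [min, *): a null bound leaves the range open at
      // that end, as the javadoc above describes.
      static Filter priceAtLeast(int min) {
        return FieldCacheRangeFilter.newIntRange("price", min, null, true, false);
      }
    }
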
@@ -19,9 +19,9 @@ package org.apache.lucene.search;

import java.io.IOException;

+import org.apache.lucene.index.AtomicReaderContext;
import org.apache.lucene.index.DocsEnum; // javadoc @link
import org.apache.lucene.index.IndexReader;
-import org.apache.lucene.index.IndexReader.AtomicReaderContext;
import org.apache.lucene.util.FixedBitSet;
import org.apache.lucene.util.Bits;
import org.apache.lucene.util.BytesRef;
@@ -118,7 +118,7 @@ public class FieldCacheTermsFilter extends Filter {

@Override
public DocIdSet getDocIdSet(AtomicReaderContext context, Bits acceptDocs) throws IOException {
-final FieldCache.DocTermsIndex fcsi = getFieldCache().getTermsIndex(context.reader, field);
+final FieldCache.DocTermsIndex fcsi = getFieldCache().getTermsIndex(context.reader(), field);
final FixedBitSet bits = new FixedBitSet(fcsi.numOrd());
final BytesRef spare = new BytesRef();
for (int i=0;i<terms.length;i++) {
@@ -127,7 +127,7 @@ public class FieldCacheTermsFilter extends Filter {
bits.set(termNumber);
}
}
-return new FieldCacheDocIdSet(context.reader.maxDoc(), acceptDocs) {
+return new FieldCacheDocIdSet(context.reader().maxDoc(), acceptDocs) {
@Override
protected final boolean matchDoc(int doc) {
return bits.get(fcsi.getOrd(doc));
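
FieldCacheTermsFilter builds one bit set of accepted term ordinals per segment and tests each document's ordinal against it. A hypothetical construction sketch (the field and terms are assumptions):

    import org.apache.lucene.search.FieldCacheTermsFilter;
    import org.apache.lucene.search.Filter;

    class TermsFilterExample {
      // Matches documents whose single-valued "category" field holds
      // any one of the given terms.
      static Filter categories() {
        return new FieldCacheTermsFilter("category", "books", "music");
      }
    }
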
@@ -20,9 +20,9 @@ package org.apache.lucene.search;
import java.io.IOException;
import java.util.Comparator;

-import org.apache.lucene.index.IndexReader.AtomicReaderContext;
+import org.apache.lucene.index.AtomicReader; // javadocs
+import org.apache.lucene.index.AtomicReaderContext;
import org.apache.lucene.index.DocValues;
-import org.apache.lucene.index.IndexReader;
import org.apache.lucene.search.FieldCache.ByteParser;
import org.apache.lucene.search.FieldCache.DocTerms;
import org.apache.lucene.search.FieldCache.DocTermsIndex;
@@ -72,7 +72,7 @@ import org.apache.lucene.util.packed.PackedInts;
* priority queue. The {@link FieldValueHitQueue}
* calls this method when a new hit is competitive.
*
-* <li> {@link #setNextReader(IndexReader.AtomicReaderContext)} Invoked
+* <li> {@link #setNextReader(AtomicReaderContext)} Invoked
* when the search is switching to the next segment.
* You may need to update internal state of the
* comparator, for example retrieving new values from
@@ -203,7 +203,7 @@ public abstract class FieldComparator<T> {
@Override
public FieldComparator setNextReader(AtomicReaderContext context) throws IOException {
if (missingValue != null) {
-docsWithField = FieldCache.DEFAULT.getDocsWithField(context.reader, field);
+docsWithField = FieldCache.DEFAULT.getDocsWithField(context.reader(), field);
// optimization to remove unneeded checks on the bit interface:
if (docsWithField instanceof Bits.MatchAllBits) {
docsWithField = null;
@@ -261,7 +261,7 @@ public abstract class FieldComparator<T> {
public FieldComparator setNextReader(AtomicReaderContext context) throws IOException {
// NOTE: must do this before calling super otherwise
// we compute the docsWithField Bits twice!
-currentReaderValues = FieldCache.DEFAULT.getBytes(context.reader, field, parser, missingValue != null);
+currentReaderValues = FieldCache.DEFAULT.getBytes(context.reader(), field, parser, missingValue != null);
return super.setNextReader(context);
}

@@ -338,7 +338,7 @@ public abstract class FieldComparator<T> {
public FieldComparator setNextReader(AtomicReaderContext context) throws IOException {
// NOTE: must do this before calling super otherwise
// we compute the docsWithField Bits twice!
-currentReaderValues = FieldCache.DEFAULT.getDoubles(context.reader, field, parser, missingValue != null);
+currentReaderValues = FieldCache.DEFAULT.getDoubles(context.reader(), field, parser, missingValue != null);
return super.setNextReader(context);
}

@@ -397,7 +397,7 @@ public abstract class FieldComparator<T> {

@Override
public FieldComparator setNextReader(AtomicReaderContext context) throws IOException {
-final DocValues docValues = context.reader.docValues(field);
+final DocValues docValues = context.reader().docValues(field);
if (docValues != null) {
currentReaderValues = docValues.getSource();
} else {
@@ -481,7 +481,7 @@ public abstract class FieldComparator<T> {
public FieldComparator setNextReader(AtomicReaderContext context) throws IOException {
// NOTE: must do this before calling super otherwise
// we compute the docsWithField Bits twice!
-currentReaderValues = FieldCache.DEFAULT.getFloats(context.reader, field, parser, missingValue != null);
+currentReaderValues = FieldCache.DEFAULT.getFloats(context.reader(), field, parser, missingValue != null);
return super.setNextReader(context);
}

@@ -543,7 +543,7 @@ public abstract class FieldComparator<T> {
public FieldComparator setNextReader(AtomicReaderContext context) throws IOException {
// NOTE: must do this before calling super otherwise
// we compute the docsWithField Bits twice!
-currentReaderValues = FieldCache.DEFAULT.getShorts(context.reader, field, parser, missingValue != null);
+currentReaderValues = FieldCache.DEFAULT.getShorts(context.reader(), field, parser, missingValue != null);
return super.setNextReader(context);
}

@@ -627,7 +627,7 @@ public abstract class FieldComparator<T> {
public FieldComparator setNextReader(AtomicReaderContext context) throws IOException {
// NOTE: must do this before calling super otherwise
// we compute the docsWithField Bits twice!
-currentReaderValues = FieldCache.DEFAULT.getInts(context.reader, field, parser, missingValue != null);
+currentReaderValues = FieldCache.DEFAULT.getInts(context.reader(), field, parser, missingValue != null);
return super.setNextReader(context);
}

@@ -690,7 +690,7 @@ public abstract class FieldComparator<T> {

@Override
public FieldComparator setNextReader(AtomicReaderContext context) throws IOException {
-DocValues docValues = context.reader.docValues(field);
+DocValues docValues = context.reader().docValues(field);
if (docValues != null) {
currentReaderValues = docValues.getSource();
} else {
@@ -775,7 +775,7 @@ public abstract class FieldComparator<T> {
public FieldComparator setNextReader(AtomicReaderContext context) throws IOException {
// NOTE: must do this before calling super otherwise
// we compute the docsWithField Bits twice!
-currentReaderValues = FieldCache.DEFAULT.getLongs(context.reader, field, parser, missingValue != null);
+currentReaderValues = FieldCache.DEFAULT.getLongs(context.reader(), field, parser, missingValue != null);
return super.setNextReader(context);
}

@@ -1288,7 +1288,7 @@ public abstract class FieldComparator<T> {
@Override
public FieldComparator setNextReader(AtomicReaderContext context) throws IOException {
final int docBase = context.docBase;
-termsIndex = FieldCache.DEFAULT.getTermsIndex(context.reader, field);
+termsIndex = FieldCache.DEFAULT.getTermsIndex(context.reader(), field);
final PackedInts.Reader docToOrd = termsIndex.getDocToOrd();
FieldComparator perSegComp = null;
if (docToOrd.hasArray()) {
@@ -1706,19 +1706,19 @@ public abstract class FieldComparator<T> {
public FieldComparator setNextReader(AtomicReaderContext context) throws IOException {
final int docBase = context.docBase;

-final DocValues dv = context.reader.docValues(field);
+final DocValues dv = context.reader().docValues(field);
if (dv == null) {
// This may mean entire segment had no docs with
// this DV field; use default field value (empty
// byte[]) in this case:
-termsIndex = DocValues.getDefaultSortedSource(DocValues.Type.BYTES_VAR_SORTED, context.reader.maxDoc());
+termsIndex = DocValues.getDefaultSortedSource(DocValues.Type.BYTES_VAR_SORTED, context.reader().maxDoc());
} else {
termsIndex = dv.getSource().asSortedSource();
if (termsIndex == null) {
// This means segment has doc values, but they are
// not able to provide a sorted source; consider
// this a hard error:
-throw new IllegalStateException("DocValues exist for field \"" + field + "\", but not as a sorted source: type=" + dv.getSource().type() + " reader=" + context.reader);
+throw new IllegalStateException("DocValues exist for field \"" + field + "\", but not as a sorted source: type=" + dv.getSource().type() + " reader=" + context.reader());
}
}

@@ -1853,7 +1853,7 @@ public abstract class FieldComparator<T> {

@Override
public FieldComparator setNextReader(AtomicReaderContext context) throws IOException {
-docTerms = FieldCache.DEFAULT.getTerms(context.reader, field);
+docTerms = FieldCache.DEFAULT.getTerms(context.reader(), field);
return this;
}

@@ -1885,7 +1885,7 @@ public abstract class FieldComparator<T> {
* comparisons are done using BytesRef.compareTo, which is
* slow for medium to large result sets but possibly
* very fast for very small results sets. The BytesRef
-* values are obtained using {@link IndexReader#docValues}. */
+* values are obtained using {@link AtomicReader#docValues}. */
public static final class TermValDocValuesComparator extends FieldComparator<BytesRef> {

private BytesRef[] values;
@@ -1922,7 +1922,7 @@ public abstract class FieldComparator<T> {

@Override
public FieldComparator setNextReader(AtomicReaderContext context) throws IOException {
-final DocValues dv = context.reader.docValues(field);
+final DocValues dv = context.reader().docValues(field);
if (dv != null) {
docTerms = dv.getSource();
} else {
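
Every comparator above follows the same per-segment hand-off: setNextReader() receives an AtomicReaderContext and refreshes its value arrays through context.reader() before the segment's hits are collected. A distilled sketch of that pattern (the class and field names are illustrative):

    import java.io.IOException;
    import org.apache.lucene.index.AtomicReaderContext;
    import org.apache.lucene.search.FieldCache;
    import org.apache.lucene.util.Bits;

    class PerSegmentState {
      int[] values;        // current segment's values
      Bits docsWithField;  // which docs have a value at all

      void nextSegment(AtomicReaderContext context, String field) throws IOException {
        values = FieldCache.DEFAULT.getInts(context.reader(), field, true);
        docsWithField = FieldCache.DEFAULT.getDocsWithField(context.reader(), field);
      }
    }
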
@@ -18,7 +18,7 @@ package org.apache.lucene.search;
*/
import java.io.IOException;

-import org.apache.lucene.index.IndexReader.AtomicReaderContext;
+import org.apache.lucene.index.AtomicReaderContext;
import org.apache.lucene.util.Bits;
import org.apache.lucene.util.Bits.MatchAllBits;
import org.apache.lucene.util.Bits.MatchNoBits;
@@ -77,12 +77,12 @@ public class FieldValueFilter extends Filter {
public DocIdSet getDocIdSet(AtomicReaderContext context, Bits acceptDocs)
throws IOException {
final Bits docsWithField = FieldCache.DEFAULT.getDocsWithField(
-context.reader, field);
+context.reader(), field);
if (negate) {
if (docsWithField instanceof MatchAllBits) {
return null;
}
-return new FieldCacheDocIdSet(context.reader.maxDoc(), acceptDocs) {
+return new FieldCacheDocIdSet(context.reader().maxDoc(), acceptDocs) {
@Override
protected final boolean matchDoc(int doc) {
return !docsWithField.get(doc);
@@ -97,7 +97,7 @@ public class FieldValueFilter extends Filter {
// :-)
return BitsFilteredDocIdSet.wrap((DocIdSet) docsWithField, acceptDocs);
}
-return new FieldCacheDocIdSet(context.reader.maxDoc(), acceptDocs) {
+return new FieldCacheDocIdSet(context.reader().maxDoc(), acceptDocs) {
@Override
protected final boolean matchDoc(int doc) {
return docsWithField.get(doc);
@@ -19,8 +19,9 @@ package org.apache.lucene.search;

 import java.io.IOException;

+import org.apache.lucene.index.AtomicReader; // javadocs
+import org.apache.lucene.index.AtomicReaderContext;
 import org.apache.lucene.index.IndexReader; // javadocs
-import org.apache.lucene.index.IndexReader.AtomicReaderContext;
 import org.apache.lucene.util.Bits;

 /**

@@ -44,7 +45,7 @@ public abstract class Filter {
 * represent the whole underlying index i.e. if the index has more than
 * one segment the given reader only represents a single segment.
 * The provided context is always an atomic context, so you can call
-* {@link IndexReader#fields()}
+* {@link AtomicReader#fields()}
 * on the context's reader, for example.
 *
 * @param acceptDocs

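Since getDocIdSet now always receives an atomic (single-segment) context, a custom Filter can pull per-segment postings straight off context.reader(). Below is a minimal sketch against the trunk API at this point; the class name SingleTermFilter and the TermsEnum.docs(Bits, DocsEnum) call reflect my reading of trunk here and are assumptions of the example, not part of this patch.

    import java.io.IOException;

    import org.apache.lucene.index.AtomicReaderContext;
    import org.apache.lucene.index.DocsEnum;
    import org.apache.lucene.index.Term;
    import org.apache.lucene.index.Terms;
    import org.apache.lucene.index.TermsEnum;
    import org.apache.lucene.search.DocIdSet;
    import org.apache.lucene.search.DocIdSetIterator;
    import org.apache.lucene.search.Filter;
    import org.apache.lucene.util.Bits;
    import org.apache.lucene.util.FixedBitSet;

    /** Hypothetical example filter: matches all documents containing one term. */
    public class SingleTermFilter extends Filter {
      private final Term term;

      public SingleTermFilter(Term term) {
        this.term = term;
      }

      @Override
      public DocIdSet getDocIdSet(AtomicReaderContext context, Bits acceptDocs) throws IOException {
        // context.reader() is an AtomicReader, so terms() is per-segment.
        final Terms terms = context.reader().terms(term.field());
        if (terms == null) {
          return null; // field not present in this segment
        }
        final TermsEnum termsEnum = terms.iterator(null);
        if (!termsEnum.seekExact(term.bytes(), false)) {
          return null; // term not present in this segment
        }
        final FixedBitSet bits = new FixedBitSet(context.reader().maxDoc());
        final DocsEnum docs = termsEnum.docs(acceptDocs, null);
        int doc;
        while ((doc = docs.nextDoc()) != DocIdSetIterator.NO_MORE_DOCS) {
          bits.set(doc); // deleted/filtered docs were already skipped via acceptDocs
        }
        return bits;
      }
    }
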
@@ -17,8 +17,8 @@ package org.apache.lucene.search;
 * limitations under the License.
 */

+import org.apache.lucene.index.AtomicReaderContext;
 import org.apache.lucene.index.IndexReader;
-import org.apache.lucene.index.IndexReader.AtomicReaderContext;
 import org.apache.lucene.index.Term;
 import org.apache.lucene.util.Bits;
 import org.apache.lucene.util.ToStringUtils;

@@ -100,7 +100,7 @@ public class FilteredQuery extends Query {
 public Explanation explain (AtomicReaderContext ir, int i) throws IOException {
 Explanation inner = weight.explain (ir, i);
 Filter f = FilteredQuery.this.filter;
-DocIdSet docIdSet = f.getDocIdSet(ir, ir.reader.getLiveDocs());
+DocIdSet docIdSet = f.getDocIdSet(ir, ir.reader().getLiveDocs());
 DocIdSetIterator docIdSetIterator = docIdSet == null ? DocIdSet.EMPTY_DOCIDSET.iterator() : docIdSet.iterator();
 if (docIdSetIterator == null) {
 docIdSetIterator = DocIdSet.EMPTY_DOCIDSET.iterator();

@@ -31,11 +31,12 @@ import java.util.concurrent.locks.Lock;
 import java.util.concurrent.locks.ReentrantLock;

 import org.apache.lucene.document.Document;
+import org.apache.lucene.index.AtomicReaderContext;
 import org.apache.lucene.index.CorruptIndexException;
-import org.apache.lucene.index.IndexReader.AtomicReaderContext;
-import org.apache.lucene.index.IndexReader.ReaderContext;
+import org.apache.lucene.index.DirectoryReader; // javadocs
 import org.apache.lucene.index.IndexReader;
 import org.apache.lucene.index.MultiFields;
+import org.apache.lucene.index.IndexReaderContext;
 import org.apache.lucene.index.StoredFieldVisitor;
 import org.apache.lucene.index.Term;
 import org.apache.lucene.index.Terms;

@@ -56,10 +57,11 @@ import org.apache.lucene.util.ThreadInterruptedException;
 * multiple searches instead of creating a new one
 * per-search. If your index has changed and you wish to
 * see the changes reflected in searching, you should
-* use {@link IndexReader#openIfChanged} to obtain a new reader and
+* use {@link DirectoryReader#openIfChanged(DirectoryReader)}
+* to obtain a new reader and
 * then create a new IndexSearcher from that. Also, for
 * low-latency turnaround it's best to use a near-real-time
-* reader ({@link IndexReader#open(IndexWriter,boolean)}).
+* reader ({@link DirectoryReader#open(IndexWriter,boolean)}).
 * Once you have a new {@link IndexReader}, it's relatively
 * cheap to create a new IndexSearcher from it.
 *

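With openIfChanged moved onto DirectoryReader, the refresh idiom this javadoc describes looks roughly like the sketch below; the helper class and the point at which the old reader is closed are assumptions of the example, not part of the patch.

    import java.io.IOException;

    import org.apache.lucene.index.DirectoryReader;
    import org.apache.lucene.search.IndexSearcher;
    import org.apache.lucene.store.Directory;

    /** Hypothetical helper showing the post-LUCENE-2858 reopen idiom. */
    class ReopenExample {
      private DirectoryReader reader;
      private IndexSearcher searcher;

      ReopenExample(Directory dir) throws IOException {
        reader = DirectoryReader.open(dir);
        searcher = new IndexSearcher(reader);
      }

      /** Call whenever the index may have changed. */
      void refresh() throws IOException {
        // openIfChanged returns null when the reader is still current.
        final DirectoryReader newReader = DirectoryReader.openIfChanged(reader);
        if (newReader != null) {
          reader.close(); // assumes no other thread still searches the old reader
          reader = newReader;
          searcher = new IndexSearcher(reader); // cheap next to opening the reader
        }
      }

      IndexSearcher searcher() {
        return searcher;
      }
    }
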
@@ -76,7 +78,7 @@ public class IndexSearcher {

 // NOTE: these members might change in incompatible ways
 // in the next release
-protected final ReaderContext readerContext;
+protected final IndexReaderContext readerContext;
 protected final AtomicReaderContext[] leafContexts;
 // used with executor - each slice holds a set of leafs executed within one thread
 protected final LeafSlice[] leafSlices;

@@ -122,7 +124,7 @@ public class IndexSearcher {
 }

 /**
-* Creates a searcher searching the provided top-level {@link ReaderContext}.
+* Creates a searcher searching the provided top-level {@link IndexReaderContext}.
 * <p>
 * Given a non-<code>null</code> {@link ExecutorService} this method runs
 * searches for each segment separately, using the provided ExecutorService.

@@ -133,13 +135,13 @@ public class IndexSearcher {
 * silently close file descriptors (see <a
 * href="https://issues.apache.org/jira/browse/LUCENE-2239">LUCENE-2239</a>).
 *
-* @see ReaderContext
+* @see IndexReaderContext
 * @see IndexReader#getTopReaderContext()
 * @lucene.experimental
 */
-public IndexSearcher(ReaderContext context, ExecutorService executor) {
-assert context.isTopLevel: "IndexSearcher's ReaderContext must be topLevel for reader" + context.reader;
-reader = context.reader;
+public IndexSearcher(IndexReaderContext context, ExecutorService executor) {
+assert context.isTopLevel: "IndexSearcher's ReaderContext must be topLevel for reader" + context.reader();
+reader = context.reader();
 this.executor = executor;
 this.readerContext = context;
 leafContexts = ReaderUtil.leaves(context);

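A quick sketch of the ExecutorService-based constructor described above; the pool size is an arbitrary assumption, and, as the javadoc warns, the caller keeps ownership of the pool because IndexSearcher never shuts it down.

    import java.util.concurrent.ExecutorService;
    import java.util.concurrent.Executors;

    import org.apache.lucene.index.IndexReader;
    import org.apache.lucene.search.IndexSearcher;

    class ExecutorSearcherExample {
      static void demo(IndexReader reader) {
        final ExecutorService pool = Executors.newFixedThreadPool(4);
        try {
          // Each leaf slice is searched on the provided pool.
          final IndexSearcher searcher =
              new IndexSearcher(reader.getTopReaderContext(), pool);
          // ... run searches with searcher ...
        } finally {
          pool.shutdown(); // the caller's job; IndexSearcher leaves the pool alone
        }
      }
    }
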
@@ -147,13 +149,13 @@ public class IndexSearcher {
 }

 /**
-* Creates a searcher searching the provided top-level {@link ReaderContext}.
+* Creates a searcher searching the provided top-level {@link IndexReaderContext}.
 *
-* @see ReaderContext
+* @see IndexReaderContext
 * @see IndexReader#getTopReaderContext()
 * @lucene.experimental
 */
-public IndexSearcher(ReaderContext context) {
+public IndexSearcher(IndexReaderContext context) {
 this(context, null);
 }

@@ -402,7 +404,7 @@ public class IndexSearcher {
 * <p>NOTE: this does not compute scores by default. If you
 * need scores, create a {@link TopFieldCollector}
 * instance by calling {@link TopFieldCollector#create} and
-* then pass that to {@link #search(IndexReader.AtomicReaderContext[], Weight,
+* then pass that to {@link #search(AtomicReaderContext[], Weight,
 * Collector)}.</p>
 */
 protected TopFieldDocs search(Weight weight, int nDocs,

@@ -451,7 +453,7 @@ public class IndexSearcher {
 * <p>NOTE: this does not compute scores by default. If you
 * need scores, create a {@link TopFieldCollector}
 * instance by calling {@link TopFieldCollector#create} and
-* then pass that to {@link #search(IndexReader.AtomicReaderContext[], Weight,
+* then pass that to {@link #search(AtomicReaderContext[], Weight,
 * Collector)}.</p>
 */
 protected TopFieldDocs search(AtomicReaderContext[] leaves, Weight weight, int nDocs,

@@ -501,7 +503,7 @@ public class IndexSearcher {
 // always use single thread:
 for (int i = 0; i < leaves.length; i++) { // search each subreader
 collector.setNextReader(leaves[i]);
-Scorer scorer = weight.scorer(leaves[i], !collector.acceptsDocsOutOfOrder(), true, leaves[i].reader.getLiveDocs());
+Scorer scorer = weight.scorer(leaves[i], !collector.acceptsDocsOutOfOrder(), true, leaves[i].reader().getLiveDocs());
 if (scorer != null) {
 scorer.score(collector);
 }

@@ -589,11 +591,11 @@ public class IndexSearcher {
 }

 /**
-* Returns this searchers the top-level {@link ReaderContext}.
+* Returns this searchers the top-level {@link IndexReaderContext}.
 * @see IndexReader#getTopReaderContext()
 */
 /* sugar for #getReader().getTopReaderContext() */
-public ReaderContext getTopReaderContext() {
+public IndexReaderContext getTopReaderContext() {
 return readerContext;
 }

@@ -17,8 +17,8 @@ package org.apache.lucene.search;
 * limitations under the License.
 */

+import org.apache.lucene.index.AtomicReaderContext;
 import org.apache.lucene.index.IndexReader;
-import org.apache.lucene.index.IndexReader.AtomicReaderContext;
 import org.apache.lucene.index.Term;
 import org.apache.lucene.util.ToStringUtils;
 import org.apache.lucene.util.Bits;

@@ -106,7 +106,7 @@ public class MatchAllDocsQuery extends Query {
 @Override
 public Scorer scorer(AtomicReaderContext context, boolean scoreDocsInOrder,
 boolean topScorer, Bits acceptDocs) throws IOException {
-return new MatchAllScorer(context.reader, acceptDocs, this, queryWeight);
+return new MatchAllScorer(context.reader(), acceptDocs, this, queryWeight);
 }

 @Override

@@ -19,7 +19,7 @@ package org.apache.lucene.search;

 import java.io.IOException;

-import org.apache.lucene.index.IndexReader.AtomicReaderContext;
+import org.apache.lucene.index.AtomicReaderContext;
 import org.apache.lucene.search.Collector;
 import org.apache.lucene.search.Scorer;

@@ -20,11 +20,12 @@ package org.apache.lucene.search;
 import java.io.IOException;
 import java.util.*;

+import org.apache.lucene.index.AtomicReaderContext;
 import org.apache.lucene.index.DocsAndPositionsEnum;
 import org.apache.lucene.index.DocsEnum;
-import org.apache.lucene.index.IndexReader.AtomicReaderContext;
-import org.apache.lucene.index.IndexReader.ReaderContext;
+import org.apache.lucene.index.AtomicReader;
 import org.apache.lucene.index.IndexReader;
+import org.apache.lucene.index.IndexReaderContext;
 import org.apache.lucene.index.Term;
 import org.apache.lucene.index.TermState;
 import org.apache.lucene.index.Terms;

@@ -142,7 +143,7 @@ public class MultiPhraseQuery extends Query {
 public MultiPhraseWeight(IndexSearcher searcher)
 throws IOException {
 this.similarity = searcher.getSimilarityProvider().get(field);
-final ReaderContext context = searcher.getTopReaderContext();
+final IndexReaderContext context = searcher.getTopReaderContext();

 // compute idf
 ArrayList<TermStatistics> allTermStats = new ArrayList<TermStatistics>();

@@ -177,7 +178,7 @@ public class MultiPhraseQuery extends Query {
 public Scorer scorer(AtomicReaderContext context, boolean scoreDocsInOrder,
 boolean topScorer, Bits acceptDocs) throws IOException {
 assert !termArrays.isEmpty();
-final IndexReader reader = context.reader;
+final AtomicReader reader = context.reader();
 final Bits liveDocs = acceptDocs;

 PhraseQuery.PostingsAndFreq[] postingsFreqs = new PhraseQuery.PostingsAndFreq[termArrays.size()];

@@ -258,7 +259,7 @@ public class MultiPhraseQuery extends Query {

 @Override
 public Explanation explain(AtomicReaderContext context, int doc) throws IOException {
-Scorer scorer = scorer(context, true, false, context.reader.getLiveDocs());
+Scorer scorer = scorer(context, true, false, context.reader().getLiveDocs());
 if (scorer != null) {
 int newDoc = scorer.advance(doc);
 if (newDoc == doc) {

@@ -19,10 +19,10 @@ package org.apache.lucene.search;

 import java.io.IOException;

+import org.apache.lucene.index.AtomicReaderContext;
 import org.apache.lucene.index.DocsEnum;
 import org.apache.lucene.index.Fields;
-import org.apache.lucene.index.IndexReader.AtomicReaderContext;
-import org.apache.lucene.index.IndexReader;
+import org.apache.lucene.index.AtomicReader;
 import org.apache.lucene.index.Terms;
 import org.apache.lucene.index.TermsEnum;
 import org.apache.lucene.util.FixedBitSet;

@@ -83,7 +83,7 @@ public class MultiTermQueryWrapperFilter<Q extends MultiTermQuery> extends Filte
 */
 @Override
 public DocIdSet getDocIdSet(AtomicReaderContext context, Bits acceptDocs) throws IOException {
-final IndexReader reader = context.reader;
+final AtomicReader reader = context.reader();
 final Fields fields = reader.fields();
 if (fields == null) {
 // reader has no fields

@@ -100,7 +100,7 @@ public class MultiTermQueryWrapperFilter<Q extends MultiTermQuery> extends Filte
 assert termsEnum != null;
 if (termsEnum.next() != null) {
 // fill into a FixedBitSet
-final FixedBitSet bitSet = new FixedBitSet(context.reader.maxDoc());
+final FixedBitSet bitSet = new FixedBitSet(context.reader().maxDoc());
 DocsEnum docsEnum = null;
 do {
 // System.out.println("  iter termCount=" + termCount + " term=" +

@@ -21,10 +21,11 @@ import java.io.IOException;
 import java.util.ArrayList;
 import java.util.Set;

+import org.apache.lucene.index.AtomicReaderContext;
 import org.apache.lucene.index.DocsAndPositionsEnum;
-import org.apache.lucene.index.IndexReader.AtomicReaderContext;
-import org.apache.lucene.index.IndexReader.ReaderContext;
 import org.apache.lucene.index.IndexReader;
+import org.apache.lucene.index.AtomicReader;
+import org.apache.lucene.index.IndexReaderContext;
 import org.apache.lucene.index.Term;
 import org.apache.lucene.index.TermState;
 import org.apache.lucene.index.Terms;

@@ -188,7 +189,7 @@ public class PhraseQuery extends Query {
 public PhraseWeight(IndexSearcher searcher)
 throws IOException {
 this.similarity = searcher.getSimilarityProvider().get(field);
-final ReaderContext context = searcher.getTopReaderContext();
+final IndexReaderContext context = searcher.getTopReaderContext();
 states = new TermContext[terms.size()];
 TermStatistics termStats[] = new TermStatistics[terms.size()];
 for (int i = 0; i < terms.size(); i++) {

@@ -219,7 +220,7 @@ public class PhraseQuery extends Query {
 public Scorer scorer(AtomicReaderContext context, boolean scoreDocsInOrder,
 boolean topScorer, Bits acceptDocs) throws IOException {
 assert !terms.isEmpty();
-final IndexReader reader = context.reader;
+final AtomicReader reader = context.reader();
 final Bits liveDocs = acceptDocs;
 PostingsAndFreq[] postingsFreqs = new PostingsAndFreq[terms.size()];

@@ -270,13 +271,13 @@ public class PhraseQuery extends Query {
 }

 // only called from assert
-private boolean termNotInReader(IndexReader reader, String field, BytesRef bytes) throws IOException {
+private boolean termNotInReader(AtomicReader reader, String field, BytesRef bytes) throws IOException {
 return reader.docFreq(field, bytes) == 0;
 }

 @Override
 public Explanation explain(AtomicReaderContext context, int doc) throws IOException {
-Scorer scorer = scorer(context, true, false, context.reader.getLiveDocs());
+Scorer scorer = scorer(context, true, false, context.reader().getLiveDocs());
 if (scorer != null) {
 int newDoc = scorer.advance(doc);
 if (newDoc == doc) {

@@ -19,7 +19,7 @@ package org.apache.lucene.search;

 import java.io.IOException;

-import org.apache.lucene.index.IndexReader.AtomicReaderContext;
+import org.apache.lucene.index.AtomicReaderContext;

 /**
 * A {@link Collector} implementation which wraps another

@@ -19,7 +19,7 @@ package org.apache.lucene.search;

 import java.io.IOException;

-import org.apache.lucene.index.IndexReader.AtomicReaderContext;
+import org.apache.lucene.index.AtomicReaderContext;
 import org.apache.lucene.util.Bits;

 /**

@@ -50,8 +50,7 @@ public class QueryWrapperFilter extends Filter {
 @Override
 public DocIdSet getDocIdSet(final AtomicReaderContext context, final Bits acceptDocs) throws IOException {
 // get a private context that is used to rewrite, createWeight and score eventually
-assert context.reader.getTopReaderContext().isAtomic;
-final AtomicReaderContext privateContext = (AtomicReaderContext) context.reader.getTopReaderContext();
+final AtomicReaderContext privateContext = context.reader().getTopReaderContext();
 final Weight weight = new IndexSearcher(privateContext).createNormalizedWeight(query);
 return new DocIdSet() {
 @Override

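The dropped assert and cast above are possible because, after the reader split, an atomic reader's top-level context is itself atomic; AtomicReader.getTopReaderContext() now returns AtomicReaderContext covariantly, which is what this illustrative fragment leans on.

    import org.apache.lucene.index.AtomicReader;
    import org.apache.lucene.index.AtomicReaderContext;

    class PrivateContextExample {
      static AtomicReaderContext privateContextOf(AtomicReaderContext context) {
        final AtomicReader atomic = context.reader();
        // Covariant return: no ReaderContext cast, no isAtomic assert needed.
        return atomic.getTopReaderContext();
      }
    }
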
@@ -25,7 +25,7 @@ import java.util.List;
 import java.util.concurrent.ConcurrentHashMap;

 import org.apache.lucene.search.NRTManager; // javadocs
-import org.apache.lucene.index.IndexReader; // javadocs
+import org.apache.lucene.index.DirectoryReader;
 import org.apache.lucene.store.AlreadyClosedException;
 import org.apache.lucene.util.IOUtils;

@@ -85,9 +85,9 @@ import org.apache.lucene.util.IOUtils;
 * <p><b>NOTE</b>: keeping many searchers around means
 * you'll use more resources (open files, RAM) than a single
 * searcher. However, as long as you are using {@link
-* IndexReader#openIfChanged}, the searchers will usually
-* share almost all segments and the added resource usage is
-* contained. When a large merge has completed, and
+* DirectoryReader#openIfChanged(DirectoryReader)}, the searchers
+* will usually share almost all segments and the added resource usage
+* is contained. When a large merge has completed, and
 * you reopen, because that is a large change, the new
 * searcher will use higher additional RAM than other
 * searchers; but large merges don't complete very often and

@@ -109,7 +109,7 @@ public class SearcherLifetimeManager implements Closeable {

 public SearcherTracker(IndexSearcher searcher) {
 this.searcher = searcher;
-version = searcher.getIndexReader().getVersion();
+version = ((DirectoryReader) searcher.getIndexReader()).getVersion();
 searcher.getIndexReader().incRef();
 // Use nanoTime not currentTimeMillis since it [in
 // theory] reduces risk from clock shift

@@ -168,7 +168,7 @@ public class SearcherLifetimeManager implements Closeable {
 // TODO: we don't have to use IR.getVersion to track;
 // could be risky (if it's buggy); we could get better
 // bug isolation if we assign our own private ID:
-final long version = searcher.getIndexReader().getVersion();
+final long version = ((DirectoryReader) searcher.getIndexReader()).getVersion();
 SearcherTracker tracker = searchers.get(version);
 if (tracker == null) {
 //System.out.println("RECORD version=" + version + " ms=" + System.currentTimeMillis());

@@ -23,6 +23,7 @@ import java.util.concurrent.Semaphore;

 import org.apache.lucene.index.CorruptIndexException;
 import org.apache.lucene.index.IndexReader;
+import org.apache.lucene.index.DirectoryReader;
 import org.apache.lucene.index.IndexWriter;
 import org.apache.lucene.search.NRTManager; // javadocs
 import org.apache.lucene.search.IndexSearcher; // javadocs

@@ -76,12 +77,12 @@ public final class SearcherManager implements Closeable {
 * Creates and returns a new SearcherManager from the given {@link IndexWriter}.
 * @param writer the IndexWriter to open the IndexReader from.
 * @param applyAllDeletes If <code>true</code>, all buffered deletes will
-* be applied (made visible) in the {@link IndexSearcher} / {@link IndexReader}.
+* be applied (made visible) in the {@link IndexSearcher} / {@link DirectoryReader}.
 * If <code>false</code>, the deletes may or may not be applied, but remain buffered
 * (in IndexWriter) so that they will be applied in the future.
 * Applying deletes can be costly, so if your app can tolerate deleted documents
 * being returned you might gain some performance by passing <code>false</code>.
-* See {@link IndexReader#openIfChanged(IndexReader, IndexWriter, boolean)}.
+* See {@link DirectoryReader#openIfChanged(DirectoryReader, IndexWriter, boolean)}.
 * @param searcherFactory An optional {@link SearcherFactory}. Pass
 * <code>null</code> if you don't require the searcher to be warmed
 * before going live or other custom behavior.

@@ -93,12 +94,12 @@ public final class SearcherManager implements Closeable {
 searcherFactory = new SearcherFactory();
 }
 this.searcherFactory = searcherFactory;
-currentSearcher = searcherFactory.newSearcher(IndexReader.open(writer, applyAllDeletes));
+currentSearcher = searcherFactory.newSearcher(DirectoryReader.open(writer, applyAllDeletes));
 }

 /**
 * Creates and returns a new SearcherManager from the given {@link Directory}.
-* @param dir the directory to open the IndexReader on.
+* @param dir the directory to open the DirectoryReader on.
 * @param searcherFactory An optional {@link SearcherFactory}. Pass
 * <code>null</code> if you don't require the searcher to be warmed
 * before going live or other custom behavior.

@@ -110,12 +111,12 @@ public final class SearcherManager implements Closeable {
 searcherFactory = new SearcherFactory();
 }
 this.searcherFactory = searcherFactory;
-currentSearcher = searcherFactory.newSearcher(IndexReader.open(dir));
+currentSearcher = searcherFactory.newSearcher(DirectoryReader.open(dir));
 }

 /**
 * You must call this, periodically, to perform a reopen. This calls
-* {@link IndexReader#openIfChanged(IndexReader)} with the underlying reader, and if that returns a
+* {@link DirectoryReader#openIfChanged(DirectoryReader)} with the underlying reader, and if that returns a
 * new reader, it's warmed (if you provided a {@link SearcherFactory} and then
 * swapped into production.
 *

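A usage sketch of the lifecycle these javadocs describe; the refresh cadence and passing null for the SearcherFactory are assumptions of the example, not part of the patch.

    import java.io.IOException;

    import org.apache.lucene.search.IndexSearcher;
    import org.apache.lucene.search.SearcherManager;
    import org.apache.lucene.store.Directory;

    class SearcherManagerExample {
      static void demo(Directory dir) throws IOException {
        // null: use the default SearcherFactory (no warming).
        final SearcherManager mgr = new SearcherManager(dir, null);

        // Periodically, e.g. from a dedicated refresh thread:
        mgr.maybeReopen();

        // Per search request:
        final IndexSearcher s = mgr.acquire();
        try {
          // ... search with s ...
        } finally {
          mgr.release(s); // never use s after release
        }
      }
    }
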
@@ -144,7 +145,10 @@ public final class SearcherManager implements Closeable {
 final IndexReader newReader;
 final IndexSearcher searcherToReopen = acquire();
 try {
-newReader = IndexReader.openIfChanged(searcherToReopen.getIndexReader());
+final IndexReader r = searcherToReopen.getIndexReader();
+newReader = (r instanceof DirectoryReader) ?
+DirectoryReader.openIfChanged((DirectoryReader) r) :
+null;
 } finally {
 release(searcherToReopen);
 }

@@ -172,13 +176,16 @@ public final class SearcherManager implements Closeable {
 /**
 * Returns <code>true</code> if no changes have occured since this searcher
 * ie. reader was opened, otherwise <code>false</code>.
-* @see IndexReader#isCurrent()
+* @see DirectoryReader#isCurrent()
 */
 public boolean isSearcherCurrent() throws CorruptIndexException,
 IOException {
 final IndexSearcher searcher = acquire();
 try {
-return searcher.getIndexReader().isCurrent();
+final IndexReader r = searcher.getIndexReader();
+return r instanceof DirectoryReader ?
+((DirectoryReader ) r).isCurrent() :
+true;
 } finally {
 release(searcher);
 }

@@ -20,13 +20,13 @@ package org.apache.lucene.search;
 import java.io.IOException;
 import java.util.Comparator;

+import org.apache.lucene.index.AtomicReaderContext;
 import org.apache.lucene.index.Fields;
 import org.apache.lucene.index.IndexReader;
+import org.apache.lucene.index.IndexReaderContext;
 import org.apache.lucene.index.Term;
 import org.apache.lucene.index.Terms;
 import org.apache.lucene.index.TermsEnum;
-import org.apache.lucene.index.IndexReader.AtomicReaderContext;
-import org.apache.lucene.index.IndexReader.ReaderContext;
 import org.apache.lucene.util.AttributeSource;
 import org.apache.lucene.util.BytesRef;
 import org.apache.lucene.util.TermContext;

@@ -47,11 +47,11 @@ abstract class TermCollectingRewrite<Q extends Query> extends MultiTermQuery.Rew


 protected final void collectTerms(IndexReader reader, MultiTermQuery query, TermCollector collector) throws IOException {
-ReaderContext topReaderContext = reader.getTopReaderContext();
+IndexReaderContext topReaderContext = reader.getTopReaderContext();
 Comparator<BytesRef> lastTermComp = null;
 final AtomicReaderContext[] leaves = ReaderUtil.leaves(topReaderContext);
 for (AtomicReaderContext context : leaves) {
-final Fields fields = context.reader.fields();
+final Fields fields = context.reader().fields();
 if (fields == null) {
 // reader has no fields
 continue;

@@ -87,9 +87,9 @@ abstract class TermCollectingRewrite<Q extends Query> extends MultiTermQuery.Rew
 protected static abstract class TermCollector {

 protected AtomicReaderContext readerContext;
-protected ReaderContext topReaderContext;
+protected IndexReaderContext topReaderContext;

-public void setReaderContext(ReaderContext topReaderContext, AtomicReaderContext readerContext) {
+public void setReaderContext(IndexReaderContext topReaderContext, AtomicReaderContext readerContext) {
 this.readerContext = readerContext;
 this.topReaderContext = topReaderContext;
 }

@@ -20,10 +20,10 @@ package org.apache.lucene.search;
 import java.io.IOException;
 import java.util.Set;

+import org.apache.lucene.index.AtomicReaderContext;
 import org.apache.lucene.index.DocsEnum;
-import org.apache.lucene.index.IndexReader.AtomicReaderContext;
-import org.apache.lucene.index.IndexReader.ReaderContext;
-import org.apache.lucene.index.IndexReader;
+import org.apache.lucene.index.AtomicReader;
+import org.apache.lucene.index.IndexReaderContext;
 import org.apache.lucene.index.Term;
 import org.apache.lucene.index.TermState;
 import org.apache.lucene.index.TermsEnum;

@@ -108,16 +108,16 @@ public class TermQuery extends Query {
 TermsEnum getTermsEnum(AtomicReaderContext context) throws IOException {
 final TermState state = termStates.get(context.ord);
 if (state == null) { // term is not present in that reader
-assert termNotInReader(context.reader, term.field(), term.bytes()) : "no termstate found but term exists in reader term=" + term;
+assert termNotInReader(context.reader(), term.field(), term.bytes()) : "no termstate found but term exists in reader term=" + term;
 return null;
 }
 //System.out.println("LD=" + reader.getLiveDocs() + " set?=" + (reader.getLiveDocs() != null ? reader.getLiveDocs().get(0) : "null"));
-final TermsEnum termsEnum = context.reader.terms(term.field()).iterator(null);
+final TermsEnum termsEnum = context.reader().terms(term.field()).iterator(null);
 termsEnum.seekExact(term.bytes(), state);
 return termsEnum;
 }

-private boolean termNotInReader(IndexReader reader, String field, BytesRef bytes) throws IOException {
+private boolean termNotInReader(AtomicReader reader, String field, BytesRef bytes) throws IOException {
 // only called from assert
 //System.out.println("TQ.termNotInReader reader=" + reader + " term=" + field + ":" + bytes.utf8ToString());
 return reader.docFreq(field, bytes) == 0;

@@ -125,7 +125,7 @@ public class TermQuery extends Query {

 @Override
 public Explanation explain(AtomicReaderContext context, int doc) throws IOException {
-Scorer scorer = scorer(context, true, false, context.reader.getLiveDocs());
+Scorer scorer = scorer(context, true, false, context.reader().getLiveDocs());
 if (scorer != null) {
 int newDoc = scorer.advance(doc);
 if (newDoc == doc) {

@@ -173,7 +173,7 @@ public class TermQuery extends Query {

 @Override
 public Weight createWeight(IndexSearcher searcher) throws IOException {
-final ReaderContext context = searcher.getTopReaderContext();
+final IndexReaderContext context = searcher.getTopReaderContext();
 final TermContext termState;
 if (perReaderTermState == null || perReaderTermState.topReaderContext != context) {
 // make TermQuery single-pass if we don't have a PRTS or if the context differs!

@@ -17,7 +17,7 @@ package org.apache.lucene.search;
 * limitations under the License.
 */

-import org.apache.lucene.index.IndexReader; // javadocs
+import org.apache.lucene.index.AtomicReader; // javadocs
 import org.apache.lucene.util.BytesRef;
 /**
 * Contains statistics for a specific term

@@ -42,13 +42,13 @@ public class TermStatistics {
 }

 /** returns the number of documents this term occurs in
-* @see IndexReader#docFreq(String, BytesRef) */
+* @see AtomicReader#docFreq(String, BytesRef) */
 public final long docFreq() {
 return docFreq;
 }

 /** returns the total number of occurrences of this term
-* @see IndexReader#totalTermFreq(String, BytesRef) */
+* @see AtomicReader#totalTermFreq(String, BytesRef) */
 public final long totalTermFreq() {
 return totalTermFreq;
 }

@@ -19,7 +19,7 @@ package org.apache.lucene.search;

 import java.io.IOException;

-import org.apache.lucene.index.IndexReader.AtomicReaderContext;
+import org.apache.lucene.index.AtomicReaderContext;
 import org.apache.lucene.util.Counter;
 import org.apache.lucene.util.ThreadInterruptedException;

@@ -19,7 +19,7 @@ package org.apache.lucene.search;

 import java.io.IOException;

-import org.apache.lucene.index.IndexReader.AtomicReaderContext;
+import org.apache.lucene.index.AtomicReaderContext;
 import org.apache.lucene.search.FieldValueHitQueue.Entry;
 import org.apache.lucene.util.PriorityQueue;

@@ -19,7 +19,7 @@ package org.apache.lucene.search;

 import java.io.IOException;

-import org.apache.lucene.index.IndexReader.AtomicReaderContext;
+import org.apache.lucene.index.AtomicReaderContext;

 /**
 * A {@link Collector} implementation that collects the top-scoring hits,

@@ -17,7 +17,7 @@ package org.apache.lucene.search;
 * limitations under the License.
 */

-import org.apache.lucene.index.IndexReader.AtomicReaderContext;
+import org.apache.lucene.index.AtomicReaderContext;

 /**
 * Just counts the total number of hits.

@@ -19,9 +19,9 @@ package org.apache.lucene.search;

 import java.io.IOException;

-import org.apache.lucene.index.IndexReader;
-import org.apache.lucene.index.IndexReader.AtomicReaderContext;
-import org.apache.lucene.index.IndexReader.ReaderContext;
+import org.apache.lucene.index.AtomicReader; // javadocs
+import org.apache.lucene.index.AtomicReaderContext;
+import org.apache.lucene.index.IndexReaderContext; // javadocs
 import org.apache.lucene.search.similarities.SimilarityProvider;
 import org.apache.lucene.util.Bits;

@@ -32,13 +32,13 @@ import org.apache.lucene.util.Bits;
 * {@link Query}, so that a {@link Query} instance can be reused. <br>
 * {@link IndexSearcher} dependent state of the query should reside in the
 * {@link Weight}. <br>
-* {@link IndexReader} dependent state should reside in the {@link Scorer}.
+* {@link AtomicReader} dependent state should reside in the {@link Scorer}.
 * <p>
 * Since {@link Weight} creates {@link Scorer} instances for a given
-* {@link AtomicReaderContext} ({@link #scorer(IndexReader.AtomicReaderContext,
+* {@link AtomicReaderContext} ({@link #scorer(AtomicReaderContext,
 * boolean, boolean, Bits)})
 * callers must maintain the relationship between the searcher's top-level
-* {@link ReaderContext} and the context used to create a {@link Scorer}.
+* {@link IndexReaderContext} and the context used to create a {@link Scorer}.
 * <p>
 * A <code>Weight</code> is used in the following way:
 * <ol>

@@ -51,7 +51,7 @@ import org.apache.lucene.util.Bits;
 * <li>The query normalization factor is passed to {@link #normalize(float, float)}. At
 * this point the weighting is complete.
 * <li>A <code>Scorer</code> is constructed by
-* {@link #scorer(IndexReader.AtomicReaderContext, boolean, boolean, Bits)}.
+* {@link #scorer(AtomicReaderContext, boolean, boolean, Bits)}.
 * </ol>
 *
 * @since 2.9

@@ -117,7 +117,7 @@ public abstract class Weight {
 * Returns true iff this implementation scores docs only out of order. This
 * method is used in conjunction with {@link Collector}'s
 * {@link Collector#acceptsDocsOutOfOrder() acceptsDocsOutOfOrder} and
-* {@link #scorer(IndexReader.AtomicReaderContext, boolean, boolean, Bits)} to
+* {@link #scorer(AtomicReaderContext, boolean, boolean, Bits)} to
 * create a matching {@link Scorer} instance for a given {@link Collector}, or
 * vice versa.
 * <p>

@@ -17,7 +17,7 @@ package org.apache.lucene.search.payloads;
 * limitations under the License.
 */

-import org.apache.lucene.index.IndexReader.AtomicReaderContext;
+import org.apache.lucene.index.AtomicReaderContext;
 import org.apache.lucene.search.ComplexExplanation;
 import org.apache.lucene.search.Explanation;
 import org.apache.lucene.search.Scorer;

@@ -156,7 +156,7 @@ public class PayloadNearQuery extends SpanNearQuery {

 @Override
 public Explanation explain(AtomicReaderContext context, int doc) throws IOException {
-PayloadNearSpanScorer scorer = (PayloadNearSpanScorer) scorer(context, true, false, context.reader.getLiveDocs());
+PayloadNearSpanScorer scorer = (PayloadNearSpanScorer) scorer(context, true, false, context.reader().getLiveDocs());
 if (scorer != null) {
 int newDoc = scorer.advance(doc);
 if (newDoc == doc) {

@@ -26,9 +26,9 @@ import java.util.List;
 import java.util.Map;
 import java.util.TreeSet;

+import org.apache.lucene.index.AtomicReaderContext;
 import org.apache.lucene.index.IndexReader;
-import org.apache.lucene.index.IndexReader.AtomicReaderContext;
-import org.apache.lucene.index.IndexReader.ReaderContext;
+import org.apache.lucene.index.IndexReaderContext;
 import org.apache.lucene.index.Term;
 import org.apache.lucene.search.BooleanClause;
 import org.apache.lucene.search.BooleanQuery;

@@ -55,7 +55,7 @@ import org.apache.lucene.util.TermContext;
 *
 */
 public class PayloadSpanUtil {
-private ReaderContext context;
+private IndexReaderContext context;

 /**
 * @param context

@@ -63,7 +63,7 @@ public class PayloadSpanUtil {
 *
 * @see IndexReader#getTopReaderContext()
 */
-public PayloadSpanUtil(ReaderContext context) {
+public PayloadSpanUtil(IndexReaderContext context) {
 this.context = context;
 }

@@ -186,7 +186,7 @@ public class PayloadSpanUtil {
 }
 final AtomicReaderContext[] leaves = ReaderUtil.leaves(context);
 for (AtomicReaderContext atomicReaderContext : leaves) {
-final Spans spans = query.getSpans(atomicReaderContext, atomicReaderContext.reader.getLiveDocs(), termContexts);
+final Spans spans = query.getSpans(atomicReaderContext, atomicReaderContext.reader().getLiveDocs(), termContexts);
 while (spans.next() == true) {
 if (spans.isPayloadAvailable()) {
 Collection<byte[]> payload = spans.getPayload();

@@ -17,7 +17,7 @@ package org.apache.lucene.search.payloads;
 * limitations under the License.
 */

-import org.apache.lucene.index.IndexReader.AtomicReaderContext;
+import org.apache.lucene.index.AtomicReaderContext;
 import org.apache.lucene.index.Term;
 import org.apache.lucene.index.DocsAndPositionsEnum;
 import org.apache.lucene.search.IndexSearcher;

@@ -175,7 +175,7 @@ public class PayloadTermQuery extends SpanTermQuery {

 @Override
 public Explanation explain(AtomicReaderContext context, int doc) throws IOException {
-PayloadTermSpanScorer scorer = (PayloadTermSpanScorer) scorer(context, true, false, context.reader.getLiveDocs());
+PayloadTermSpanScorer scorer = (PayloadTermSpanScorer) scorer(context, true, false, context.reader().getLiveDocs());
 if (scorer != null) {
 int newDoc = scorer.advance(doc);
 if (newDoc == doc) {

@@ -19,9 +19,9 @@ package org.apache.lucene.search.similarities;

 import java.io.IOException;

+import org.apache.lucene.index.AtomicReaderContext;
 import org.apache.lucene.index.DocValues;
 import org.apache.lucene.index.FieldInvertState;
-import org.apache.lucene.index.IndexReader.AtomicReaderContext;
 import org.apache.lucene.index.Norm;
 import org.apache.lucene.search.CollectionStatistics;
 import org.apache.lucene.search.Explanation;
@@ -168,7 +168,7 @@ public class BM25Similarity extends Similarity {

   @Override
   public final ExactDocScorer exactDocScorer(Stats stats, String fieldName, AtomicReaderContext context) throws IOException {
-    final DocValues norms = context.reader.normValues(fieldName);
+    final DocValues norms = context.reader().normValues(fieldName);
     return norms == null
       ? new ExactBM25DocScorerNoNorms((BM25Stats)stats)
       : new ExactBM25DocScorer((BM25Stats)stats, norms);
@@ -176,7 +176,7 @@ public class BM25Similarity extends Similarity {

   @Override
   public final SloppyDocScorer sloppyDocScorer(Stats stats, String fieldName, AtomicReaderContext context) throws IOException {
-    return new SloppyBM25DocScorer((BM25Stats) stats, context.reader.normValues(fieldName));
+    return new SloppyBM25DocScorer((BM25Stats) stats, context.reader().normValues(fieldName));
   }

   private class ExactBM25DocScorer extends ExactDocScorer {
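Both scorer factories above now pull norms from the leaf reader. A sketch of that lookup, assuming the DocValues-based norms API this commit targets (later Lucene versions renamed these methods); the `NormsLookup` name is illustrative:

    import java.io.IOException;

    import org.apache.lucene.index.AtomicReaderContext;
    import org.apache.lucene.index.DocValues;

    class NormsLookup {
      static DocValues norms(AtomicReaderContext context, String field) throws IOException {
        // Returns null for a field indexed with norms omitted, which is why
        // BM25Similarity falls back to ExactBM25DocScorerNoNorms above.
        return context.reader().normValues(field);
      }
    }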
@@ -19,8 +19,8 @@ package org.apache.lucene.search.similarities;

 import java.io.IOException;

+import org.apache.lucene.index.AtomicReaderContext;
 import org.apache.lucene.index.FieldInvertState;
-import org.apache.lucene.index.IndexReader.AtomicReaderContext;
 import org.apache.lucene.index.Norm;
 import org.apache.lucene.search.CollectionStatistics;
 import org.apache.lucene.search.Explanation;
@@ -21,9 +21,10 @@ package org.apache.lucene.search.similarities;
 import java.io.IOException;

 import org.apache.lucene.document.DocValuesField; // javadoc
+import org.apache.lucene.index.AtomicReader; // javadoc
+import org.apache.lucene.index.AtomicReaderContext;
 import org.apache.lucene.index.FieldInvertState;
 import org.apache.lucene.index.IndexReader; // javadoc
-import org.apache.lucene.index.IndexReader.AtomicReaderContext;
 import org.apache.lucene.index.Norm;
 import org.apache.lucene.index.Terms; // javadoc
 import org.apache.lucene.search.BooleanQuery;
@@ -57,7 +58,7 @@ import org.apache.lucene.util.SmallFloat; // javadoc
  * <a name="indextime"/>
  * At indexing time, the indexer calls {@link #computeNorm(FieldInvertState, Norm)}, allowing
  * the Similarity implementation to set a per-document value for the field that will
- * be later accessible via {@link IndexReader#normValues(String)}. Lucene makes no assumption
+ * be later accessible via {@link AtomicReader#normValues(String)}. Lucene makes no assumption
  * about what is in this byte, but it is most useful for encoding length normalization
  * information.
  * <p>
@@ -72,7 +73,7 @@ import org.apache.lucene.util.SmallFloat; // javadoc
 * Because index-time boost is handled entirely at the application level anyway,
 * an application can alternatively store the index-time boost separately using an
 * {@link DocValuesField}, and access this at query-time with
- * {@link IndexReader#docValues(String)}.
+ * {@link AtomicReader#docValues(String)}.
 * <p>
 * Finally, using index-time boosts (either via folding into the normalization byte or
 * via DocValues), is an inefficient way to boost the scores of different fields if the
@@ -93,9 +94,9 @@ import org.apache.lucene.util.SmallFloat; // javadoc
 * is called for each query leaf node, {@link SimilarityProvider#queryNorm(float)} is called for the top-level
 * query, and finally {@link Similarity.Stats#normalize(float, float)} passes down the normalization value
 * and any top-level boosts (e.g. from enclosing {@link BooleanQuery}s).
- * <li>For each segment in the index, the Query creates a {@link #exactDocScorer(Stats, String, IndexReader.AtomicReaderContext)}
+ * <li>For each segment in the index, the Query creates a {@link #exactDocScorer(Stats, String, AtomicReaderContext)}
 * (for queries with exact frequencies such as TermQuerys and exact PhraseQueries) or a
- * {@link #sloppyDocScorer(Stats, String, IndexReader.AtomicReaderContext)} (for queries with sloppy frequencies such as
+ * {@link #sloppyDocScorer(Stats, String, AtomicReaderContext)} (for queries with sloppy frequencies such as
 * SpanQuerys and sloppy PhraseQueries). The score() method is called for each matching document.
 * </ol>
 * <p>
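The javadoc now points norm and doc-values access at `AtomicReader`: per-document values only exist per segment, so a composite `IndexReader` cannot serve them. A sketch of the query-time half of that contract, assuming the `DocValues.Source` API of this era; the field name "boost" and the helper class are illustrative assumptions:

    import java.io.IOException;

    import org.apache.lucene.index.AtomicReaderContext;
    import org.apache.lucene.index.DocValues;

    class BoostLookup {
      static double boostFor(AtomicReaderContext context, int docID) throws IOException {
        // per-segment doc values, as the javadoc above now describes
        DocValues boosts = context.reader().docValues("boost");
        if (boosts == null) {
          return 1.0; // field not stored in this segment
        }
        return boosts.getSource().getFloat(docID);
      }
    }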
@@ -19,9 +19,9 @@ package org.apache.lucene.search.similarities;

 import java.io.IOException;

+import org.apache.lucene.index.AtomicReaderContext;
 import org.apache.lucene.index.DocValues;
 import org.apache.lucene.index.FieldInvertState;
-import org.apache.lucene.index.IndexReader.AtomicReaderContext;
 import org.apache.lucene.index.Norm;
 import org.apache.lucene.search.CollectionStatistics;
 import org.apache.lucene.search.Explanation;
@@ -181,7 +181,7 @@ public abstract class SimilarityBase extends Similarity {
   @Override
   public ExactDocScorer exactDocScorer(Stats stats, String fieldName,
       AtomicReaderContext context) throws IOException {
-    DocValues norms = context.reader.normValues(fieldName);
+    DocValues norms = context.reader().normValues(fieldName);

     if (stats instanceof MultiSimilarity.MultiStats) {
       // a multi term query (e.g. phrase). return the summation,
@@ -200,7 +200,7 @@ public abstract class SimilarityBase extends Similarity {
   @Override
   public SloppyDocScorer sloppyDocScorer(Stats stats, String fieldName,
       AtomicReaderContext context) throws IOException {
-    DocValues norms = context.reader.normValues(fieldName);
+    DocValues norms = context.reader().normValues(fieldName);

     if (stats instanceof MultiSimilarity.MultiStats) {
       // a multi term query (e.g. phrase). return the summation,
@@ -20,8 +20,8 @@ package org.apache.lucene.search.similarities;

 import java.io.IOException;

+import org.apache.lucene.index.AtomicReaderContext;
 import org.apache.lucene.index.DocValues;
-import org.apache.lucene.index.IndexReader.AtomicReaderContext;
 import org.apache.lucene.search.CollectionStatistics;
 import org.apache.lucene.search.Explanation;
 import org.apache.lucene.search.IndexSearcher;
@@ -703,12 +703,12 @@ public abstract class TFIDFSimilarity extends Similarity {

   @Override
   public final ExactDocScorer exactDocScorer(Stats stats, String fieldName, AtomicReaderContext context) throws IOException {
-    return new ExactTFIDFDocScorer((IDFStats)stats, context.reader.normValues(fieldName));
+    return new ExactTFIDFDocScorer((IDFStats)stats, context.reader().normValues(fieldName));
   }

   @Override
   public final SloppyDocScorer sloppyDocScorer(Stats stats, String fieldName, AtomicReaderContext context) throws IOException {
-    return new SloppyTFIDFDocScorer((IDFStats)stats, context.reader.normValues(fieldName));
+    return new SloppyTFIDFDocScorer((IDFStats)stats, context.reader().normValues(fieldName));
   }

   // TODO: we can specialize these for omitNorms up front, but we should test that it doesn't confuse stupid hotspot.
@@ -21,8 +21,8 @@ import java.io.IOException;
 import java.util.Map;
 import java.util.Set;

+import org.apache.lucene.index.AtomicReaderContext;
 import org.apache.lucene.index.IndexReader;
-import org.apache.lucene.index.IndexReader.AtomicReaderContext;
 import org.apache.lucene.index.Term;
 import org.apache.lucene.search.Query;
 import org.apache.lucene.search.Weight;
@@ -17,8 +17,8 @@ package org.apache.lucene.search.spans;
  * limitations under the License.
  */

+import org.apache.lucene.index.AtomicReaderContext;
 import org.apache.lucene.index.Term;
-import org.apache.lucene.index.IndexReader.AtomicReaderContext;
 import org.apache.lucene.util.ArrayUtil;
 import org.apache.lucene.util.Bits;
 import org.apache.lucene.util.TermContext;
@@ -17,8 +17,8 @@ package org.apache.lucene.search.spans;
  * limitations under the License.
  */

+import org.apache.lucene.index.AtomicReaderContext;
 import org.apache.lucene.index.Term;
-import org.apache.lucene.index.IndexReader.AtomicReaderContext;
 import org.apache.lucene.util.Bits;
 import org.apache.lucene.util.PriorityQueue;
 import org.apache.lucene.util.TermContext;
@@ -20,8 +20,8 @@ package org.apache.lucene.search.spans;
 import java.io.IOException;
 import java.util.Map;

+import org.apache.lucene.index.AtomicReaderContext;
 import org.apache.lucene.index.IndexReader;
-import org.apache.lucene.index.IndexReader.AtomicReaderContext;
 import org.apache.lucene.index.Term;
 import org.apache.lucene.search.MultiTermQuery;
 import org.apache.lucene.search.Query;
@@ -27,8 +27,8 @@ import java.util.Map;
 import java.util.Set;


+import org.apache.lucene.index.AtomicReaderContext;
 import org.apache.lucene.index.IndexReader;
-import org.apache.lucene.index.IndexReader.AtomicReaderContext;
 import org.apache.lucene.index.Term;
 import org.apache.lucene.search.Query;
 import org.apache.lucene.util.Bits;
@@ -17,8 +17,8 @@ package org.apache.lucene.search.spans;
  * limitations under the License.
  */

+import org.apache.lucene.index.AtomicReaderContext;
 import org.apache.lucene.index.IndexReader;
-import org.apache.lucene.index.IndexReader.AtomicReaderContext;
 import org.apache.lucene.index.Term;
 import org.apache.lucene.search.Query;
 import org.apache.lucene.util.Bits;
@@ -26,8 +26,8 @@ import java.util.Iterator;
 import java.util.Map;
 import java.util.Set;

+import org.apache.lucene.index.AtomicReaderContext;
 import org.apache.lucene.index.IndexReader;
-import org.apache.lucene.index.IndexReader.AtomicReaderContext;
 import org.apache.lucene.index.Term;
 import org.apache.lucene.util.Bits;
 import org.apache.lucene.util.PriorityQueue;
@@ -17,8 +17,8 @@ package org.apache.lucene.search.spans;
 */


+import org.apache.lucene.index.AtomicReaderContext;
 import org.apache.lucene.index.IndexReader;
-import org.apache.lucene.index.IndexReader.AtomicReaderContext;
 import org.apache.lucene.index.Term;
 import org.apache.lucene.search.Query;
 import org.apache.lucene.util.Bits;
@@ -20,7 +20,7 @@ package org.apache.lucene.search.spans;
 import java.io.IOException;
 import java.util.Map;

-import org.apache.lucene.index.IndexReader.AtomicReaderContext;
+import org.apache.lucene.index.AtomicReaderContext;
 import org.apache.lucene.index.Term;
 import org.apache.lucene.search.Query;
 import org.apache.lucene.search.IndexSearcher;
@@ -17,7 +17,7 @@ package org.apache.lucene.search.spans;
  * limitations under the License.
  */

-import org.apache.lucene.index.IndexReader.AtomicReaderContext;
+import org.apache.lucene.index.AtomicReaderContext;
 import org.apache.lucene.index.Fields;
 import org.apache.lucene.index.Term;
 import org.apache.lucene.index.DocsAndPositionsEnum;
@@ -93,7 +93,7 @@ public class SpanTermQuery extends SpanQuery {
     if (termContext == null) {
       // this happens with span-not query, as it doesn't include the NOT side in extractTerms()
      // so we seek to the term now in this segment..., this sucks because its ugly mostly!
-      final Fields fields = context.reader.fields();
+      final Fields fields = context.reader().fields();
       if (fields != null) {
         final Terms terms = fields.terms(term.field());
         if (terms != null) {
@@ -117,7 +117,7 @@ public class SpanTermQuery extends SpanQuery {
       return TermSpans.EMPTY_TERM_SPANS;
     }

-    final TermsEnum termsEnum = context.reader.terms(term.field()).iterator(null);
+    final TermsEnum termsEnum = context.reader().terms(term.field()).iterator(null);
     termsEnum.seekExact(term.bytes(), state);

     final DocsAndPositionsEnum postings = termsEnum.docsAndPositions(acceptDocs, null, false);
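The seek path above illustrates the null checks every per-segment consumer now owes: the segment may have no postings, the field may be absent, and the term may be missing. A standalone sketch of the same pattern, assuming the TermsEnum API at this commit (the boolean seekExact variant and the `TermSeek` name are assumptions):

    import java.io.IOException;

    import org.apache.lucene.index.AtomicReaderContext;
    import org.apache.lucene.index.Fields;
    import org.apache.lucene.index.Term;
    import org.apache.lucene.index.Terms;
    import org.apache.lucene.index.TermsEnum;

    class TermSeek {
      static TermsEnum seek(AtomicReaderContext context, Term term) throws IOException {
        final Fields fields = context.reader().fields();
        if (fields == null) return null;   // segment has no postings at all
        final Terms terms = fields.terms(term.field());
        if (terms == null) return null;    // field absent in this segment
        final TermsEnum termsEnum = terms.iterator(null);
        return termsEnum.seekExact(term.bytes(), false) ? termsEnum : null;
      }
    }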
@@ -17,8 +17,8 @@ package org.apache.lucene.search.spans;
  * limitations under the License.
  */

-import org.apache.lucene.index.IndexReader.AtomicReaderContext;
-import org.apache.lucene.index.IndexReader.ReaderContext;
+import org.apache.lucene.index.AtomicReaderContext;
+import org.apache.lucene.index.IndexReaderContext;
 import org.apache.lucene.index.Term;
 import org.apache.lucene.search.*;
 import org.apache.lucene.search.similarities.Similarity;
@@ -48,7 +48,7 @@ public class SpanWeight extends Weight {
     termContexts = new HashMap<Term,TermContext>();
     TreeSet<Term> terms = new TreeSet<Term>();
     query.extractTerms(terms);
-    final ReaderContext context = searcher.getTopReaderContext();
+    final IndexReaderContext context = searcher.getTopReaderContext();
     final TermStatistics termStats[] = new TermStatistics[terms.size()];
     int i = 0;
     for (Term term : terms) {
@@ -84,7 +84,7 @@ public class SpanWeight extends Weight {

   @Override
   public Explanation explain(AtomicReaderContext context, int doc) throws IOException {
-    Scorer scorer = scorer(context, true, false, context.reader.getLiveDocs());
+    Scorer scorer = scorer(context, true, false, context.reader().getLiveDocs());
     if (scorer != null) {
       int newDoc = scorer.advance(doc);
       if (newDoc == doc) {
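SpanWeight's constructor shows the intended division of labor: statistics are collected once against the top-level IndexReaderContext, then consumed per leaf at scoring time. A condensed sketch of that setup (class and method names are illustrative):

    import java.io.IOException;
    import java.util.HashMap;
    import java.util.Map;
    import java.util.TreeSet;

    import org.apache.lucene.index.IndexReaderContext;
    import org.apache.lucene.index.Term;
    import org.apache.lucene.search.IndexSearcher;
    import org.apache.lucene.search.spans.SpanQuery;
    import org.apache.lucene.util.TermContext;

    class SpanSetup {
      static Map<Term, TermContext> buildTermContexts(IndexSearcher searcher, SpanQuery query) throws IOException {
        final IndexReaderContext top = searcher.getTopReaderContext();
        final TreeSet<Term> terms = new TreeSet<Term>();
        query.extractTerms(terms); // gather all terms the spans will visit
        final Map<Term, TermContext> termContexts = new HashMap<Term, TermContext>();
        for (Term term : terms) {
          // one lookup across all leaves; scoring reuses the cached TermStates
          termContexts.put(term, TermContext.build(top, term, true));
        }
        return termContexts;
      }
    }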
@@ -23,7 +23,7 @@ import java.util.List;
 import java.util.Map;
 import java.util.Set;

-import org.apache.lucene.index.IndexReader;
+import org.apache.lucene.index.AtomicReader;
 import org.apache.lucene.search.FieldCache;
 import org.apache.lucene.search.FieldCache.CacheEntry;

@@ -146,9 +146,6 @@ public final class FieldCacheSanityChecker {
     insanity.addAll(checkValueMismatch(valIdToItems,
                                        readerFieldToValIds,
                                        valMismatchKeys));
-    insanity.addAll(checkSubreaders(valIdToItems,
-                                    readerFieldToValIds));
-
     return insanity.toArray(new Insanity[insanity.size()]);
   }

@@ -189,107 +186,6 @@ public final class FieldCacheSanityChecker {
     return insanity;
   }

-  /**
-   * Internal helper method used by check that iterates over
-   * the keys of readerFieldToValIds and generates a Collection
-   * of Insanity instances whenever two (or more) ReaderField instances are
-   * found that have an ancestry relationships.
-   *
-   * @see InsanityType#SUBREADER
-   */
-  private Collection<Insanity> checkSubreaders( MapOfSets<Integer, CacheEntry> valIdToItems,
-                                                MapOfSets<ReaderField, Integer> readerFieldToValIds) {
-
-    final List<Insanity> insanity = new ArrayList<Insanity>(23);
-
-    Map<ReaderField, Set<ReaderField>> badChildren = new HashMap<ReaderField, Set<ReaderField>>(17);
-    MapOfSets<ReaderField, ReaderField> badKids = new MapOfSets<ReaderField, ReaderField>(badChildren); // wrapper
-
-    Map<Integer, Set<CacheEntry>> viToItemSets = valIdToItems.getMap();
-    Map<ReaderField, Set<Integer>> rfToValIdSets = readerFieldToValIds.getMap();
-
-    Set<ReaderField> seen = new HashSet<ReaderField>(17);
-
-    Set<ReaderField> readerFields = rfToValIdSets.keySet();
-    for (final ReaderField rf : readerFields) {
-
-      if (seen.contains(rf)) continue;
-
-      List<Object> kids = getAllDescendantReaderKeys(rf.readerKey);
-      for (Object kidKey : kids) {
-        ReaderField kid = new ReaderField(kidKey, rf.fieldName);
-
-        if (badChildren.containsKey(kid)) {
-          // we've already process this kid as RF and found other problems
-          // track those problems as our own
-          badKids.put(rf, kid);
-          badKids.putAll(rf, badChildren.get(kid));
-          badChildren.remove(kid);
-
-        } else if (rfToValIdSets.containsKey(kid)) {
-          // we have cache entries for the kid
-          badKids.put(rf, kid);
-        }
-        seen.add(kid);
-      }
-      seen.add(rf);
-    }
-
-    // every mapping in badKids represents an Insanity
-    for (final ReaderField parent : badChildren.keySet()) {
-      Set<ReaderField> kids = badChildren.get(parent);
-
-      List<CacheEntry> badEntries = new ArrayList<CacheEntry>(kids.size() * 2);
-
-      // put parent entr(ies) in first
-      {
-        for (final Integer value : rfToValIdSets.get(parent)) {
-          badEntries.addAll(viToItemSets.get(value));
-        }
-      }
-
-      // now the entries for the descendants
-      for (final ReaderField kid : kids) {
-        for (final Integer value : rfToValIdSets.get(kid)) {
-          badEntries.addAll(viToItemSets.get(value));
-        }
-      }
-
-      CacheEntry[] badness = new CacheEntry[badEntries.size()];
-      badness = badEntries.toArray(badness);
-
-      insanity.add(new Insanity(InsanityType.SUBREADER,
-                                "Found caches for descendants of " +
-                                parent.toString(),
-                                badness));
-    }
-
-    return insanity;
-
-  }
-
-  /**
-   * Checks if the seed is an IndexReader, and if so will walk
-   * the hierarchy of subReaders building up a list of the objects
-   * returned by obj.getFieldCacheKey()
-   */
-  private List<Object> getAllDescendantReaderKeys(Object seed) {
-    List<Object> all = new ArrayList<Object>(17); // will grow as we iter
-    all.add(seed);
-    for (int i = 0; i < all.size(); i++) {
-      Object obj = all.get(i);
-      if (obj instanceof IndexReader) {
-        IndexReader[] subs = ((IndexReader)obj).getSequentialSubReaders();
-        for (int j = 0; (null != subs) && (j < subs.length); j++) {
-          all.add(subs[j].getCoreCacheKey());
-        }
-      }
-
-    }
-    // need to skip the first, because it was the seed
-    return all.subList(1, all.size());
-  }
-
   /**
    * Simple pair object for using "readerKey + fieldName" a Map key
    */
@@ -18,17 +18,16 @@ package org.apache.lucene.util;
 */

 import java.util.ArrayList;
-import java.util.Collection;
-import java.util.HashSet;
 import java.util.List;
 import java.io.IOException;

+import org.apache.lucene.index.AtomicReaderContext;
 import org.apache.lucene.index.FieldInfo;
 import org.apache.lucene.index.FieldInfos;
+import org.apache.lucene.index.CompositeReader;
+import org.apache.lucene.index.AtomicReader;
 import org.apache.lucene.index.IndexReader;
-import org.apache.lucene.index.IndexReader.AtomicReaderContext;
-import org.apache.lucene.index.IndexReader.CompositeReaderContext;
-import org.apache.lucene.index.IndexReader.ReaderContext;
+import org.apache.lucene.index.IndexReaderContext;

 /**
  * Common util methods for dealing with {@link IndexReader}s.
@@ -68,11 +67,11 @@ public final class ReaderUtil {
    * @param reader
    */

-  public static void gatherSubReaders(final List<IndexReader> allSubReaders, IndexReader reader) {
+  public static void gatherSubReaders(final List<AtomicReader> allSubReaders, IndexReader reader) {
     try {
       new Gather(reader) {
         @Override
-        protected void add(int base, IndexReader r) {
+        protected void add(int base, AtomicReader r) {
           allSubReaders.add(r);
         }
       }.run();
@@ -103,13 +102,13 @@ public final class ReaderUtil {
     }

     private int run(int base, IndexReader reader) throws IOException {
-      IndexReader[] subReaders = reader.getSequentialSubReaders();
-      if (subReaders == null) {
+      if (reader instanceof AtomicReader) {
         // atomic reader
-        add(base, reader);
+        add(base, (AtomicReader) reader);
         base += reader.maxDoc();
       } else {
-        // composite reader
+        assert reader instanceof CompositeReader : "must be a composite reader";
+        IndexReader[] subReaders = ((CompositeReader) reader).getSequentialSubReaders();
         for (int i = 0; i < subReaders.length; i++) {
           base = run(base, subReaders[i]);
         }
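With run() dispatching on `instanceof AtomicReader`, the Gather callback is now statically typed to leaves. A sketch of the typical caller, using the gatherSubReaders signature from the hunk above (the `LeafCollector` name is illustrative):

    import java.util.ArrayList;
    import java.util.List;

    import org.apache.lucene.index.AtomicReader;
    import org.apache.lucene.index.IndexReader;
    import org.apache.lucene.util.ReaderUtil;

    class LeafCollector {
      static List<AtomicReader> collect(IndexReader reader) {
        final List<AtomicReader> subReaders = new ArrayList<AtomicReader>();
        // recurses through CompositeReaders and hands back only atomic leaves
        ReaderUtil.gatherSubReaders(subReaders, reader);
        return subReaders;
      }
    }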
@@ -118,68 +117,7 @@ public final class ReaderUtil {
       return base;
     }

-    protected abstract void add(int base, IndexReader r) throws IOException;
+    protected abstract void add(int base, AtomicReader r) throws IOException;
-  }
-
-  public static ReaderContext buildReaderContext(IndexReader reader) {
-    return new ReaderContextBuilder(reader).build();
-  }
-
-  public static class ReaderContextBuilder {
-    private final IndexReader reader;
-    private final AtomicReaderContext[] leaves;
-    private int leafOrd = 0;
-    private int leafDocBase = 0;
-    public ReaderContextBuilder(IndexReader reader) {
-      this.reader = reader;
-      leaves = new AtomicReaderContext[numLeaves(reader)];
-    }
-
-    public ReaderContext build() {
-      return build(null, reader, 0, 0);
-    }
-
-    private ReaderContext build(CompositeReaderContext parent, IndexReader reader, int ord, int docBase) {
-      IndexReader[] sequentialSubReaders = reader.getSequentialSubReaders();
-      if (sequentialSubReaders == null) {
-        AtomicReaderContext atomic = new AtomicReaderContext(parent, reader, ord, docBase, leafOrd, leafDocBase);
-        leaves[leafOrd++] = atomic;
-        leafDocBase += reader.maxDoc();
-        return atomic;
-      } else {
-        ReaderContext[] children = new ReaderContext[sequentialSubReaders.length];
-        final CompositeReaderContext newParent;
-        if (parent == null) {
-          newParent = new CompositeReaderContext(reader, children, leaves);
-        } else {
-          newParent = new CompositeReaderContext(parent, reader, ord, docBase, children);
-        }
-
-        int newDocBase = 0;
-        for (int i = 0; i < sequentialSubReaders.length; i++) {
-          children[i] = build(newParent, sequentialSubReaders[i], i, newDocBase);
-          newDocBase += sequentialSubReaders[i].maxDoc();
-        }
-        return newParent;
-      }
-    }
-
-    private int numLeaves(IndexReader reader) {
-      final int[] numLeaves = new int[1];
-      try {
-        new Gather(reader) {
-          @Override
-          protected void add(int base, IndexReader r) {
-            numLeaves[0]++;
-          }
-        }.run();
-      } catch (IOException ioe) {
-        // won't happen
-        throw new RuntimeException(ioe);
-      }
-      return numLeaves[0];
-    }
-
   }

 /**
@@ -188,11 +126,10 @@ public final class ReaderUtil {
  * <code>null</code> the given context must be an instance of
  * {@link AtomicReaderContext}
  */
-  public static AtomicReaderContext[] leaves(ReaderContext context) {
+  public static AtomicReaderContext[] leaves(IndexReaderContext context) {
     assert context != null && context.isTopLevel : "context must be non-null & top-level";
     final AtomicReaderContext[] leaves = context.leaves();
     if (leaves == null) {
-      assert context.isAtomic : "top-level context without leaves must be atomic";
       return new AtomicReaderContext[] { (AtomicReaderContext) context };
     }
     return leaves;
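leaves() keeps its contract under the rename: a top-level context either exposes its leaf array or is itself a single leaf. A sketch of the usual per-segment loop over the result (the `LeafLoop` helper is illustrative):

    import org.apache.lucene.index.AtomicReaderContext;
    import org.apache.lucene.index.IndexReaderContext;
    import org.apache.lucene.search.IndexSearcher;
    import org.apache.lucene.util.ReaderUtil;

    class LeafLoop {
      static int totalMaxDoc(IndexSearcher searcher) {
        final IndexReaderContext top = searcher.getTopReaderContext();
        int maxDoc = 0;
        for (AtomicReaderContext leaf : ReaderUtil.leaves(top)) {
          maxDoc += leaf.reader().maxDoc(); // per-segment work goes here
        }
        return maxDoc;
      }
    }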
@@ -202,7 +139,7 @@ public final class ReaderUtil {
  * Walks up the reader tree and return the given context's top level reader
  * context, or in other words the reader tree's root context.
  */
-  public static ReaderContext getTopLevelContext(ReaderContext context) {
+  public static IndexReaderContext getTopLevelContext(IndexReaderContext context) {
     while (context.parent != null) {
       context = context.parent;
     }
@@ -260,26 +197,4 @@ public final class ReaderUtil {
     }
     return hi;
   }
-
-  public static Collection<String> getIndexedFields(IndexReader reader) {
-    final Collection<String> fields = new HashSet<String>();
-    for(FieldInfo fieldInfo : getMergedFieldInfos(reader)) {
-      if (fieldInfo.isIndexed) {
-        fields.add(fieldInfo.name);
-      }
-    }
-    return fields;
-  }
-
-  /** Call this to get the (merged) FieldInfos for a
-   * composite reader */
-  public static FieldInfos getMergedFieldInfos(IndexReader reader) {
-    final List<IndexReader> subReaders = new ArrayList<IndexReader>();
-    ReaderUtil.gatherSubReaders(subReaders, reader);
-    final FieldInfos fieldInfos = new FieldInfos();
-    for(IndexReader subReader : subReaders) {
-      fieldInfos.add(subReader.getFieldInfos());
-    }
-    return fieldInfos;
-  }
 }
@@ -20,10 +20,10 @@ package org.apache.lucene.util;
 import java.io.IOException;
 import java.util.Arrays;

+import org.apache.lucene.index.AtomicReaderContext;
 import org.apache.lucene.index.Fields;
-import org.apache.lucene.index.IndexReader.AtomicReaderContext;
-import org.apache.lucene.index.IndexReader.ReaderContext;
 import org.apache.lucene.index.IndexReader;
+import org.apache.lucene.index.IndexReaderContext;
 import org.apache.lucene.index.Term;
 import org.apache.lucene.index.TermState;
 import org.apache.lucene.index.Terms;
@@ -39,7 +39,7 @@ import org.apache.lucene.index.TermsEnum;
 * @lucene.experimental
 */
 public final class TermContext {
-  public final ReaderContext topReaderContext; // for asserting!
+  public final IndexReaderContext topReaderContext; // for asserting!
   private final TermState[] states;
   private int docFreq;
   private long totalTermFreq;
@@ -47,9 +47,9 @@ public final class TermContext {
   //public static boolean DEBUG = BlockTreeTermsWriter.DEBUG;

   /**
-   * Creates an empty {@link TermContext} from a {@link ReaderContext}
+   * Creates an empty {@link TermContext} from a {@link IndexReaderContext}
    */
-  public TermContext(ReaderContext context) {
+  public TermContext(IndexReaderContext context) {
     assert context != null && context.isTopLevel;
     topReaderContext = context;
     docFreq = 0;
@@ -66,20 +66,20 @@ public final class TermContext {
    * Creates a {@link TermContext} with an initial {@link TermState},
    * {@link IndexReader} pair.
    */
-  public TermContext(ReaderContext context, TermState state, int ord, int docFreq, long totalTermFreq) {
+  public TermContext(IndexReaderContext context, TermState state, int ord, int docFreq, long totalTermFreq) {
     this(context);
     register(state, ord, docFreq, totalTermFreq);
   }

   /**
-   * Creates a {@link TermContext} from a top-level {@link ReaderContext} and the
+   * Creates a {@link TermContext} from a top-level {@link IndexReaderContext} and the
    * given {@link Term}. This method will lookup the given term in all context's leaf readers
    * and register each of the readers containing the term in the returned {@link TermContext}
    * using the leaf reader's ordinal.
    * <p>
    * Note: the given context must be a top-level context.
    */
-  public static TermContext build(ReaderContext context, Term term, boolean cache)
+  public static TermContext build(IndexReaderContext context, Term term, boolean cache)
       throws IOException {
     assert context != null && context.isTopLevel;
     final String field = term.field();
@@ -89,7 +89,7 @@ public final class TermContext {
     //if (DEBUG) System.out.println("prts.build term=" + term);
     for (int i = 0; i < leaves.length; i++) {
       //if (DEBUG) System.out.println("  r=" + leaves[i].reader);
-      final Fields fields = leaves[i].reader.fields();
+      final Fields fields = leaves[i].reader().fields();
       if (fields != null) {
         final Terms terms = fields.terms(field);
         if (terms != null) {
@@ -116,7 +116,7 @@ public final class TermContext {

   /**
    * Registers and associates a {@link TermState} with an leaf ordinal. The leaf ordinal
-   * should be derived from a {@link ReaderContext}'s leaf ord.
+   * should be derived from a {@link IndexReaderContext}'s leaf ord.
    */
   public void register(TermState state, final int ord, final int docFreq, final long totalTermFreq) {
     assert state != null : "state must not be null";
@@ -328,7 +328,7 @@ public class RandomIndexWriter implements Closeable {
     w.deleteAll();
   }

-  public IndexReader getReader() throws IOException {
+  public DirectoryReader getReader() throws IOException {
     return getReader(true);
   }

@@ -367,7 +367,7 @@ public class RandomIndexWriter implements Closeable {
     switchDoDocValues();
   }

-  public IndexReader getReader(boolean applyDeletions) throws IOException {
+  public DirectoryReader getReader(boolean applyDeletions) throws IOException {
     getReaderCalled = true;
     if (r.nextInt(4) == 2) {
       doRandomForceMerge();
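Narrowing getReader() to DirectoryReader spares test code a cast when it needs directory-level reader API. A sketch of a caller, assuming a test-supplied Random and Directory (the `ReaderTypeDemo` class is illustrative):

    import java.io.IOException;
    import java.util.Random;

    import org.apache.lucene.document.Document;
    import org.apache.lucene.index.DirectoryReader;
    import org.apache.lucene.index.RandomIndexWriter;
    import org.apache.lucene.store.Directory;

    class ReaderTypeDemo {
      static DirectoryReader openReader(Random random, Directory dir) throws IOException {
        RandomIndexWriter w = new RandomIndexWriter(random, dir);
        w.addDocument(new Document());
        DirectoryReader reader = w.getReader(); // was IndexReader before this change
        w.close();
        return reader;
      }
    }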
@@ -455,7 +455,7 @@ public abstract class ThreadedIndexingAndSearchingTestCase extends LuceneTestCas

     conf.setMergedSegmentWarmer(new IndexWriter.IndexReaderWarmer() {
       @Override
-      public void warm(IndexReader reader) throws IOException {
+      public void warm(AtomicReader reader) throws IOException {
         if (VERBOSE) {
           System.out.println("TEST: now warm merged reader=" + reader);
         }
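A merged segment is by construction a single leaf, so the warmer now receives an AtomicReader directly. A minimal warmer sketch matching the updated signature (the logging body is illustrative):

    import java.io.IOException;

    import org.apache.lucene.index.AtomicReader;
    import org.apache.lucene.index.IndexWriter;

    class LoggingWarmer extends IndexWriter.IndexReaderWarmer {
      @Override
      public void warm(AtomicReader reader) throws IOException {
        // touch whatever the searcher will need before the merged segment is published
        System.out.println("warming merged segment: maxDoc=" + reader.maxDoc());
      }
    }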
Some files were not shown because too many files have changed in this diff.