diff --git a/dev-tools/idea/lucene/highlighter/highlighter.iml b/dev-tools/idea/lucene/highlighter/highlighter.iml
index 0787fb84acf..8a45e2393cb 100644
--- a/dev-tools/idea/lucene/highlighter/highlighter.iml
+++ b/dev-tools/idea/lucene/highlighter/highlighter.iml
@@ -12,6 +12,7 @@
+
diff --git a/lucene/CHANGES.txt b/lucene/CHANGES.txt
index 0c0fab4d81b..6b2eb9b0643 100644
--- a/lucene/CHANGES.txt
+++ b/lucene/CHANGES.txt
@@ -74,6 +74,11 @@ API Changes
* LUCENE-6218: Add Collector.needsScores() and needsScores parameter
to Weight.scorer(). (Robert Muir)
+* LUCENE-4524: Merge DocsEnum and DocsAndPositionsEnum into a single
+ PostingsEnum iterator. TermsEnum.docs() and TermsEnum.docsAndPositions()
+ are replaced by TermsEnum.postings(). (Alan Woodward, Simon Willnauer,
+ Robert Muir)
+
Other
* LUCENE-6193: Collapse identical catch branches in try-catch statements.
diff --git a/lucene/analysis/common/src/test/org/apache/lucene/analysis/core/TestKeywordAnalyzer.java b/lucene/analysis/common/src/test/org/apache/lucene/analysis/core/TestKeywordAnalyzer.java
index 7381e1191b3..e3f88802633 100644
--- a/lucene/analysis/common/src/test/org/apache/lucene/analysis/core/TestKeywordAnalyzer.java
+++ b/lucene/analysis/common/src/test/org/apache/lucene/analysis/core/TestKeywordAnalyzer.java
@@ -27,7 +27,7 @@ import org.apache.lucene.document.Field;
import org.apache.lucene.document.StringField;
import org.apache.lucene.document.TextField;
import org.apache.lucene.index.DirectoryReader;
-import org.apache.lucene.index.DocsEnum;
+import org.apache.lucene.index.PostingsEnum;
import org.apache.lucene.index.IndexReader;
import org.apache.lucene.index.IndexWriter;
import org.apache.lucene.index.IndexWriterConfig;
@@ -96,7 +96,7 @@ public class TestKeywordAnalyzer extends BaseTokenStreamTestCase {
writer.close();
IndexReader reader = DirectoryReader.open(dir);
- DocsEnum td = TestUtil.docs(random(),
+ PostingsEnum td = TestUtil.docs(random(),
reader,
"partnum",
new BytesRef("Q36"),
diff --git a/lucene/analysis/common/src/test/org/apache/lucene/analysis/sinks/TestTeeSinkTokenFilter.java b/lucene/analysis/common/src/test/org/apache/lucene/analysis/sinks/TestTeeSinkTokenFilter.java
index 9c63bb099a9..f5a4d35206f 100644
--- a/lucene/analysis/common/src/test/org/apache/lucene/analysis/sinks/TestTeeSinkTokenFilter.java
+++ b/lucene/analysis/common/src/test/org/apache/lucene/analysis/sinks/TestTeeSinkTokenFilter.java
@@ -31,7 +31,7 @@ import org.apache.lucene.document.Field;
import org.apache.lucene.document.FieldType;
import org.apache.lucene.document.TextField;
import org.apache.lucene.index.DirectoryReader;
-import org.apache.lucene.index.DocsAndPositionsEnum;
+import org.apache.lucene.index.PostingsEnum;
import org.apache.lucene.index.IndexReader;
import org.apache.lucene.index.IndexWriter;
import org.apache.lucene.index.Terms;
@@ -111,7 +111,7 @@ public class TestTeeSinkTokenFilter extends BaseTokenStreamTestCase {
TermsEnum termsEnum = vector.iterator(null);
termsEnum.next();
assertEquals(2, termsEnum.totalTermFreq());
- DocsAndPositionsEnum positions = termsEnum.docsAndPositions(null, null);
+ PostingsEnum positions = termsEnum.postings(null, null, PostingsEnum.FLAG_ALL);
assertTrue(positions.nextDoc() != DocIdSetIterator.NO_MORE_DOCS);
assertEquals(2, positions.freq());
positions.nextPosition();
diff --git a/lucene/analysis/common/src/test/org/apache/lucene/analysis/standard/TestClassicAnalyzer.java b/lucene/analysis/common/src/test/org/apache/lucene/analysis/standard/TestClassicAnalyzer.java
index 3b3706f41f8..67cab606eb9 100644
--- a/lucene/analysis/common/src/test/org/apache/lucene/analysis/standard/TestClassicAnalyzer.java
+++ b/lucene/analysis/common/src/test/org/apache/lucene/analysis/standard/TestClassicAnalyzer.java
@@ -2,12 +2,11 @@ package org.apache.lucene.analysis.standard;
import org.apache.lucene.analysis.Analyzer;
import org.apache.lucene.analysis.BaseTokenStreamTestCase;
-import org.apache.lucene.analysis.standard.ClassicAnalyzer;
import org.apache.lucene.document.Document;
import org.apache.lucene.document.Field;
import org.apache.lucene.document.TextField;
import org.apache.lucene.index.DirectoryReader;
-import org.apache.lucene.index.DocsAndPositionsEnum;
+import org.apache.lucene.index.PostingsEnum;
import org.apache.lucene.index.IndexReader;
import org.apache.lucene.index.IndexWriter;
import org.apache.lucene.index.IndexWriterConfig;
@@ -281,7 +280,7 @@ public class TestClassicAnalyzer extends BaseTokenStreamTestCase {
// Make sure position is still incremented when
// massive term is skipped:
- DocsAndPositionsEnum tps = MultiFields.getTermPositionsEnum(reader,
+ PostingsEnum tps = MultiFields.getTermPositionsEnum(reader,
MultiFields.getLiveDocs(reader),
"content",
new BytesRef("another"));
diff --git a/lucene/backward-codecs/src/test/org/apache/lucene/index/TestBackwardsCompatibility.java b/lucene/backward-codecs/src/test/org/apache/lucene/index/TestBackwardsCompatibility.java
index a73bcc411f8..09c6f3191e5 100644
--- a/lucene/backward-codecs/src/test/org/apache/lucene/index/TestBackwardsCompatibility.java
+++ b/lucene/backward-codecs/src/test/org/apache/lucene/index/TestBackwardsCompatibility.java
@@ -932,7 +932,7 @@ public class TestBackwardsCompatibility extends LuceneTestCase {
writer.addDocument(doc);
}
- private int countDocs(DocsEnum docs) throws IOException {
+ private int countDocs(PostingsEnum docs) throws IOException {
int count = 0;
while((docs.nextDoc()) != DocIdSetIterator.NO_MORE_DOCS) {
count ++;
@@ -958,7 +958,7 @@ public class TestBackwardsCompatibility extends LuceneTestCase {
// should be found exactly
assertEquals(TermsEnum.SeekStatus.FOUND,
terms.seekCeil(aaaTerm));
- assertEquals(35, countDocs(TestUtil.docs(random(), terms, null, null, DocsEnum.FLAG_NONE)));
+ assertEquals(35, countDocs(TestUtil.docs(random(), terms, null, null, PostingsEnum.FLAG_NONE)));
assertNull(terms.next());
// should hit end of field
@@ -970,12 +970,12 @@ public class TestBackwardsCompatibility extends LuceneTestCase {
assertEquals(TermsEnum.SeekStatus.NOT_FOUND,
terms.seekCeil(new BytesRef("a")));
assertTrue(terms.term().bytesEquals(aaaTerm));
- assertEquals(35, countDocs(TestUtil.docs(random(), terms, null, null, DocsEnum.FLAG_NONE)));
+ assertEquals(35, countDocs(TestUtil.docs(random(), terms, null, null, PostingsEnum.FLAG_NONE)));
assertNull(terms.next());
assertEquals(TermsEnum.SeekStatus.FOUND,
terms.seekCeil(aaaTerm));
- assertEquals(35, countDocs(TestUtil.docs(random(), terms, null, null, DocsEnum.FLAG_NONE)));
+ assertEquals(35, countDocs(TestUtil.docs(random(), terms, null, null, PostingsEnum.FLAG_NONE)));
assertNull(terms.next());
r.close();
diff --git a/lucene/benchmark/src/test/org/apache/lucene/benchmark/byTask/TestPerfTasksLogic.java b/lucene/benchmark/src/test/org/apache/lucene/benchmark/byTask/TestPerfTasksLogic.java
index d89a6708962..ca748c21308 100644
--- a/lucene/benchmark/src/test/org/apache/lucene/benchmark/byTask/TestPerfTasksLogic.java
+++ b/lucene/benchmark/src/test/org/apache/lucene/benchmark/byTask/TestPerfTasksLogic.java
@@ -40,7 +40,7 @@ import org.apache.lucene.benchmark.byTask.tasks.WriteLineDocTask;
import org.apache.lucene.collation.CollationKeyAnalyzer;
import org.apache.lucene.facet.taxonomy.TaxonomyReader;
import org.apache.lucene.index.DirectoryReader;
-import org.apache.lucene.index.DocsEnum;
+import org.apache.lucene.index.PostingsEnum;
import org.apache.lucene.index.Fields;
import org.apache.lucene.index.IndexReader;
import org.apache.lucene.index.IndexWriter;
@@ -497,9 +497,9 @@ public class TestPerfTasksLogic extends BenchmarkTestCase {
continue;
}
TermsEnum termsEnum = terms.iterator(null);
- DocsEnum docs = null;
+ PostingsEnum docs = null;
while(termsEnum.next() != null) {
- docs = TestUtil.docs(random(), termsEnum, MultiFields.getLiveDocs(reader), docs, DocsEnum.FLAG_FREQS);
+ docs = TestUtil.docs(random(), termsEnum, MultiFields.getLiveDocs(reader), docs, PostingsEnum.FLAG_FREQS);
while(docs.nextDoc() != DocIdSetIterator.NO_MORE_DOCS) {
totalTokenCount2 += docs.freq();
}
diff --git a/lucene/codecs/src/java/org/apache/lucene/codecs/blockterms/BlockTermsReader.java b/lucene/codecs/src/java/org/apache/lucene/codecs/blockterms/BlockTermsReader.java
index 80540e91a06..8001657aeea 100644
--- a/lucene/codecs/src/java/org/apache/lucene/codecs/blockterms/BlockTermsReader.java
+++ b/lucene/codecs/src/java/org/apache/lucene/codecs/blockterms/BlockTermsReader.java
@@ -30,8 +30,7 @@ import org.apache.lucene.codecs.CodecUtil;
import org.apache.lucene.codecs.FieldsProducer;
import org.apache.lucene.codecs.PostingsReaderBase;
import org.apache.lucene.index.CorruptIndexException;
-import org.apache.lucene.index.DocsAndPositionsEnum;
-import org.apache.lucene.index.DocsEnum;
+import org.apache.lucene.index.PostingsEnum;
import org.apache.lucene.index.FieldInfo;
import org.apache.lucene.index.IndexFileNames;
import org.apache.lucene.index.IndexOptions;
@@ -652,22 +651,19 @@ public class BlockTermsReader extends FieldsProducer {
}
@Override
- public DocsEnum docs(Bits liveDocs, DocsEnum reuse, int flags) throws IOException {
+ public PostingsEnum postings(Bits liveDocs, PostingsEnum reuse, int flags) throws IOException {
+
+ if (PostingsEnum.requiresPositions(flags)) {
+ if (fieldInfo.getIndexOptions().compareTo(IndexOptions.DOCS_AND_FREQS_AND_POSITIONS) < 0) {
+ // Positions were not indexed:
+ return null;
+ }
+ }
+
//System.out.println("BTR.docs this=" + this);
decodeMetaData();
//System.out.println("BTR.docs: state.docFreq=" + state.docFreq);
- return postingsReader.docs(fieldInfo, state, liveDocs, reuse, flags);
- }
-
- @Override
- public DocsAndPositionsEnum docsAndPositions(Bits liveDocs, DocsAndPositionsEnum reuse, int flags) throws IOException {
- if (fieldInfo.getIndexOptions().compareTo(IndexOptions.DOCS_AND_FREQS_AND_POSITIONS) < 0) {
- // Positions were not indexed:
- return null;
- }
-
- decodeMetaData();
- return postingsReader.docsAndPositions(fieldInfo, state, liveDocs, reuse, flags);
+ return postingsReader.postings(fieldInfo, state, liveDocs, reuse, flags);
}
@Override
diff --git a/lucene/codecs/src/java/org/apache/lucene/codecs/blocktreeords/OrdsIntersectTermsEnum.java b/lucene/codecs/src/java/org/apache/lucene/codecs/blocktreeords/OrdsIntersectTermsEnum.java
index 0eb970908ec..2e4daaeba8f 100644
--- a/lucene/codecs/src/java/org/apache/lucene/codecs/blocktreeords/OrdsIntersectTermsEnum.java
+++ b/lucene/codecs/src/java/org/apache/lucene/codecs/blocktreeords/OrdsIntersectTermsEnum.java
@@ -20,8 +20,7 @@ package org.apache.lucene.codecs.blocktreeords;
import java.io.IOException;
import org.apache.lucene.codecs.blocktreeords.FSTOrdsOutputs.Output;
-import org.apache.lucene.index.DocsAndPositionsEnum;
-import org.apache.lucene.index.DocsEnum;
+import org.apache.lucene.index.PostingsEnum;
import org.apache.lucene.index.IndexOptions;
import org.apache.lucene.index.TermState;
import org.apache.lucene.index.TermsEnum;
@@ -203,20 +202,17 @@ final class OrdsIntersectTermsEnum extends TermsEnum {
}
@Override
- public DocsEnum docs(Bits skipDocs, DocsEnum reuse, int flags) throws IOException {
- currentFrame.decodeMetaData();
- return fr.parent.postingsReader.docs(fr.fieldInfo, currentFrame.termState, skipDocs, reuse, flags);
- }
+ public PostingsEnum postings(Bits skipDocs, PostingsEnum reuse, int flags) throws IOException {
- @Override
- public DocsAndPositionsEnum docsAndPositions(Bits skipDocs, DocsAndPositionsEnum reuse, int flags) throws IOException {
- if (fr.fieldInfo.getIndexOptions().compareTo(IndexOptions.DOCS_AND_FREQS_AND_POSITIONS) < 0) {
- // Positions were not indexed:
- return null;
+ if (PostingsEnum.requiresPositions(flags)) {
+ if (fr.fieldInfo.getIndexOptions().compareTo(IndexOptions.DOCS_AND_FREQS_AND_POSITIONS) < 0) {
+ // Positions were not indexed:
+ return null;
+ }
}
currentFrame.decodeMetaData();
- return fr.parent.postingsReader.docsAndPositions(fr.fieldInfo, currentFrame.termState, skipDocs, reuse, flags);
+ return fr.parent.postingsReader.postings(fr.fieldInfo, currentFrame.termState, skipDocs, reuse, flags);
}
private int getState() {
diff --git a/lucene/codecs/src/java/org/apache/lucene/codecs/blocktreeords/OrdsSegmentTermsEnum.java b/lucene/codecs/src/java/org/apache/lucene/codecs/blocktreeords/OrdsSegmentTermsEnum.java
index 8bdd248bca1..4f0182f2be7 100644
--- a/lucene/codecs/src/java/org/apache/lucene/codecs/blocktreeords/OrdsSegmentTermsEnum.java
+++ b/lucene/codecs/src/java/org/apache/lucene/codecs/blocktreeords/OrdsSegmentTermsEnum.java
@@ -25,8 +25,7 @@ import java.io.PrintStream;
import org.apache.lucene.codecs.BlockTermState;
import org.apache.lucene.codecs.blocktreeords.FSTOrdsOutputs.Output;
-import org.apache.lucene.index.DocsAndPositionsEnum;
-import org.apache.lucene.index.DocsEnum;
+import org.apache.lucene.index.PostingsEnum;
import org.apache.lucene.index.IndexOptions;
import org.apache.lucene.index.TermState;
import org.apache.lucene.index.TermsEnum;
@@ -924,7 +923,15 @@ public final class OrdsSegmentTermsEnum extends TermsEnum {
}
@Override
- public DocsEnum docs(Bits skipDocs, DocsEnum reuse, int flags) throws IOException {
+ public PostingsEnum postings(Bits skipDocs, PostingsEnum reuse, int flags) throws IOException {
+
+ if (PostingsEnum.requiresPositions(flags)) {
+ if (fr.fieldInfo.getIndexOptions().compareTo(IndexOptions.DOCS_AND_FREQS_AND_POSITIONS) < 0) {
+ // Positions were not indexed:
+ return null;
+ }
+ }
+
assert !eof;
//if (DEBUG) {
//System.out.println("BTTR.docs seg=" + segment);
@@ -933,19 +940,7 @@ public final class OrdsSegmentTermsEnum extends TermsEnum {
//if (DEBUG) {
//System.out.println(" state=" + currentFrame.state);
//}
- return fr.parent.postingsReader.docs(fr.fieldInfo, currentFrame.state, skipDocs, reuse, flags);
- }
-
- @Override
- public DocsAndPositionsEnum docsAndPositions(Bits skipDocs, DocsAndPositionsEnum reuse, int flags) throws IOException {
- if (fr.fieldInfo.getIndexOptions().compareTo(IndexOptions.DOCS_AND_FREQS_AND_POSITIONS) < 0) {
- // Positions were not indexed:
- return null;
- }
-
- assert !eof;
- currentFrame.decodeMetaData();
- return fr.parent.postingsReader.docsAndPositions(fr.fieldInfo, currentFrame.state, skipDocs, reuse, flags);
+ return fr.parent.postingsReader.postings(fr.fieldInfo, currentFrame.state, skipDocs, reuse, flags);
}
@Override
diff --git a/lucene/codecs/src/java/org/apache/lucene/codecs/bloom/BloomFilteringPostingsFormat.java b/lucene/codecs/src/java/org/apache/lucene/codecs/bloom/BloomFilteringPostingsFormat.java
index 09ca6e0ea18..ec8f5e519cc 100644
--- a/lucene/codecs/src/java/org/apache/lucene/codecs/bloom/BloomFilteringPostingsFormat.java
+++ b/lucene/codecs/src/java/org/apache/lucene/codecs/bloom/BloomFilteringPostingsFormat.java
@@ -32,8 +32,7 @@ import org.apache.lucene.codecs.FieldsConsumer;
import org.apache.lucene.codecs.FieldsProducer;
import org.apache.lucene.codecs.PostingsFormat;
import org.apache.lucene.codecs.bloom.FuzzySet.ContainsResult;
-import org.apache.lucene.index.DocsAndPositionsEnum;
-import org.apache.lucene.index.DocsEnum;
+import org.apache.lucene.index.PostingsEnum;
import org.apache.lucene.index.FieldInfo;
import org.apache.lucene.index.Fields;
import org.apache.lucene.index.IndexFileNames;
@@ -382,19 +381,13 @@ public final class BloomFilteringPostingsFormat extends PostingsFormat {
public long totalTermFreq() throws IOException {
return delegate().totalTermFreq();
}
-
@Override
- public DocsAndPositionsEnum docsAndPositions(Bits liveDocs,
- DocsAndPositionsEnum reuse, int flags) throws IOException {
- return delegate().docsAndPositions(liveDocs, reuse, flags);
- }
-
- @Override
- public DocsEnum docs(Bits liveDocs, DocsEnum reuse, int flags)
+ public PostingsEnum postings(Bits liveDocs, PostingsEnum reuse, int flags)
throws IOException {
- return delegate().docs(liveDocs, reuse, flags);
+ return delegate().postings(liveDocs, reuse, flags);
}
+
}
@Override
@@ -460,7 +453,7 @@ public final class BloomFilteringPostingsFormat extends PostingsFormat {
FuzzySet bloomFilter = null;
- DocsEnum docsEnum = null;
+ PostingsEnum postingsEnum = null;
while (true) {
BytesRef term = termsEnum.next();
if (term == null) {
@@ -476,8 +469,8 @@ public final class BloomFilteringPostingsFormat extends PostingsFormat {
bloomFilters.put(fieldInfo, bloomFilter);
}
// Make sure there's at least one doc for this term:
- docsEnum = termsEnum.docs(null, docsEnum, 0);
- if (docsEnum.nextDoc() != DocsEnum.NO_MORE_DOCS) {
+ postingsEnum = termsEnum.postings(null, postingsEnum, 0);
+ if (postingsEnum.nextDoc() != PostingsEnum.NO_MORE_DOCS) {
bloomFilter.addValue(term);
}
}
diff --git a/lucene/codecs/src/java/org/apache/lucene/codecs/memory/DirectPostingsFormat.java b/lucene/codecs/src/java/org/apache/lucene/codecs/memory/DirectPostingsFormat.java
index 562c9dcea86..bdcb9f9c799 100644
--- a/lucene/codecs/src/java/org/apache/lucene/codecs/memory/DirectPostingsFormat.java
+++ b/lucene/codecs/src/java/org/apache/lucene/codecs/memory/DirectPostingsFormat.java
@@ -27,13 +27,13 @@ import java.util.TreeMap;
import org.apache.lucene.codecs.FieldsConsumer;
import org.apache.lucene.codecs.FieldsProducer;
import org.apache.lucene.codecs.PostingsFormat;
-import org.apache.lucene.codecs.lucene50.Lucene50PostingsFormat; // javadocs
-import org.apache.lucene.index.DocsAndPositionsEnum;
+import org.apache.lucene.codecs.lucene50.Lucene50PostingsFormat;
import org.apache.lucene.index.DocsEnum;
import org.apache.lucene.index.FieldInfo;
import org.apache.lucene.index.Fields;
import org.apache.lucene.index.IndexOptions;
import org.apache.lucene.index.OrdTermState;
+import org.apache.lucene.index.PostingsEnum;
import org.apache.lucene.index.SegmentReadState;
import org.apache.lucene.index.SegmentWriteState;
import org.apache.lucene.index.TermState;
@@ -51,7 +51,7 @@ import org.apache.lucene.util.automaton.CompiledAutomaton;
import org.apache.lucene.util.automaton.RunAutomaton;
import org.apache.lucene.util.automaton.Transition;
-// TODO:
+// TODO:
// - build depth-N prefix hash?
// - or: longer dense skip lists than just next byte?
@@ -62,7 +62,7 @@ import org.apache.lucene.util.automaton.Transition;
*
WARNING: This is
* exceptionally RAM intensive: it makes no effort to
* compress the postings data, storing terms as separate
- * byte[] and postings as separate int[], but as a result it
+ * byte[] and postings as separate int[], but as a result it
* gives substantial increase in search performance.
*
*
This postings format supports {@link TermsEnum#ord}
@@ -89,7 +89,7 @@ public final class DirectPostingsFormat extends PostingsFormat {
public DirectPostingsFormat() {
this(DEFAULT_MIN_SKIP_COUNT, DEFAULT_LOW_FREQ_CUTOFF);
}
-
+
/** minSkipCount is how many terms in a row must have the
* same prefix before we put a skip pointer down. Terms
* with docFreq <= lowFreqCutoff will use a single int[]
@@ -100,7 +100,7 @@ public final class DirectPostingsFormat extends PostingsFormat {
this.minSkipCount = minSkipCount;
this.lowFreqCutoff = lowFreqCutoff;
}
-
+
@Override
public FieldsConsumer fieldsConsumer(SegmentWriteState state) throws IOException {
return PostingsFormat.forName("Lucene50").fieldsConsumer(state);
@@ -161,7 +161,7 @@ public final class DirectPostingsFormat extends PostingsFormat {
}
return sizeInBytes;
}
-
+
@Override
public Collection getChildResources() {
return Accountables.namedAccountables("field", fields);
@@ -206,9 +206,10 @@ public final class DirectPostingsFormat extends PostingsFormat {
@Override
public long ramBytesUsed() {
return BASE_RAM_BYTES_USED +
- ((postings!=null) ? RamUsageEstimator.sizeOf(postings) : 0) +
+ ((postings!=null) ? RamUsageEstimator.sizeOf(postings) : 0) +
((payloads!=null) ? RamUsageEstimator.sizeOf(payloads) : 0);
}
+
}
// TODO: maybe specialize into prx/no-prx/no-frq cases?
@@ -232,31 +233,32 @@ public final class DirectPostingsFormat extends PostingsFormat {
@Override
public long ramBytesUsed() {
- long sizeInBytes = BASE_RAM_BYTES_USED;
- sizeInBytes += (docIDs!=null)? RamUsageEstimator.sizeOf(docIDs) : 0;
- sizeInBytes += (freqs!=null)? RamUsageEstimator.sizeOf(freqs) : 0;
-
- if(positions != null) {
- sizeInBytes += RamUsageEstimator.shallowSizeOf(positions);
- for(int[] position : positions) {
- sizeInBytes += (position!=null) ? RamUsageEstimator.sizeOf(position) : 0;
- }
- }
-
- if (payloads != null) {
- sizeInBytes += RamUsageEstimator.shallowSizeOf(payloads);
- for(byte[][] payload : payloads) {
- if(payload != null) {
- sizeInBytes += RamUsageEstimator.shallowSizeOf(payload);
- for(byte[] pload : payload) {
- sizeInBytes += (pload!=null) ? RamUsageEstimator.sizeOf(pload) : 0;
- }
- }
- }
- }
-
- return sizeInBytes;
+ long sizeInBytes = BASE_RAM_BYTES_USED;
+ sizeInBytes += (docIDs!=null)? RamUsageEstimator.sizeOf(docIDs) : 0;
+ sizeInBytes += (freqs!=null)? RamUsageEstimator.sizeOf(freqs) : 0;
+
+ if(positions != null) {
+ sizeInBytes += RamUsageEstimator.shallowSizeOf(positions);
+ for(int[] position : positions) {
+ sizeInBytes += (position!=null) ? RamUsageEstimator.sizeOf(position) : 0;
+ }
+ }
+
+ if (payloads != null) {
+ sizeInBytes += RamUsageEstimator.shallowSizeOf(payloads);
+ for(byte[][] payload : payloads) {
+ if(payload != null) {
+ sizeInBytes += RamUsageEstimator.shallowSizeOf(payload);
+ for(byte[] pload : payload) {
+ sizeInBytes += (pload!=null) ? RamUsageEstimator.sizeOf(pload) : 0;
+ }
+ }
+ }
+ }
+
+ return sizeInBytes;
}
+
}
private final byte[] termBytes;
@@ -313,7 +315,7 @@ public final class DirectPostingsFormat extends PostingsFormat {
}
terms = new TermAndSkip[numTerms];
termOffsets = new int[1+numTerms];
-
+
byte[] termBytes = new byte[1024];
this.minSkipCount = minSkipCount;
@@ -324,8 +326,8 @@ public final class DirectPostingsFormat extends PostingsFormat {
hasPayloads = fieldInfo.hasPayloads();
BytesRef term;
- DocsEnum docsEnum = null;
- DocsAndPositionsEnum docsAndPositionsEnum = null;
+ PostingsEnum postingsEnum = null;
+ PostingsEnum docsAndPositionsEnum = null;
final TermsEnum termsEnum = termsIn.iterator(null);
int termOffset = 0;
@@ -356,18 +358,18 @@ public final class DirectPostingsFormat extends PostingsFormat {
termOffsets[count+1] = termOffset;
if (hasPos) {
- docsAndPositionsEnum = termsEnum.docsAndPositions(null, docsAndPositionsEnum);
+ docsAndPositionsEnum = termsEnum.postings(null, docsAndPositionsEnum, PostingsEnum.FLAG_ALL);
} else {
- docsEnum = termsEnum.docs(null, docsEnum);
+ postingsEnum = termsEnum.postings(null, postingsEnum);
}
final TermAndSkip ent;
- final DocsEnum docsEnum2;
+ final PostingsEnum postingsEnum2;
if (hasPos) {
- docsEnum2 = docsAndPositionsEnum;
+ postingsEnum2 = docsAndPositionsEnum;
} else {
- docsEnum2 = docsEnum;
+ postingsEnum2 = postingsEnum;
}
int docID;
@@ -377,10 +379,10 @@ public final class DirectPostingsFormat extends PostingsFormat {
ros.reset();
// Pack postings for low-freq terms into a single int[]:
- while ((docID = docsEnum2.nextDoc()) != DocsEnum.NO_MORE_DOCS) {
+ while ((docID = postingsEnum2.nextDoc()) != PostingsEnum.NO_MORE_DOCS) {
scratch.add(docID);
if (hasFreq) {
- final int freq = docsEnum2.freq();
+ final int freq = postingsEnum2.freq();
scratch.add(freq);
if (hasPos) {
for(int pos=0;pos= PostingsEnum.FLAG_POSITIONS) {
+ if (!hasPos) {
+ return null;
+ }
+
+ if (terms[termOrd] instanceof LowFreqTerm) {
+ final LowFreqTerm term = ((LowFreqTerm) terms[termOrd]);
+ final int[] postings = term.postings;
+ final byte[] payloads = term.payloads;
+ return new LowFreqPostingsEnum(liveDocs, hasOffsets, hasPayloads).reset(postings, payloads);
+ } else {
+ final HighFreqTerm term = (HighFreqTerm) terms[termOrd];
+ return new HighFreqPostingsEnum(liveDocs, hasOffsets).reset(term.docIDs, term.freqs, term.positions, term.payloads);
+ }
+ }
+
if (terms[termOrd] instanceof LowFreqTerm) {
final int[] postings = ((LowFreqTerm) terms[termOrd]).postings;
if (hasFreq) {
@@ -927,25 +945,6 @@ public final class DirectPostingsFormat extends PostingsFormat {
}
}
- @Override
- public DocsAndPositionsEnum docsAndPositions(Bits liveDocs, DocsAndPositionsEnum reuse, int flags) {
- if (!hasPos) {
- return null;
- }
-
- // TODO: implement reuse
- // it's hairy!
-
- if (terms[termOrd] instanceof LowFreqTerm) {
- final LowFreqTerm term = ((LowFreqTerm) terms[termOrd]);
- final int[] postings = term.postings;
- final byte[] payloads = term.payloads;
- return new LowFreqDocsAndPositionsEnum(liveDocs, hasOffsets, hasPayloads).reset(postings, payloads);
- } else {
- final HighFreqTerm term = (HighFreqTerm) terms[termOrd];
- return new HighFreqDocsAndPositionsEnum(liveDocs, hasOffsets).reset(term.docIDs, term.freqs, term.positions, term.payloads);
- }
- }
}
private final class DirectIntersectTermsEnum extends TermsEnum {
@@ -1203,7 +1202,7 @@ public final class DirectPostingsFormat extends PostingsFormat {
// if (DEBUG) {
// System.out.println(" term=" + new BytesRef(termBytes, termOffset, termLength).utf8ToString() + " skips=" + Arrays.toString(skips));
// }
-
+
assert termOrd < state.changeOrd;
assert stateUpto <= termLength: "term.length=" + termLength + "; stateUpto=" + stateUpto;
@@ -1336,7 +1335,7 @@ public final class DirectPostingsFormat extends PostingsFormat {
compiledAutomaton.automaton.initTransition(nextState, states[stateUpto].transition);
states[stateUpto].transitionUpto = -1;
states[stateUpto].transitionMax = -1;
-
+
if (stateUpto == termLength) {
// if (DEBUG) {
// System.out.println(" term ends after push");
@@ -1453,9 +1452,23 @@ public final class DirectPostingsFormat extends PostingsFormat {
}
@Override
- public DocsEnum docs(Bits liveDocs, DocsEnum reuse, int flags) {
+ public PostingsEnum postings(Bits liveDocs, PostingsEnum reuse, int flags) {
// TODO: implement reuse
// it's hairy!
+ if ((flags & PostingsEnum.FLAG_POSITIONS) >= PostingsEnum.FLAG_POSITIONS) {
+ if (!hasPos) {
+ return null;
+ }
+ if (terms[termOrd] instanceof LowFreqTerm) {
+ final LowFreqTerm term = ((LowFreqTerm) terms[termOrd]);
+ final int[] postings = term.postings;
+ final byte[] payloads = term.payloads;
+ return new LowFreqPostingsEnum(liveDocs, hasOffsets, hasPayloads).reset(postings, payloads);
+ } else {
+ final HighFreqTerm term = (HighFreqTerm) terms[termOrd];
+ return new HighFreqPostingsEnum(liveDocs, hasOffsets).reset(term.docIDs, term.freqs, term.positions, term.payloads);
+ }
+ }
if (terms[termOrd] instanceof LowFreqTerm) {
final int[] postings = ((LowFreqTerm) terms[termOrd]).postings;
@@ -1484,26 +1497,6 @@ public final class DirectPostingsFormat extends PostingsFormat {
}
}
- @Override
- public DocsAndPositionsEnum docsAndPositions(Bits liveDocs, DocsAndPositionsEnum reuse, int flags) {
- if (!hasPos) {
- return null;
- }
-
- // TODO: implement reuse
- // it's hairy!
-
- if (terms[termOrd] instanceof LowFreqTerm) {
- final LowFreqTerm term = ((LowFreqTerm) terms[termOrd]);
- final int[] postings = term.postings;
- final byte[] payloads = term.payloads;
- return new LowFreqDocsAndPositionsEnum(liveDocs, hasOffsets, hasPayloads).reset(postings, payloads);
- } else {
- final HighFreqTerm term = (HighFreqTerm) terms[termOrd];
- return new HighFreqDocsAndPositionsEnum(liveDocs, hasOffsets).reset(term.docIDs, term.freqs, term.positions, term.payloads);
- }
- }
-
@Override
public SeekStatus seekCeil(BytesRef term) {
throw new UnsupportedOperationException();
@@ -1530,7 +1523,7 @@ public final class DirectPostingsFormat extends PostingsFormat {
return liveDocs == this.liveDocs;
}
- public DocsEnum reset(int[] postings) {
+ public PostingsEnum reset(int[] postings) {
this.postings = postings;
upto = -1;
return this;
@@ -1572,13 +1565,19 @@ public final class DirectPostingsFormat extends PostingsFormat {
return 1;
}
+ @Override
+ public int nextPosition() throws IOException {
+ assert false : "Shouldn't be calling nextPosition() if you haven't asked for positions";
+ return -1;
+ }
+
@Override
public int advance(int target) throws IOException {
// Linear scan, but this is low-freq term so it won't
// be costly:
return slowAdvance(target);
}
-
+
@Override
public long cost() {
return postings.length;
@@ -1599,7 +1598,7 @@ public final class DirectPostingsFormat extends PostingsFormat {
return liveDocs == this.liveDocs;
}
- public DocsEnum reset(int[] postings) {
+ public PostingsEnum reset(int[] postings) {
this.postings = postings;
upto = -2;
return this;
@@ -1640,13 +1639,19 @@ public final class DirectPostingsFormat extends PostingsFormat {
return postings[upto+1];
}
+ @Override
+ public int nextPosition() throws IOException {
+ assert false : "Shouldn't be calling nextPosition() if you haven't asked for positions";
+ return -1;
+ }
+
@Override
public int advance(int target) throws IOException {
// Linear scan, but this is low-freq term so it won't
// be costly:
return slowAdvance(target);
}
-
+
@Override
public long cost() {
return postings.length / 2;
@@ -1673,7 +1678,7 @@ public final class DirectPostingsFormat extends PostingsFormat {
return liveDocs == this.liveDocs && posMult == this.posMult;
}
- public DocsEnum reset(int[] postings) {
+ public PostingsEnum reset(int[] postings) {
this.postings = postings;
upto = -2;
freq = 0;
@@ -1688,7 +1693,7 @@ public final class DirectPostingsFormat extends PostingsFormat {
// System.out.println(" nextDoc freq=" + freq + " upto=" + upto + " vs " + postings.length);
// }
if (liveDocs == null) {
- if (upto < postings.length) {
+ if (upto < postings.length) {
freq = postings[upto+1];
assert freq > 0;
return postings[upto];
@@ -1724,13 +1729,19 @@ public final class DirectPostingsFormat extends PostingsFormat {
return freq;
}
+ @Override
+ public int nextPosition() throws IOException {
+ assert false : "should be using LowFreqDocsAndPositionsEnum";
+ return -1;
+ }
+
@Override
public int advance(int target) throws IOException {
// Linear scan, but this is low-freq term so it won't
// be costly:
return slowAdvance(target);
}
-
+
@Override
public long cost() {
// TODO: could do a better estimate
@@ -1738,7 +1749,7 @@ public final class DirectPostingsFormat extends PostingsFormat {
}
}
- private final static class LowFreqDocsAndPositionsEnum extends DocsAndPositionsEnum {
+ private final static class LowFreqPostingsEnum extends PostingsEnum {
private int[] postings;
private final Bits liveDocs;
private final int posMult;
@@ -1749,6 +1760,7 @@ public final class DirectPostingsFormat extends PostingsFormat {
private int docID;
private int freq;
private int skipPositions;
+ private int pos;
private int startOffset;
private int endOffset;
private int lastPayloadOffset;
@@ -1756,7 +1768,7 @@ public final class DirectPostingsFormat extends PostingsFormat {
private int payloadLength;
private byte[] payloadBytes;
- public LowFreqDocsAndPositionsEnum(Bits liveDocs, boolean hasOffsets, boolean hasPayloads) {
+ public LowFreqPostingsEnum(Bits liveDocs, boolean hasOffsets, boolean hasPayloads) {
this.liveDocs = liveDocs;
this.hasOffsets = hasOffsets;
this.hasPayloads = hasPayloads;
@@ -1773,10 +1785,11 @@ public final class DirectPostingsFormat extends PostingsFormat {
}
}
- public DocsAndPositionsEnum reset(int[] postings, byte[] payloadBytes) {
+ public PostingsEnum reset(int[] postings, byte[] payloadBytes) {
this.postings = postings;
upto = 0;
skipPositions = 0;
+ pos = -1;
startOffset = -1;
endOffset = -1;
docID = -1;
@@ -1787,6 +1800,7 @@ public final class DirectPostingsFormat extends PostingsFormat {
@Override
public int nextDoc() {
+ pos = -1;
if (hasPayloads) {
for(int i=0;i 0;
skipPositions--;
- final int pos = postings[upto++];
+ pos = postings[upto++];
if (hasOffsets) {
startOffset = postings[upto++];
endOffset = postings[upto++];
@@ -1884,7 +1898,7 @@ public final class DirectPostingsFormat extends PostingsFormat {
return null;
}
}
-
+
@Override
public long cost() {
// TODO: could do a better estimate
@@ -1916,7 +1930,7 @@ public final class DirectPostingsFormat extends PostingsFormat {
return freqs;
}
- public DocsEnum reset(int[] docIDs, int[] freqs) {
+ public PostingsEnum reset(int[] docIDs, int[] freqs) {
this.docIDs = docIDs;
this.freqs = freqs;
docID = upto = -1;
@@ -2063,7 +2077,7 @@ public final class DirectPostingsFormat extends PostingsFormat {
return docID = docIDs[upto];
}
}
-
+
@Override
public long cost() {
return docIDs.length;
@@ -2071,7 +2085,7 @@ public final class DirectPostingsFormat extends PostingsFormat {
}
// TODO: specialize offsets and not
- private final static class HighFreqDocsAndPositionsEnum extends DocsAndPositionsEnum {
+ private final static class HighFreqPostingsEnum extends PostingsEnum {
private int[] docIDs;
private int[] freqs;
private int[][] positions;
@@ -2084,7 +2098,7 @@ public final class DirectPostingsFormat extends PostingsFormat {
private int posUpto;
private int[] curPositions;
- public HighFreqDocsAndPositionsEnum(Bits liveDocs, boolean hasOffsets) {
+ public HighFreqPostingsEnum(Bits liveDocs, boolean hasOffsets) {
this.liveDocs = liveDocs;
this.hasOffsets = hasOffsets;
posJump = hasOffsets ? 3 : 1;
@@ -2106,7 +2120,7 @@ public final class DirectPostingsFormat extends PostingsFormat {
return liveDocs;
}
- public DocsAndPositionsEnum reset(int[] docIDs, int[] freqs, int[][] positions, byte[][][] payloads) {
+ public PostingsEnum reset(int[] docIDs, int[] freqs, int[][] positions, byte[][][] payloads) {
this.docIDs = docIDs;
this.freqs = freqs;
this.positions = positions;
@@ -2120,7 +2134,7 @@ public final class DirectPostingsFormat extends PostingsFormat {
upto++;
if (liveDocs == null) {
if (upto < docIDs.length) {
- posUpto = -posJump;
+ posUpto = -posJump;
curPositions = positions[upto];
return docID = docIDs[upto];
}
@@ -2151,6 +2165,7 @@ public final class DirectPostingsFormat extends PostingsFormat {
@Override
public int nextPosition() {
posUpto += posJump;
+ assert posUpto < curPositions.length;
return curPositions[posUpto];
}
@@ -2301,7 +2316,7 @@ public final class DirectPostingsFormat extends PostingsFormat {
return payload;
}
}
-
+
@Override
public long cost() {
return docIDs.length;
diff --git a/lucene/codecs/src/java/org/apache/lucene/codecs/memory/FSTOrdTermsReader.java b/lucene/codecs/src/java/org/apache/lucene/codecs/memory/FSTOrdTermsReader.java
index b5030cec99e..9c1fcdff786 100644
--- a/lucene/codecs/src/java/org/apache/lucene/codecs/memory/FSTOrdTermsReader.java
+++ b/lucene/codecs/src/java/org/apache/lucene/codecs/memory/FSTOrdTermsReader.java
@@ -32,8 +32,7 @@ import org.apache.lucene.codecs.CodecUtil;
import org.apache.lucene.codecs.FieldsProducer;
import org.apache.lucene.codecs.PostingsReaderBase;
import org.apache.lucene.index.CorruptIndexException;
-import org.apache.lucene.index.DocsAndPositionsEnum;
-import org.apache.lucene.index.DocsEnum;
+import org.apache.lucene.index.PostingsEnum;
import org.apache.lucene.index.FieldInfo;
import org.apache.lucene.index.FieldInfos;
import org.apache.lucene.index.IndexFileNames;
@@ -428,18 +427,9 @@ public class FSTOrdTermsReader extends FieldsProducer {
}
@Override
- public DocsEnum docs(Bits liveDocs, DocsEnum reuse, int flags) throws IOException {
+ public PostingsEnum postings(Bits liveDocs, PostingsEnum reuse, int flags) throws IOException {
decodeMetaData();
- return postingsReader.docs(fieldInfo, state, liveDocs, reuse, flags);
- }
-
- @Override
- public DocsAndPositionsEnum docsAndPositions(Bits liveDocs, DocsAndPositionsEnum reuse, int flags) throws IOException {
- if (!hasPositions()) {
- return null;
- }
- decodeMetaData();
- return postingsReader.docsAndPositions(fieldInfo, state, liveDocs, reuse, flags);
+ return postingsReader.postings(fieldInfo, state, liveDocs, reuse, flags);
}
// TODO: this can be achieved by making use of Util.getByOutput()
diff --git a/lucene/codecs/src/java/org/apache/lucene/codecs/memory/FSTTermsReader.java b/lucene/codecs/src/java/org/apache/lucene/codecs/memory/FSTTermsReader.java
index 23065b26214..3060ff47fd6 100644
--- a/lucene/codecs/src/java/org/apache/lucene/codecs/memory/FSTTermsReader.java
+++ b/lucene/codecs/src/java/org/apache/lucene/codecs/memory/FSTTermsReader.java
@@ -31,8 +31,7 @@ import org.apache.lucene.codecs.CodecUtil;
import org.apache.lucene.codecs.FieldsProducer;
import org.apache.lucene.codecs.PostingsReaderBase;
import org.apache.lucene.index.CorruptIndexException;
-import org.apache.lucene.index.DocsAndPositionsEnum;
-import org.apache.lucene.index.DocsEnum;
+import org.apache.lucene.index.PostingsEnum;
import org.apache.lucene.index.FieldInfo;
import org.apache.lucene.index.FieldInfos;
import org.apache.lucene.index.IndexFileNames;
@@ -291,18 +290,9 @@ public class FSTTermsReader extends FieldsProducer {
}
@Override
- public DocsEnum docs(Bits liveDocs, DocsEnum reuse, int flags) throws IOException {
+ public PostingsEnum postings(Bits liveDocs, PostingsEnum reuse, int flags) throws IOException {
decodeMetaData();
- return postingsReader.docs(fieldInfo, state, liveDocs, reuse, flags);
- }
-
- @Override
- public DocsAndPositionsEnum docsAndPositions(Bits liveDocs, DocsAndPositionsEnum reuse, int flags) throws IOException {
- if (!hasPositions()) {
- return null;
- }
- decodeMetaData();
- return postingsReader.docsAndPositions(fieldInfo, state, liveDocs, reuse, flags);
+ return postingsReader.postings(fieldInfo, state, liveDocs, reuse, flags);
}
@Override
diff --git a/lucene/codecs/src/java/org/apache/lucene/codecs/memory/MemoryDocValuesProducer.java b/lucene/codecs/src/java/org/apache/lucene/codecs/memory/MemoryDocValuesProducer.java
index f653606f8c0..ce47e4b8e83 100644
--- a/lucene/codecs/src/java/org/apache/lucene/codecs/memory/MemoryDocValuesProducer.java
+++ b/lucene/codecs/src/java/org/apache/lucene/codecs/memory/MemoryDocValuesProducer.java
@@ -31,8 +31,7 @@ import org.apache.lucene.codecs.DocValuesProducer;
import org.apache.lucene.index.BinaryDocValues;
import org.apache.lucene.index.CorruptIndexException;
import org.apache.lucene.index.DocValues;
-import org.apache.lucene.index.DocsAndPositionsEnum;
-import org.apache.lucene.index.DocsEnum;
+import org.apache.lucene.index.PostingsEnum;
import org.apache.lucene.index.FieldInfo;
import org.apache.lucene.index.FieldInfos;
import org.apache.lucene.index.IndexFileNames;
@@ -893,13 +892,9 @@ class MemoryDocValuesProducer extends DocValuesProducer {
}
@Override
- public DocsEnum docs(Bits liveDocs, DocsEnum reuse, int flags) throws IOException {
+ public PostingsEnum postings(Bits liveDocs, PostingsEnum reuse, int flags) throws IOException {
throw new UnsupportedOperationException();
}
- @Override
- public DocsAndPositionsEnum docsAndPositions(Bits liveDocs, DocsAndPositionsEnum reuse, int flags) throws IOException {
- throw new UnsupportedOperationException();
- }
}
}
diff --git a/lucene/codecs/src/java/org/apache/lucene/codecs/memory/MemoryPostingsFormat.java b/lucene/codecs/src/java/org/apache/lucene/codecs/memory/MemoryPostingsFormat.java
index c7ce7e13969..a91dbed7554 100644
--- a/lucene/codecs/src/java/org/apache/lucene/codecs/memory/MemoryPostingsFormat.java
+++ b/lucene/codecs/src/java/org/apache/lucene/codecs/memory/MemoryPostingsFormat.java
@@ -31,8 +31,8 @@ import org.apache.lucene.codecs.FieldsProducer;
import org.apache.lucene.codecs.PostingsFormat;
import org.apache.lucene.codecs.TermStats;
import org.apache.lucene.index.CorruptIndexException;
-import org.apache.lucene.index.DocsAndPositionsEnum;
import org.apache.lucene.index.DocsEnum;
+import org.apache.lucene.index.PostingsEnum;
import org.apache.lucene.index.FieldInfo;
import org.apache.lucene.index.FieldInfos;
import org.apache.lucene.index.Fields;
@@ -317,8 +317,8 @@ public final class MemoryPostingsFormat extends PostingsFormat {
FixedBitSet docsSeen = new FixedBitSet(state.segmentInfo.getDocCount());
long sumTotalTermFreq = 0;
long sumDocFreq = 0;
- DocsEnum docsEnum = null;
- DocsAndPositionsEnum posEnum = null;
+ PostingsEnum postingsEnum = null;
+ PostingsEnum posEnum = null;
int enumFlags;
IndexOptions indexOptions = fieldInfo.getIndexOptions();
@@ -330,18 +330,19 @@ public final class MemoryPostingsFormat extends PostingsFormat {
if (writeFreqs == false) {
enumFlags = 0;
} else if (writePositions == false) {
- enumFlags = DocsEnum.FLAG_FREQS;
+ enumFlags = PostingsEnum.FLAG_FREQS;
} else if (writeOffsets == false) {
if (writePayloads) {
- enumFlags = DocsAndPositionsEnum.FLAG_PAYLOADS;
- } else {
- enumFlags = 0;
+ enumFlags = PostingsEnum.FLAG_PAYLOADS;
+ }
+ else {
+ enumFlags = PostingsEnum.FLAG_POSITIONS;
}
} else {
if (writePayloads) {
- enumFlags = DocsAndPositionsEnum.FLAG_PAYLOADS | DocsAndPositionsEnum.FLAG_OFFSETS;
+ enumFlags = PostingsEnum.FLAG_PAYLOADS | PostingsEnum.FLAG_OFFSETS;
} else {
- enumFlags = DocsAndPositionsEnum.FLAG_OFFSETS;
+ enumFlags = PostingsEnum.FLAG_OFFSETS;
}
}
@@ -353,18 +354,18 @@ public final class MemoryPostingsFormat extends PostingsFormat {
termsWriter.postingsWriter.reset();
if (writePositions) {
- posEnum = termsEnum.docsAndPositions(null, posEnum, enumFlags);
- docsEnum = posEnum;
+ posEnum = termsEnum.postings(null, posEnum, enumFlags);
+ postingsEnum = posEnum;
} else {
- docsEnum = termsEnum.docs(null, docsEnum, enumFlags);
+ postingsEnum = termsEnum.postings(null, postingsEnum, enumFlags);
posEnum = null;
}
int docFreq = 0;
long totalTermFreq = 0;
while (true) {
- int docID = docsEnum.nextDoc();
- if (docID == DocsEnum.NO_MORE_DOCS) {
+ int docID = postingsEnum.nextDoc();
+ if (docID == PostingsEnum.NO_MORE_DOCS) {
break;
}
docsSeen.set(docID);
@@ -372,7 +373,7 @@ public final class MemoryPostingsFormat extends PostingsFormat {
int freq;
if (writeFreqs) {
- freq = docsEnum.freq();
+ freq = postingsEnum.freq();
totalTermFreq += freq;
} else {
freq = -1;
@@ -545,14 +546,14 @@ public final class MemoryPostingsFormat extends PostingsFormat {
public int freq() {
return freq;
}
-
+
@Override
public long cost() {
return numDocs;
}
}
- private final static class FSTDocsAndPositionsEnum extends DocsAndPositionsEnum {
+ private final static class FSTPostingsEnum extends PostingsEnum {
private final boolean storePayloads;
private byte[] buffer = new byte[16];
private final ByteArrayDataInput in = new ByteArrayDataInput(buffer);
@@ -572,7 +573,7 @@ public final class MemoryPostingsFormat extends PostingsFormat {
private int pos;
private final BytesRef payload = new BytesRef();
- public FSTDocsAndPositionsEnum(boolean storePayloads, boolean storeOffsets) {
+ public FSTPostingsEnum(boolean storePayloads, boolean storeOffsets) {
this.storePayloads = storePayloads;
this.storeOffsets = storeOffsets;
}
@@ -581,7 +582,7 @@ public final class MemoryPostingsFormat extends PostingsFormat {
return storePayloads == this.storePayloads && storeOffsets == this.storeOffsets;
}
- public FSTDocsAndPositionsEnum reset(BytesRef bufferIn, Bits liveDocs, int numDocs) {
+ public FSTPostingsEnum reset(BytesRef bufferIn, Bits liveDocs, int numDocs) {
assert numDocs > 0;
// System.out.println("D&P reset bytes this=" + this);
@@ -807,7 +808,27 @@ public final class MemoryPostingsFormat extends PostingsFormat {
}
@Override
- public DocsEnum docs(Bits liveDocs, DocsEnum reuse, int flags) {
+ public PostingsEnum postings(Bits liveDocs, PostingsEnum reuse, int flags) {
+
+ if ((flags & PostingsEnum.FLAG_POSITIONS) >= PostingsEnum.FLAG_POSITIONS) {
+ if (field.getIndexOptions().compareTo(IndexOptions.DOCS_AND_FREQS_AND_POSITIONS) < 0) {
+ return null;
+ }
+ boolean hasOffsets = field.getIndexOptions().compareTo(IndexOptions.DOCS_AND_FREQS_AND_POSITIONS_AND_OFFSETS) >= 0;
+ decodeMetaData();
+ FSTPostingsEnum docsAndPositionsEnum;
+ if (reuse == null || !(reuse instanceof FSTPostingsEnum)) {
+ docsAndPositionsEnum = new FSTPostingsEnum(field.hasPayloads(), hasOffsets);
+ } else {
+ docsAndPositionsEnum = (FSTPostingsEnum) reuse;
+ if (!docsAndPositionsEnum.canReuse(field.hasPayloads(), hasOffsets)) {
+ docsAndPositionsEnum = new FSTPostingsEnum(field.hasPayloads(), hasOffsets);
+ }
+ }
+ //System.out.println("D&P reset this=" + this);
+ return docsAndPositionsEnum.reset(postingsSpare, liveDocs, docFreq);
+ }
+
decodeMetaData();
FSTDocsEnum docsEnum;
@@ -822,27 +843,6 @@ public final class MemoryPostingsFormat extends PostingsFormat {
return docsEnum.reset(this.postingsSpare, liveDocs, docFreq);
}
- @Override
- public DocsAndPositionsEnum docsAndPositions(Bits liveDocs, DocsAndPositionsEnum reuse, int flags) {
-
- boolean hasOffsets = field.getIndexOptions().compareTo(IndexOptions.DOCS_AND_FREQS_AND_POSITIONS_AND_OFFSETS) >= 0;
- if (field.getIndexOptions().compareTo(IndexOptions.DOCS_AND_FREQS_AND_POSITIONS) < 0) {
- return null;
- }
- decodeMetaData();
- FSTDocsAndPositionsEnum docsAndPositionsEnum;
- if (reuse == null || !(reuse instanceof FSTDocsAndPositionsEnum)) {
- docsAndPositionsEnum = new FSTDocsAndPositionsEnum(field.hasPayloads(), hasOffsets);
- } else {
- docsAndPositionsEnum = (FSTDocsAndPositionsEnum) reuse;
- if (!docsAndPositionsEnum.canReuse(field.hasPayloads(), hasOffsets)) {
- docsAndPositionsEnum = new FSTDocsAndPositionsEnum(field.hasPayloads(), hasOffsets);
- }
- }
- //System.out.println("D&P reset this=" + this);
- return docsAndPositionsEnum.reset(postingsSpare, liveDocs, docFreq);
- }
-
@Override
public BytesRef term() {
return current.input;
diff --git a/lucene/codecs/src/java/org/apache/lucene/codecs/simpletext/SimpleTextFieldsReader.java b/lucene/codecs/src/java/org/apache/lucene/codecs/simpletext/SimpleTextFieldsReader.java
index 3a7591c80ce..36d3206aed1 100644
--- a/lucene/codecs/src/java/org/apache/lucene/codecs/simpletext/SimpleTextFieldsReader.java
+++ b/lucene/codecs/src/java/org/apache/lucene/codecs/simpletext/SimpleTextFieldsReader.java
@@ -17,16 +17,6 @@ package org.apache.lucene.codecs.simpletext;
* limitations under the License.
*/
-import static org.apache.lucene.codecs.simpletext.SimpleTextFieldsWriter.DOC;
-import static org.apache.lucene.codecs.simpletext.SimpleTextFieldsWriter.END;
-import static org.apache.lucene.codecs.simpletext.SimpleTextFieldsWriter.END_OFFSET;
-import static org.apache.lucene.codecs.simpletext.SimpleTextFieldsWriter.FIELD;
-import static org.apache.lucene.codecs.simpletext.SimpleTextFieldsWriter.FREQ;
-import static org.apache.lucene.codecs.simpletext.SimpleTextFieldsWriter.PAYLOAD;
-import static org.apache.lucene.codecs.simpletext.SimpleTextFieldsWriter.POS;
-import static org.apache.lucene.codecs.simpletext.SimpleTextFieldsWriter.START_OFFSET;
-import static org.apache.lucene.codecs.simpletext.SimpleTextFieldsWriter.TERM;
-
import java.io.IOException;
import java.nio.charset.StandardCharsets;
import java.util.Collection;
@@ -37,11 +27,11 @@ import java.util.Map;
import java.util.TreeMap;
import org.apache.lucene.codecs.FieldsProducer;
-import org.apache.lucene.index.DocsAndPositionsEnum;
import org.apache.lucene.index.DocsEnum;
import org.apache.lucene.index.FieldInfo;
import org.apache.lucene.index.FieldInfos;
import org.apache.lucene.index.IndexOptions;
+import org.apache.lucene.index.PostingsEnum;
import org.apache.lucene.index.SegmentReadState;
import org.apache.lucene.index.Terms;
import org.apache.lucene.index.TermsEnum;
@@ -68,11 +58,21 @@ import org.apache.lucene.util.fst.PairOutputs;
import org.apache.lucene.util.fst.PositiveIntOutputs;
import org.apache.lucene.util.fst.Util;
+import static org.apache.lucene.codecs.simpletext.SimpleTextFieldsWriter.DOC;
+import static org.apache.lucene.codecs.simpletext.SimpleTextFieldsWriter.END;
+import static org.apache.lucene.codecs.simpletext.SimpleTextFieldsWriter.END_OFFSET;
+import static org.apache.lucene.codecs.simpletext.SimpleTextFieldsWriter.FIELD;
+import static org.apache.lucene.codecs.simpletext.SimpleTextFieldsWriter.FREQ;
+import static org.apache.lucene.codecs.simpletext.SimpleTextFieldsWriter.PAYLOAD;
+import static org.apache.lucene.codecs.simpletext.SimpleTextFieldsWriter.POS;
+import static org.apache.lucene.codecs.simpletext.SimpleTextFieldsWriter.START_OFFSET;
+import static org.apache.lucene.codecs.simpletext.SimpleTextFieldsWriter.TERM;
+
class SimpleTextFieldsReader extends FieldsProducer {
private static final long BASE_RAM_BYTES_USED =
- RamUsageEstimator.shallowSizeOfInstance(SimpleTextFieldsReader.class)
- + RamUsageEstimator.shallowSizeOfInstance(TreeMap.class);
+ RamUsageEstimator.shallowSizeOfInstance(SimpleTextFieldsReader.class)
+ + RamUsageEstimator.shallowSizeOfInstance(TreeMap.class);
private final TreeMap fields;
private final IndexInput in;
@@ -93,12 +93,12 @@ class SimpleTextFieldsReader extends FieldsProducer {
}
}
}
-
+
private TreeMap readFields(IndexInput in) throws IOException {
ChecksumIndexInput input = new BufferedChecksumIndexInput(in);
BytesRefBuilder scratch = new BytesRefBuilder();
TreeMap fields = new TreeMap<>();
-
+
while (true) {
SimpleTextUtil.readLine(input, scratch);
if (scratch.get().equals(END)) {
@@ -206,9 +206,26 @@ class SimpleTextFieldsReader extends FieldsProducer {
public long totalTermFreq() {
return indexOptions == IndexOptions.DOCS ? -1 : totalTermFreq;
}
-
+
@Override
- public DocsEnum docs(Bits liveDocs, DocsEnum reuse, int flags) throws IOException {
+ public PostingsEnum postings(Bits liveDocs, PostingsEnum reuse, int flags) throws IOException {
+
+ if (PostingsEnum.requiresPositions(flags)) {
+ if (indexOptions.compareTo(IndexOptions.DOCS_AND_FREQS_AND_POSITIONS) < 0) {
+ // Positions were not indexed
+ return null;
+ }
+
+ SimpleTextPostingsEnum docsAndPositionsEnum;
+ if (reuse != null && reuse instanceof SimpleTextPostingsEnum && ((SimpleTextPostingsEnum) reuse).canReuse(SimpleTextFieldsReader.this.in)) {
+ docsAndPositionsEnum = (SimpleTextPostingsEnum) reuse;
+ } else {
+ docsAndPositionsEnum = new SimpleTextPostingsEnum();
+ }
+ return docsAndPositionsEnum.reset(docsStart, liveDocs, indexOptions, docFreq);
+
+ }
+
SimpleTextDocsEnum docsEnum;
if (reuse != null && reuse instanceof SimpleTextDocsEnum && ((SimpleTextDocsEnum) reuse).canReuse(SimpleTextFieldsReader.this.in)) {
docsEnum = (SimpleTextDocsEnum) reuse;
@@ -218,22 +235,6 @@ class SimpleTextFieldsReader extends FieldsProducer {
return docsEnum.reset(docsStart, liveDocs, indexOptions == IndexOptions.DOCS, docFreq);
}
- @Override
- public DocsAndPositionsEnum docsAndPositions(Bits liveDocs, DocsAndPositionsEnum reuse, int flags) throws IOException {
-
- if (indexOptions.compareTo(IndexOptions.DOCS_AND_FREQS_AND_POSITIONS) < 0) {
- // Positions were not indexed
- return null;
- }
-
- SimpleTextDocsAndPositionsEnum docsAndPositionsEnum;
- if (reuse != null && reuse instanceof SimpleTextDocsAndPositionsEnum && ((SimpleTextDocsAndPositionsEnum) reuse).canReuse(SimpleTextFieldsReader.this.in)) {
- docsAndPositionsEnum = (SimpleTextDocsAndPositionsEnum) reuse;
- } else {
- docsAndPositionsEnum = new SimpleTextDocsAndPositionsEnum();
- }
- return docsAndPositionsEnum.reset(docsStart, liveDocs, indexOptions, docFreq);
- }
}
private class SimpleTextDocsEnum extends DocsEnum {
@@ -246,7 +247,7 @@ class SimpleTextFieldsReader extends FieldsProducer {
private final BytesRefBuilder scratch = new BytesRefBuilder();
private final CharsRefBuilder scratchUTF16 = new CharsRefBuilder();
private int cost;
-
+
public SimpleTextDocsEnum() {
this.inStart = SimpleTextFieldsReader.this.in;
this.in = this.inStart.clone();
@@ -276,6 +277,12 @@ class SimpleTextFieldsReader extends FieldsProducer {
return tf;
}
+ @Override
+ public int nextPosition() throws IOException {
+ assert false : "Shouldn't be calling nextPosition() if you haven't asked for positions";
+ return -1;
+ }
+
@Override
public int nextDoc() throws IOException {
if (docID == NO_MORE_DOCS) {
@@ -328,14 +335,14 @@ class SimpleTextFieldsReader extends FieldsProducer {
// Naive -- better to index skip data
return slowAdvance(target);
}
-
+
@Override
public long cost() {
return cost;
}
}
- private class SimpleTextDocsAndPositionsEnum extends DocsAndPositionsEnum {
+ private class SimpleTextPostingsEnum extends PostingsEnum {
private final IndexInput inStart;
private final IndexInput in;
private int docID = -1;
@@ -345,6 +352,7 @@ class SimpleTextFieldsReader extends FieldsProducer {
private final BytesRefBuilder scratch2 = new BytesRefBuilder();
private final CharsRefBuilder scratchUTF16 = new CharsRefBuilder();
private final CharsRefBuilder scratchUTF16_2 = new CharsRefBuilder();
+ private int pos;
private BytesRef payload;
private long nextDocStart;
private boolean readOffsets;
@@ -353,7 +361,7 @@ class SimpleTextFieldsReader extends FieldsProducer {
private int endOffset;
private int cost;
- public SimpleTextDocsAndPositionsEnum() {
+ public SimpleTextPostingsEnum() {
this.inStart = SimpleTextFieldsReader.this.in;
this.in = inStart.clone();
}
@@ -362,7 +370,7 @@ class SimpleTextFieldsReader extends FieldsProducer {
return in == inStart;
}
- public SimpleTextDocsAndPositionsEnum reset(long fp, Bits liveDocs, IndexOptions indexOptions, int docFreq) {
+ public SimpleTextPostingsEnum reset(long fp, Bits liveDocs, IndexOptions indexOptions, int docFreq) {
this.liveDocs = liveDocs;
nextDocStart = fp;
docID = -1;
@@ -437,7 +445,6 @@ class SimpleTextFieldsReader extends FieldsProducer {
@Override
public int nextPosition() throws IOException {
- final int pos;
if (readPositions) {
SimpleTextUtil.readLine(in, scratch);
assert StringHelper.startsWith(scratch.get(), POS): "got line=" + scratch.get().utf8ToString();
@@ -488,7 +495,7 @@ class SimpleTextFieldsReader extends FieldsProducer {
public BytesRef getPayload() {
return payload;
}
-
+
@Override
public long cost() {
return cost;
@@ -506,9 +513,9 @@ class SimpleTextFieldsReader extends FieldsProducer {
}
private static final long TERMS_BASE_RAM_BYTES_USED =
- RamUsageEstimator.shallowSizeOfInstance(SimpleTextTerms.class)
- + RamUsageEstimator.shallowSizeOfInstance(BytesRef.class)
- + RamUsageEstimator.shallowSizeOfInstance(CharsRef.class);
+ RamUsageEstimator.shallowSizeOfInstance(SimpleTextTerms.class)
+ + RamUsageEstimator.shallowSizeOfInstance(BytesRef.class)
+ + RamUsageEstimator.shallowSizeOfInstance(CharsRef.class);
private class SimpleTextTerms extends Terms implements Accountable {
private final long termsStart;
private final FieldInfo fieldInfo;
@@ -533,7 +540,7 @@ class SimpleTextFieldsReader extends FieldsProducer {
final Builder>> b;
final PairOutputs outputsInner = new PairOutputs<>(posIntOutputs, posIntOutputs);
final PairOutputs> outputs = new PairOutputs<>(posIntOutputs,
- outputsInner);
+ outputsInner);
b = new Builder<>(FST.INPUT_TYPE.BYTE1, outputs);
IndexInput in = SimpleTextFieldsReader.this.in.clone();
in.seek(termsStart);
@@ -548,8 +555,8 @@ class SimpleTextFieldsReader extends FieldsProducer {
if (scratch.get().equals(END) || StringHelper.startsWith(scratch.get(), FIELD)) {
if (lastDocsStart != -1) {
b.add(Util.toIntsRef(lastTerm.get(), scratchIntsRef),
- outputs.newPair(lastDocsStart,
- outputsInner.newPair((long) docFreq, totalTermFreq)));
+ outputs.newPair(lastDocsStart,
+ outputsInner.newPair((long) docFreq, totalTermFreq)));
sumTotalTermFreq += totalTermFreq;
}
break;
@@ -565,7 +572,7 @@ class SimpleTextFieldsReader extends FieldsProducer {
} else if (StringHelper.startsWith(scratch.get(), TERM)) {
if (lastDocsStart != -1) {
b.add(Util.toIntsRef(lastTerm.get(), scratchIntsRef), outputs.newPair(lastDocsStart,
- outputsInner.newPair((long) docFreq, totalTermFreq)));
+ outputsInner.newPair((long) docFreq, totalTermFreq)));
}
lastDocsStart = in.getFilePointer();
final int len = scratch.length() - TERM.length;
@@ -652,7 +659,7 @@ class SimpleTextFieldsReader extends FieldsProducer {
public boolean hasPositions() {
return fieldInfo.getIndexOptions().compareTo(IndexOptions.DOCS_AND_FREQS_AND_POSITIONS) >= 0;
}
-
+
@Override
public boolean hasPayloads() {
return fieldInfo.hasPayloads();
diff --git a/lucene/codecs/src/java/org/apache/lucene/codecs/simpletext/SimpleTextFieldsWriter.java b/lucene/codecs/src/java/org/apache/lucene/codecs/simpletext/SimpleTextFieldsWriter.java
index 656713d1c62..436a204b91c 100644
--- a/lucene/codecs/src/java/org/apache/lucene/codecs/simpletext/SimpleTextFieldsWriter.java
+++ b/lucene/codecs/src/java/org/apache/lucene/codecs/simpletext/SimpleTextFieldsWriter.java
@@ -20,8 +20,7 @@ package org.apache.lucene.codecs.simpletext;
import java.io.IOException;
import org.apache.lucene.codecs.FieldsConsumer;
-import org.apache.lucene.index.DocsAndPositionsEnum;
-import org.apache.lucene.index.DocsEnum;
+import org.apache.lucene.index.PostingsEnum;
import org.apache.lucene.index.FieldInfo;
import org.apache.lucene.index.FieldInfos;
import org.apache.lucene.index.Fields;
@@ -33,7 +32,7 @@ import org.apache.lucene.util.BytesRef;
import org.apache.lucene.util.BytesRefBuilder;
class SimpleTextFieldsWriter extends FieldsConsumer {
-
+
private IndexOutput out;
private final BytesRefBuilder scratch = new BytesRefBuilder();
private final SegmentWriteState writeState;
@@ -79,22 +78,21 @@ class SimpleTextFieldsWriter extends FieldsConsumer {
int flags = 0;
if (hasPositions) {
-
+ flags = PostingsEnum.FLAG_POSITIONS;
if (hasPayloads) {
- flags = flags | DocsAndPositionsEnum.FLAG_PAYLOADS;
+ flags = flags | PostingsEnum.FLAG_PAYLOADS;
}
if (hasOffsets) {
- flags = flags | DocsAndPositionsEnum.FLAG_OFFSETS;
+ flags = flags | PostingsEnum.FLAG_OFFSETS;
}
} else {
if (hasFreqs) {
- flags = flags | DocsEnum.FLAG_FREQS;
+ flags = flags | PostingsEnum.FLAG_FREQS;
}
}
TermsEnum termsEnum = terms.iterator(null);
- DocsAndPositionsEnum posEnum = null;
- DocsEnum docsEnum = null;
+ PostingsEnum postingsEnum = null;
// for each term in field
while(true) {
@@ -103,20 +101,16 @@ class SimpleTextFieldsWriter extends FieldsConsumer {
break;
}
- if (hasPositions) {
- posEnum = termsEnum.docsAndPositions(null, posEnum, flags);
- docsEnum = posEnum;
- } else {
- docsEnum = termsEnum.docs(null, docsEnum, flags);
- }
- assert docsEnum != null: "termsEnum=" + termsEnum + " hasPos=" + hasPositions + " flags=" + flags;
+ postingsEnum = termsEnum.postings(null, postingsEnum, flags);
+
+ assert postingsEnum != null: "termsEnum=" + termsEnum + " hasPos=" + hasPositions + " flags=" + flags;
boolean wroteTerm = false;
// for each doc in field+term
while(true) {
- int doc = docsEnum.nextDoc();
- if (doc == DocsEnum.NO_MORE_DOCS) {
+ int doc = postingsEnum.nextDoc();
+ if (doc == PostingsEnum.NO_MORE_DOCS) {
break;
}
@@ -143,7 +137,7 @@ class SimpleTextFieldsWriter extends FieldsConsumer {
write(Integer.toString(doc));
newline();
if (hasFreqs) {
- int freq = docsEnum.freq();
+ int freq = postingsEnum.freq();
write(FREQ);
write(Integer.toString(freq));
newline();
@@ -154,15 +148,15 @@ class SimpleTextFieldsWriter extends FieldsConsumer {
// for each pos in field+term+doc
for(int i=0;i= startOffset;
assert startOffset >= lastStartOffset: "startOffset=" + startOffset + " lastStartOffset=" + lastStartOffset;
lastStartOffset = startOffset;
@@ -174,7 +168,7 @@ class SimpleTextFieldsWriter extends FieldsConsumer {
newline();
}
- BytesRef payload = posEnum.getPayload();
+ BytesRef payload = postingsEnum.getPayload();
if (payload != null && payload.length > 0) {
assert payload.length != 0;
diff --git a/lucene/codecs/src/java/org/apache/lucene/codecs/simpletext/SimpleTextTermVectorsReader.java b/lucene/codecs/src/java/org/apache/lucene/codecs/simpletext/SimpleTextTermVectorsReader.java
index 641ff6c1703..ebe143cb78c 100644
--- a/lucene/codecs/src/java/org/apache/lucene/codecs/simpletext/SimpleTextTermVectorsReader.java
+++ b/lucene/codecs/src/java/org/apache/lucene/codecs/simpletext/SimpleTextTermVectorsReader.java
@@ -25,8 +25,8 @@ import java.util.SortedMap;
import java.util.TreeMap;
import org.apache.lucene.codecs.TermVectorsReader;
-import org.apache.lucene.index.DocsAndPositionsEnum;
import org.apache.lucene.index.DocsEnum;
+import org.apache.lucene.index.PostingsEnum;
import org.apache.lucene.index.Fields;
import org.apache.lucene.index.IndexFileNames;
import org.apache.lucene.index.SegmentInfo;
@@ -59,15 +59,15 @@ import static org.apache.lucene.codecs.simpletext.SimpleTextTermVectorsWriter.*;
public class SimpleTextTermVectorsReader extends TermVectorsReader {
private static final long BASE_RAM_BYTES_USED =
- RamUsageEstimator.shallowSizeOfInstance(SimpleTextTermVectorsReader.class)
- + RamUsageEstimator.shallowSizeOfInstance(BytesRef.class)
- + RamUsageEstimator.shallowSizeOfInstance(CharsRef.class);
+ RamUsageEstimator.shallowSizeOfInstance(SimpleTextTermVectorsReader.class)
+ + RamUsageEstimator.shallowSizeOfInstance(BytesRef.class)
+ + RamUsageEstimator.shallowSizeOfInstance(CharsRef.class);
private long offsets[]; /* docid -> offset in .vec file */
private IndexInput in;
private BytesRefBuilder scratch = new BytesRefBuilder();
private CharsRefBuilder scratchUTF16 = new CharsRefBuilder();
-
+
public SimpleTextTermVectorsReader(Directory directory, SegmentInfo si, IOContext context) throws IOException {
boolean success = false;
try {
@@ -82,15 +82,15 @@ public class SimpleTextTermVectorsReader extends TermVectorsReader {
}
readIndex(si.getDocCount());
}
-
+
// used by clone
SimpleTextTermVectorsReader(long offsets[], IndexInput in) {
this.offsets = offsets;
this.in = in;
}
-
- // we don't actually write a .tvx-like index, instead we read the
- // vectors file in entirety up-front and save the offsets
+
+ // we don't actually write a .tvx-like index, instead we read the
+ // vectors file in entirety up-front and save the offsets
// so we can seek to the data later.
private void readIndex(int maxDoc) throws IOException {
ChecksumIndexInput input = new BufferedChecksumIndexInput(in);
@@ -106,7 +106,7 @@ public class SimpleTextTermVectorsReader extends TermVectorsReader {
SimpleTextUtil.checkFooter(input);
assert upto == offsets.length;
}
-
+
@Override
public Fields get(int doc) throws IOException {
SortedMap fields = new TreeMap<>();
@@ -122,30 +122,30 @@ public class SimpleTextTermVectorsReader extends TermVectorsReader {
assert StringHelper.startsWith(scratch.get(), FIELD);
// skip fieldNumber:
parseIntAt(FIELD.length);
-
+
readLine();
assert StringHelper.startsWith(scratch.get(), FIELDNAME);
String fieldName = readString(FIELDNAME.length, scratch);
-
+
readLine();
assert StringHelper.startsWith(scratch.get(), FIELDPOSITIONS);
boolean positions = Boolean.parseBoolean(readString(FIELDPOSITIONS.length, scratch));
-
+
readLine();
assert StringHelper.startsWith(scratch.get(), FIELDOFFSETS);
boolean offsets = Boolean.parseBoolean(readString(FIELDOFFSETS.length, scratch));
-
+
readLine();
assert StringHelper.startsWith(scratch.get(), FIELDPAYLOADS);
boolean payloads = Boolean.parseBoolean(readString(FIELDPAYLOADS.length, scratch));
-
+
readLine();
assert StringHelper.startsWith(scratch.get(), FIELDTERMCOUNT);
int termCount = parseIntAt(FIELDTERMCOUNT.length);
-
+
SimpleTVTerms terms = new SimpleTVTerms(offsets, positions, payloads);
fields.put(fieldName, terms);
-
+
BytesRefBuilder term = new BytesRefBuilder();
for (int j = 0; j < termCount; j++) {
readLine();
@@ -154,14 +154,14 @@ public class SimpleTextTermVectorsReader extends TermVectorsReader {
term.grow(termLength);
term.setLength(termLength);
System.arraycopy(scratch.bytes(), TERMTEXT.length, term.bytes(), 0, termLength);
-
+
SimpleTVPostings postings = new SimpleTVPostings();
terms.terms.put(term.toBytesRef(), postings);
-
+
readLine();
assert StringHelper.startsWith(scratch.get(), TERMFREQ);
postings.freq = parseIntAt(TERMFREQ.length);
-
+
if (positions || offsets) {
if (positions) {
postings.positions = new int[postings.freq];
@@ -169,12 +169,12 @@ public class SimpleTextTermVectorsReader extends TermVectorsReader {
postings.payloads = new BytesRef[postings.freq];
}
}
-
+
if (offsets) {
postings.startOffsets = new int[postings.freq];
postings.endOffsets = new int[postings.freq];
}
-
+
for (int k = 0; k < postings.freq; k++) {
if (positions) {
readLine();
@@ -192,12 +192,12 @@ public class SimpleTextTermVectorsReader extends TermVectorsReader {
}
}
}
-
+
if (offsets) {
readLine();
assert StringHelper.startsWith(scratch.get(), STARTOFFSET);
postings.startOffsets[k] = parseIntAt(STARTOFFSET.length);
-
+
readLine();
assert StringHelper.startsWith(scratch.get(), ENDOFFSET);
postings.endOffsets[k] = parseIntAt(ENDOFFSET.length);
@@ -216,11 +216,11 @@ public class SimpleTextTermVectorsReader extends TermVectorsReader {
}
return new SimpleTextTermVectorsReader(offsets, in.clone());
}
-
+
@Override
public void close() throws IOException {
try {
- IOUtils.close(in);
+ IOUtils.close(in);
} finally {
in = null;
offsets = null;
@@ -230,20 +230,20 @@ public class SimpleTextTermVectorsReader extends TermVectorsReader {
private void readLine() throws IOException {
SimpleTextUtil.readLine(in, scratch);
}
-
+
private int parseIntAt(int offset) {
scratchUTF16.copyUTF8Bytes(scratch.bytes(), offset, scratch.length()-offset);
return ArrayUtil.parseInt(scratchUTF16.chars(), 0, scratchUTF16.length());
}
-
+
private String readString(int offset, BytesRefBuilder scratch) {
scratchUTF16.copyUTF8Bytes(scratch.bytes(), offset, scratch.length()-offset);
return scratchUTF16.toString();
}
-
+
private class SimpleTVFields extends Fields {
    private final SortedMap<String,SimpleTVTerms> fields;
-
+
    SimpleTVFields(SortedMap<String,SimpleTVTerms> fields) {
this.fields = fields;
}
@@ -263,20 +263,20 @@ public class SimpleTextTermVectorsReader extends TermVectorsReader {
return fields.size();
}
}
-
+
private static class SimpleTVTerms extends Terms {
    final SortedMap<BytesRef,SimpleTVPostings> terms;
final boolean hasOffsets;
final boolean hasPositions;
final boolean hasPayloads;
-
+
SimpleTVTerms(boolean hasOffsets, boolean hasPositions, boolean hasPayloads) {
this.hasOffsets = hasOffsets;
this.hasPositions = hasPositions;
this.hasPayloads = hasPayloads;
terms = new TreeMap<>();
}
-
+
@Override
public TermsEnum iterator(TermsEnum reuse) throws IOException {
// TODO: reuse
@@ -317,13 +317,13 @@ public class SimpleTextTermVectorsReader extends TermVectorsReader {
public boolean hasPositions() {
return hasPositions;
}
-
+
@Override
public boolean hasPayloads() {
return hasPayloads;
}
}
-
+
private static class SimpleTVPostings {
private int freq;
private int positions[];
@@ -331,17 +331,17 @@ public class SimpleTextTermVectorsReader extends TermVectorsReader {
private int endOffsets[];
private BytesRef payloads[];
}
-
+
private static class SimpleTVTermsEnum extends TermsEnum {
    SortedMap<BytesRef,SimpleTVPostings> terms;
    Iterator<Map.Entry<BytesRef,SimpleTVPostings>> iterator;
    Map.Entry<BytesRef,SimpleTVPostings> current;
-
+
    SimpleTVTermsEnum(SortedMap<BytesRef,SimpleTVPostings> terms) {
this.terms = terms;
this.iterator = terms.entrySet().iterator();
}
-
+
@Override
public SeekStatus seekCeil(BytesRef text) throws IOException {
iterator = terms.tailMap(text).entrySet().iterator();
@@ -388,26 +388,27 @@ public class SimpleTextTermVectorsReader extends TermVectorsReader {
}
@Override
- public DocsEnum docs(Bits liveDocs, DocsEnum reuse, int flags) throws IOException {
+ public PostingsEnum postings(Bits liveDocs, PostingsEnum reuse, int flags) throws IOException {
+
+ if (PostingsEnum.requiresPositions(flags)) {
+ SimpleTVPostings postings = current.getValue();
+ if (postings.positions == null && postings.startOffsets == null) {
+ return null;
+ }
+ // TODO: reuse
+ SimpleTVPostingsEnum e = new SimpleTVPostingsEnum();
+ e.reset(liveDocs, postings.positions, postings.startOffsets, postings.endOffsets, postings.payloads);
+ return e;
+ }
+
// TODO: reuse
SimpleTVDocsEnum e = new SimpleTVDocsEnum();
- e.reset(liveDocs, (flags & DocsEnum.FLAG_FREQS) == 0 ? 1 : current.getValue().freq);
+ e.reset(liveDocs, (flags & PostingsEnum.FLAG_FREQS) == 0 ? 1 : current.getValue().freq);
return e;
}
- @Override
- public DocsAndPositionsEnum docsAndPositions(Bits liveDocs, DocsAndPositionsEnum reuse, int flags) throws IOException {
- SimpleTVPostings postings = current.getValue();
- if (postings.positions == null && postings.startOffsets == null) {
- return null;
- }
- // TODO: reuse
- SimpleTVDocsAndPositionsEnum e = new SimpleTVDocsAndPositionsEnum();
- e.reset(liveDocs, postings.positions, postings.startOffsets, postings.endOffsets, postings.payloads);
- return e;
- }
}
-
+
// note: these two enum classes are exactly like the Default impl...
private static class SimpleTVDocsEnum extends DocsEnum {
private boolean didNext;
@@ -421,6 +422,12 @@ public class SimpleTextTermVectorsReader extends TermVectorsReader {
return freq;
}
+ @Override
+ public int nextPosition() throws IOException {
+ assert false;
+ return -1;
+ }
+
@Override
public int docID() {
return doc;
@@ -447,14 +454,14 @@ public class SimpleTextTermVectorsReader extends TermVectorsReader {
this.doc = -1;
didNext = false;
}
-
+
@Override
public long cost() {
return 1;
}
}
-
- private static class SimpleTVDocsAndPositionsEnum extends DocsAndPositionsEnum {
+
+ private static class SimpleTVPostingsEnum extends PostingsEnum {
private boolean didNext;
private int doc = -1;
private int nextPos;
@@ -512,11 +519,11 @@ public class SimpleTextTermVectorsReader extends TermVectorsReader {
@Override
public int nextPosition() {
- assert (positions != null && nextPos < positions.length) ||
- startOffsets != null && nextPos < startOffsets.length;
if (positions != null) {
+ assert nextPos < positions.length;
return positions[nextPos++];
} else {
+ assert nextPos < startOffsets.length;
nextPos++;
return -1;
}
@@ -539,7 +546,7 @@ public class SimpleTextTermVectorsReader extends TermVectorsReader {
return endOffsets[nextPos-1];
}
}
-
+
@Override
public long cost() {
return 1;
@@ -550,7 +557,7 @@ public class SimpleTextTermVectorsReader extends TermVectorsReader {
public long ramBytesUsed() {
return BASE_RAM_BYTES_USED + RamUsageEstimator.sizeOf(offsets);
}
-
+
@Override
public String toString() {
return getClass().getSimpleName();
diff --git a/lucene/codecs/src/test/org/apache/lucene/codecs/memory/TestDirectPostingsFormat.java b/lucene/codecs/src/test/org/apache/lucene/codecs/memory/TestDirectPostingsFormat.java
index 324090304e0..afd261c98f4 100644
--- a/lucene/codecs/src/test/org/apache/lucene/codecs/memory/TestDirectPostingsFormat.java
+++ b/lucene/codecs/src/test/org/apache/lucene/codecs/memory/TestDirectPostingsFormat.java
@@ -28,6 +28,11 @@ public class TestDirectPostingsFormat extends BasePostingsFormatTestCase {
// TODO: randomize parameters
private final Codec codec = TestUtil.alwaysPostingsFormat(new DirectPostingsFormat());
+ @Override
+ protected boolean isPostingsEnumReuseImplemented() {
+ return false;
+ }
+
@Override
protected Codec getCodec() {
return codec;
diff --git a/lucene/core/src/java/org/apache/lucene/analysis/Token.java b/lucene/core/src/java/org/apache/lucene/analysis/Token.java
index f0a66f5cfbe..cdb84828d9f 100644
--- a/lucene/core/src/java/org/apache/lucene/analysis/Token.java
+++ b/lucene/core/src/java/org/apache/lucene/analysis/Token.java
@@ -20,7 +20,7 @@ package org.apache.lucene.analysis;
import org.apache.lucene.analysis.tokenattributes.FlagsAttribute;
import org.apache.lucene.analysis.tokenattributes.PackedTokenAttributeImpl;
import org.apache.lucene.analysis.tokenattributes.PayloadAttribute;
-import org.apache.lucene.index.DocsAndPositionsEnum; // for javadoc
+import org.apache.lucene.index.PostingsEnum;
import org.apache.lucene.util.Attribute;
import org.apache.lucene.util.AttributeFactory;
import org.apache.lucene.util.AttributeImpl;
@@ -43,7 +43,7 @@ import org.apache.lucene.util.BytesRef;
with type "eos". The default token type is "word".
A Token can optionally have metadata (a.k.a. payload) in the form of a variable
- length byte array. Use {@link DocsAndPositionsEnum#getPayload()} to retrieve the
+ length byte array. Use {@link org.apache.lucene.index.PostingsEnum#getPayload()} to retrieve the
payloads from the index.
diff --git a/lucene/core/src/java/org/apache/lucene/analysis/tokenattributes/PayloadAttribute.java b/lucene/core/src/java/org/apache/lucene/analysis/tokenattributes/PayloadAttribute.java
index daf6d00b5fa..f4d09a613bb 100644
--- a/lucene/core/src/java/org/apache/lucene/analysis/tokenattributes/PayloadAttribute.java
+++ b/lucene/core/src/java/org/apache/lucene/analysis/tokenattributes/PayloadAttribute.java
@@ -17,7 +17,6 @@ package org.apache.lucene.analysis.tokenattributes;
* limitations under the License.
*/
-import org.apache.lucene.index.DocsAndPositionsEnum; // javadocs
import org.apache.lucene.util.Attribute;
import org.apache.lucene.util.BytesRef;
@@ -33,7 +32,7 @@ import org.apache.lucene.util.BytesRef;
* best to use the minimum number of bytes necessary. Some codec implementations
* may optimize payload storage when all payloads have the same length.
*
- * @see DocsAndPositionsEnum
+ * @see org.apache.lucene.index.PostingsEnum
*/
public interface PayloadAttribute extends Attribute {
/**
diff --git a/lucene/core/src/java/org/apache/lucene/analysis/tokenattributes/PositionIncrementAttribute.java b/lucene/core/src/java/org/apache/lucene/analysis/tokenattributes/PositionIncrementAttribute.java
index 9afd2f9b838..807987b26b2 100644
--- a/lucene/core/src/java/org/apache/lucene/analysis/tokenattributes/PositionIncrementAttribute.java
+++ b/lucene/core/src/java/org/apache/lucene/analysis/tokenattributes/PositionIncrementAttribute.java
@@ -43,7 +43,7 @@ import org.apache.lucene.util.Attribute;
*
*
*
- * @see org.apache.lucene.index.DocsAndPositionsEnum
+ * @see org.apache.lucene.index.PostingsEnum
*/
public interface PositionIncrementAttribute extends Attribute {
/** Set the position increment. The default value is one.
diff --git a/lucene/core/src/java/org/apache/lucene/codecs/BlockTermState.java b/lucene/core/src/java/org/apache/lucene/codecs/BlockTermState.java
index e6d7a92ddaa..0f133e12207 100644
--- a/lucene/core/src/java/org/apache/lucene/codecs/BlockTermState.java
+++ b/lucene/core/src/java/org/apache/lucene/codecs/BlockTermState.java
@@ -16,13 +16,12 @@ package org.apache.lucene.codecs;
* limitations under the License.
*/
-import org.apache.lucene.index.DocsEnum; // javadocs
import org.apache.lucene.index.OrdTermState;
import org.apache.lucene.index.TermState;
/**
* Holds all state required for {@link PostingsReaderBase}
- * to produce a {@link DocsEnum} without re-seeking the
+ * to produce a {@link org.apache.lucene.index.PostingsEnum} without re-seeking the
* terms dict.
*/
public class BlockTermState extends OrdTermState {
diff --git a/lucene/core/src/java/org/apache/lucene/codecs/PostingsReaderBase.java b/lucene/core/src/java/org/apache/lucene/codecs/PostingsReaderBase.java
index 5681c1904bc..baf69c0b441 100644
--- a/lucene/core/src/java/org/apache/lucene/codecs/PostingsReaderBase.java
+++ b/lucene/core/src/java/org/apache/lucene/codecs/PostingsReaderBase.java
@@ -20,8 +20,7 @@ package org.apache.lucene.codecs;
import java.io.Closeable;
import java.io.IOException;
-import org.apache.lucene.index.DocsAndPositionsEnum;
-import org.apache.lucene.index.DocsEnum;
+import org.apache.lucene.index.PostingsEnum;
import org.apache.lucene.index.FieldInfo;
import org.apache.lucene.index.SegmentReadState;
import org.apache.lucene.store.DataInput;
@@ -31,8 +30,8 @@ import org.apache.lucene.util.Bits;
/** The core terms dictionaries (BlockTermsReader,
* BlockTreeTermsReader) interact with a single instance
- * of this class to manage creation of {@link DocsEnum} and
- * {@link DocsAndPositionsEnum} instances. It provides an
+ * of this class to manage creation of {@link org.apache.lucene.index.PostingsEnum} and
+ * {@link org.apache.lucene.index.PostingsEnum} instances. It provides an
* IndexInput (termsIn) where this class may read any
* previously stored data that it had written in its
* corresponding {@link PostingsWriterBase} at indexing
@@ -66,12 +65,7 @@ public abstract class PostingsReaderBase implements Closeable, Accountable {
/** Must fully consume state, since after this call that
* TermState may be reused. */
- public abstract DocsEnum docs(FieldInfo fieldInfo, BlockTermState state, Bits skipDocs, DocsEnum reuse, int flags) throws IOException;
-
- /** Must fully consume state, since after this call that
- * TermState may be reused. */
- public abstract DocsAndPositionsEnum docsAndPositions(FieldInfo fieldInfo, BlockTermState state, Bits skipDocs, DocsAndPositionsEnum reuse,
- int flags) throws IOException;
+ public abstract PostingsEnum postings(FieldInfo fieldInfo, BlockTermState state, Bits skipDocs, PostingsEnum reuse, int flags) throws IOException;
/**
* Checks consistency of this reader.
@@ -81,7 +75,7 @@ public abstract class PostingsReaderBase implements Closeable, Accountable {
* @lucene.internal
*/
public abstract void checkIntegrity() throws IOException;
-
+
@Override
public abstract void close() throws IOException;
}
diff --git a/lucene/core/src/java/org/apache/lucene/codecs/PostingsWriterBase.java b/lucene/core/src/java/org/apache/lucene/codecs/PostingsWriterBase.java
index 0dc7bb5b41c..4df8f4ebcf8 100644
--- a/lucene/core/src/java/org/apache/lucene/codecs/PostingsWriterBase.java
+++ b/lucene/core/src/java/org/apache/lucene/codecs/PostingsWriterBase.java
@@ -17,12 +17,7 @@ package org.apache.lucene.codecs;
* limitations under the License.
*/
-import java.io.Closeable;
-import java.io.IOException;
-
import org.apache.lucene.codecs.blocktree.BlockTreeTermsWriter;
-import org.apache.lucene.index.DocsAndPositionsEnum; // javadocs
-import org.apache.lucene.index.DocsEnum; // javadocs
import org.apache.lucene.index.FieldInfo;
import org.apache.lucene.index.SegmentWriteState;
import org.apache.lucene.index.TermsEnum;
@@ -31,6 +26,9 @@ import org.apache.lucene.store.IndexOutput;
import org.apache.lucene.util.BytesRef;
import org.apache.lucene.util.FixedBitSet;
+import java.io.Closeable;
+import java.io.IOException;
+
/**
* Class that plugs into term dictionaries, such as {@link
* BlockTreeTermsWriter}, and handles writing postings.
@@ -54,8 +52,8 @@ public abstract class PostingsWriterBase implements Closeable {
public abstract void init(IndexOutput termsOut, SegmentWriteState state) throws IOException;
/** Write all postings for one term; use the provided
- * {@link TermsEnum} to pull a {@link DocsEnum} or {@link
- * DocsAndPositionsEnum}. This method should not
+ * {@link TermsEnum} to pull a {@link org.apache.lucene.index.PostingsEnum}.
+ * This method should not
* re-position the {@code TermsEnum}! It is already
* positioned on the term that should be written. This
* method must set the bit in the provided {@link
diff --git a/lucene/core/src/java/org/apache/lucene/codecs/PushPostingsWriterBase.java b/lucene/core/src/java/org/apache/lucene/codecs/PushPostingsWriterBase.java
index 35ebba175ad..10f9032a0bd 100644
--- a/lucene/core/src/java/org/apache/lucene/codecs/PushPostingsWriterBase.java
+++ b/lucene/core/src/java/org/apache/lucene/codecs/PushPostingsWriterBase.java
@@ -19,8 +19,7 @@ package org.apache.lucene.codecs;
import java.io.IOException;
-import org.apache.lucene.index.DocsAndPositionsEnum;
-import org.apache.lucene.index.DocsEnum;
+import org.apache.lucene.index.PostingsEnum;
import org.apache.lucene.index.FieldInfo;
import org.apache.lucene.index.IndexOptions;
import org.apache.lucene.index.TermsEnum;
@@ -42,8 +41,7 @@ import org.apache.lucene.util.FixedBitSet;
public abstract class PushPostingsWriterBase extends PostingsWriterBase {
// Reused in writeTerm
- private DocsEnum docsEnum;
- private DocsAndPositionsEnum posEnum;
+ private PostingsEnum postingsEnum;
private int enumFlags;
/** {@link FieldInfo} of current field being written. */
@@ -100,18 +98,18 @@ public abstract class PushPostingsWriterBase extends PostingsWriterBase {
if (writeFreqs == false) {
enumFlags = 0;
} else if (writePositions == false) {
- enumFlags = DocsEnum.FLAG_FREQS;
+ enumFlags = PostingsEnum.FLAG_FREQS;
} else if (writeOffsets == false) {
if (writePayloads) {
- enumFlags = DocsAndPositionsEnum.FLAG_PAYLOADS;
+ enumFlags = PostingsEnum.FLAG_PAYLOADS;
} else {
- enumFlags = 0;
+ enumFlags = PostingsEnum.FLAG_POSITIONS;
}
} else {
if (writePayloads) {
- enumFlags = DocsAndPositionsEnum.FLAG_PAYLOADS | DocsAndPositionsEnum.FLAG_OFFSETS;
+ enumFlags = PostingsEnum.FLAG_PAYLOADS | PostingsEnum.FLAG_OFFSETS;
} else {
- enumFlags = DocsAndPositionsEnum.FLAG_OFFSETS;
+ enumFlags = PostingsEnum.FLAG_OFFSETS;
}
}
@@ -121,26 +119,21 @@ public abstract class PushPostingsWriterBase extends PostingsWriterBase {
@Override
public final BlockTermState writeTerm(BytesRef term, TermsEnum termsEnum, FixedBitSet docsSeen) throws IOException {
startTerm();
- if (writePositions == false) {
- docsEnum = termsEnum.docs(null, docsEnum, enumFlags);
- } else {
- posEnum = termsEnum.docsAndPositions(null, posEnum, enumFlags);
- docsEnum = posEnum;
- }
- assert docsEnum != null;
+ postingsEnum = termsEnum.postings(null, postingsEnum, enumFlags);
+ assert postingsEnum != null;
int docFreq = 0;
long totalTermFreq = 0;
while (true) {
- int docID = docsEnum.nextDoc();
- if (docID == DocsEnum.NO_MORE_DOCS) {
+ int docID = postingsEnum.nextDoc();
+ if (docID == PostingsEnum.NO_MORE_DOCS) {
break;
}
docFreq++;
docsSeen.set(docID);
int freq;
if (writeFreqs) {
- freq = docsEnum.freq();
+ freq = postingsEnum.freq();
totalTermFreq += freq;
} else {
freq = -1;
@@ -149,13 +142,13 @@ public abstract class PushPostingsWriterBase extends PostingsWriterBase {
if (writePositions) {
for(int i=0;i= 0;
+ assert !hasPositions || pos >= 0 ;
addPosition(pos, startOffset, endOffset, payload);
}
}
diff --git a/lucene/core/src/java/org/apache/lucene/codecs/blocktree/IntersectTermsEnum.java b/lucene/core/src/java/org/apache/lucene/codecs/blocktree/IntersectTermsEnum.java
index 952d226afd3..482cb6a248b 100644
--- a/lucene/core/src/java/org/apache/lucene/codecs/blocktree/IntersectTermsEnum.java
+++ b/lucene/core/src/java/org/apache/lucene/codecs/blocktree/IntersectTermsEnum.java
@@ -19,9 +19,7 @@ package org.apache.lucene.codecs.blocktree;
import java.io.IOException;
-import org.apache.lucene.index.DocsAndPositionsEnum;
-import org.apache.lucene.index.DocsEnum;
-import org.apache.lucene.index.IndexOptions;
+import org.apache.lucene.index.PostingsEnum;
import org.apache.lucene.index.TermState;
import org.apache.lucene.index.TermsEnum;
import org.apache.lucene.store.IndexInput;
@@ -203,20 +201,9 @@ final class IntersectTermsEnum extends TermsEnum {
}
@Override
- public DocsEnum docs(Bits skipDocs, DocsEnum reuse, int flags) throws IOException {
+ public PostingsEnum postings(Bits skipDocs, PostingsEnum reuse, int flags) throws IOException {
currentFrame.decodeMetaData();
- return fr.parent.postingsReader.docs(fr.fieldInfo, currentFrame.termState, skipDocs, reuse, flags);
- }
-
- @Override
- public DocsAndPositionsEnum docsAndPositions(Bits skipDocs, DocsAndPositionsEnum reuse, int flags) throws IOException {
- if (fr.fieldInfo.getIndexOptions().compareTo(IndexOptions.DOCS_AND_FREQS_AND_POSITIONS) < 0) {
- // Positions were not indexed:
- return null;
- }
-
- currentFrame.decodeMetaData();
- return fr.parent.postingsReader.docsAndPositions(fr.fieldInfo, currentFrame.termState, skipDocs, reuse, flags);
+ return fr.parent.postingsReader.postings(fr.fieldInfo, currentFrame.termState, skipDocs, reuse, flags);
}
private int getState() {
diff --git a/lucene/core/src/java/org/apache/lucene/codecs/blocktree/SegmentTermsEnum.java b/lucene/core/src/java/org/apache/lucene/codecs/blocktree/SegmentTermsEnum.java
index df67b07e4bf..2c4fff98f7c 100644
--- a/lucene/core/src/java/org/apache/lucene/codecs/blocktree/SegmentTermsEnum.java
+++ b/lucene/core/src/java/org/apache/lucene/codecs/blocktree/SegmentTermsEnum.java
@@ -21,9 +21,7 @@ import java.io.IOException;
import java.io.PrintStream;
import org.apache.lucene.codecs.BlockTermState;
-import org.apache.lucene.index.DocsAndPositionsEnum;
-import org.apache.lucene.index.DocsEnum;
-import org.apache.lucene.index.IndexOptions;
+import org.apache.lucene.index.PostingsEnum;
import org.apache.lucene.index.TermState;
import org.apache.lucene.index.TermsEnum;
import org.apache.lucene.store.ByteArrayDataInput;
@@ -981,7 +979,7 @@ final class SegmentTermsEnum extends TermsEnum {
}
@Override
- public DocsEnum docs(Bits skipDocs, DocsEnum reuse, int flags) throws IOException {
+ public PostingsEnum postings(Bits skipDocs, PostingsEnum reuse, int flags) throws IOException {
assert !eof;
//if (DEBUG) {
//System.out.println("BTTR.docs seg=" + segment);
@@ -990,19 +988,7 @@ final class SegmentTermsEnum extends TermsEnum {
//if (DEBUG) {
//System.out.println(" state=" + currentFrame.state);
//}
- return fr.parent.postingsReader.docs(fr.fieldInfo, currentFrame.state, skipDocs, reuse, flags);
- }
-
- @Override
- public DocsAndPositionsEnum docsAndPositions(Bits skipDocs, DocsAndPositionsEnum reuse, int flags) throws IOException {
- if (fr.fieldInfo.getIndexOptions().compareTo(IndexOptions.DOCS_AND_FREQS_AND_POSITIONS) < 0) {
- // Positions were not indexed:
- return null;
- }
-
- assert !eof;
- currentFrame.decodeMetaData();
- return fr.parent.postingsReader.docsAndPositions(fr.fieldInfo, currentFrame.state, skipDocs, reuse, flags);
+ return fr.parent.postingsReader.postings(fr.fieldInfo, currentFrame.state, skipDocs, reuse, flags);
}
@Override
diff --git a/lucene/core/src/java/org/apache/lucene/codecs/compressing/CompressingTermVectorsReader.java b/lucene/core/src/java/org/apache/lucene/codecs/compressing/CompressingTermVectorsReader.java
index 5ff54fc8481..88f1f24f36a 100644
--- a/lucene/core/src/java/org/apache/lucene/codecs/compressing/CompressingTermVectorsReader.java
+++ b/lucene/core/src/java/org/apache/lucene/codecs/compressing/CompressingTermVectorsReader.java
@@ -17,19 +17,6 @@ package org.apache.lucene.codecs.compressing;
* limitations under the License.
*/
-import static org.apache.lucene.codecs.compressing.CompressingTermVectorsWriter.PACKED_BLOCK_SIZE;
-import static org.apache.lucene.codecs.compressing.CompressingTermVectorsWriter.CODEC_SFX_DAT;
-import static org.apache.lucene.codecs.compressing.CompressingTermVectorsWriter.CODEC_SFX_IDX;
-import static org.apache.lucene.codecs.compressing.CompressingTermVectorsWriter.FLAGS_BITS;
-import static org.apache.lucene.codecs.compressing.CompressingTermVectorsWriter.OFFSETS;
-import static org.apache.lucene.codecs.compressing.CompressingTermVectorsWriter.PAYLOADS;
-import static org.apache.lucene.codecs.compressing.CompressingTermVectorsWriter.POSITIONS;
-import static org.apache.lucene.codecs.compressing.CompressingTermVectorsWriter.VECTORS_EXTENSION;
-import static org.apache.lucene.codecs.compressing.CompressingTermVectorsWriter.VECTORS_INDEX_EXTENSION;
-import static org.apache.lucene.codecs.compressing.CompressingTermVectorsWriter.VERSION_CHUNK_STATS;
-import static org.apache.lucene.codecs.compressing.CompressingTermVectorsWriter.VERSION_CURRENT;
-import static org.apache.lucene.codecs.compressing.CompressingTermVectorsWriter.VERSION_START;
-
import java.io.Closeable;
import java.io.IOException;
import java.util.Collection;
@@ -40,8 +27,7 @@ import java.util.NoSuchElementException;
import org.apache.lucene.codecs.CodecUtil;
import org.apache.lucene.codecs.TermVectorsReader;
import org.apache.lucene.index.CorruptIndexException;
-import org.apache.lucene.index.DocsAndPositionsEnum;
-import org.apache.lucene.index.DocsEnum;
+import org.apache.lucene.index.PostingsEnum;
import org.apache.lucene.index.FieldInfo;
import org.apache.lucene.index.FieldInfos;
import org.apache.lucene.index.Fields;
@@ -65,6 +51,18 @@ import org.apache.lucene.util.LongsRef;
import org.apache.lucene.util.packed.BlockPackedReaderIterator;
import org.apache.lucene.util.packed.PackedInts;
+import static org.apache.lucene.codecs.compressing.CompressingTermVectorsWriter.CODEC_SFX_DAT;
+import static org.apache.lucene.codecs.compressing.CompressingTermVectorsWriter.CODEC_SFX_IDX;
+import static org.apache.lucene.codecs.compressing.CompressingTermVectorsWriter.FLAGS_BITS;
+import static org.apache.lucene.codecs.compressing.CompressingTermVectorsWriter.OFFSETS;
+import static org.apache.lucene.codecs.compressing.CompressingTermVectorsWriter.PACKED_BLOCK_SIZE;
+import static org.apache.lucene.codecs.compressing.CompressingTermVectorsWriter.PAYLOADS;
+import static org.apache.lucene.codecs.compressing.CompressingTermVectorsWriter.POSITIONS;
+import static org.apache.lucene.codecs.compressing.CompressingTermVectorsWriter.VECTORS_EXTENSION;
+import static org.apache.lucene.codecs.compressing.CompressingTermVectorsWriter.VECTORS_INDEX_EXTENSION;
+import static org.apache.lucene.codecs.compressing.CompressingTermVectorsWriter.VERSION_CHUNK_STATS;
+import static org.apache.lucene.codecs.compressing.CompressingTermVectorsWriter.VERSION_CURRENT;
+import static org.apache.lucene.codecs.compressing.CompressingTermVectorsWriter.VERSION_START;
/**
* {@link TermVectorsReader} for {@link CompressingTermVectorsFormat}.
@@ -937,30 +935,27 @@ public final class CompressingTermVectorsReader extends TermVectorsReader implem
}
@Override
- public final DocsEnum docs(Bits liveDocs, DocsEnum reuse, int flags) throws IOException {
- final TVDocsEnum docsEnum;
- if (reuse != null && reuse instanceof TVDocsEnum) {
- docsEnum = (TVDocsEnum) reuse;
+ public final PostingsEnum postings(Bits liveDocs, PostingsEnum reuse, int flags) throws IOException {
+
+ if ((flags & PostingsEnum.FLAG_POSITIONS) >= PostingsEnum.FLAG_POSITIONS) {
+ if (positions == null && startOffsets == null)
+ return null;
+ }
+
+ final TVPostingsEnum docsEnum;
+ if (reuse != null && reuse instanceof TVPostingsEnum) {
+ docsEnum = (TVPostingsEnum) reuse;
} else {
- docsEnum = new TVDocsEnum();
+ docsEnum = new TVPostingsEnum();
}
docsEnum.reset(liveDocs, termFreqs[ord], positionIndex[ord], positions, startOffsets, lengths, payloads, payloadIndex);
return docsEnum;
}
- @Override
- public DocsAndPositionsEnum docsAndPositions(Bits liveDocs, DocsAndPositionsEnum reuse, int flags) throws IOException {
- if (positions == null && startOffsets == null) {
- return null;
- }
- // TODO: slightly sheisty
- return (DocsAndPositionsEnum) docs(liveDocs, reuse, flags);
- }
-
}
- private static class TVDocsEnum extends DocsAndPositionsEnum {
+ private static class TVPostingsEnum extends PostingsEnum {
private Bits liveDocs;
private int doc = -1;
@@ -974,7 +969,7 @@ public final class CompressingTermVectorsReader extends TermVectorsReader implem
private int basePayloadOffset;
private int i;
- TVDocsEnum() {
+ TVPostingsEnum() {
payload = new BytesRef();
}
diff --git a/lucene/core/src/java/org/apache/lucene/codecs/lucene50/Lucene50DocValuesProducer.java b/lucene/core/src/java/org/apache/lucene/codecs/lucene50/Lucene50DocValuesProducer.java
index 51f0d7977b5..c1fb5f59b24 100644
--- a/lucene/core/src/java/org/apache/lucene/codecs/lucene50/Lucene50DocValuesProducer.java
+++ b/lucene/core/src/java/org/apache/lucene/codecs/lucene50/Lucene50DocValuesProducer.java
@@ -17,27 +17,7 @@ package org.apache.lucene.codecs.lucene50;
* limitations under the License.
*/
-import static org.apache.lucene.codecs.lucene50.Lucene50DocValuesConsumer.ALL_LIVE;
-import static org.apache.lucene.codecs.lucene50.Lucene50DocValuesConsumer.ALL_MISSING;
-import static org.apache.lucene.codecs.lucene50.Lucene50DocValuesConsumer.BINARY_FIXED_UNCOMPRESSED;
-import static org.apache.lucene.codecs.lucene50.Lucene50DocValuesConsumer.BINARY_PREFIX_COMPRESSED;
-import static org.apache.lucene.codecs.lucene50.Lucene50DocValuesConsumer.BINARY_VARIABLE_UNCOMPRESSED;
-import static org.apache.lucene.codecs.lucene50.Lucene50DocValuesConsumer.BLOCK_INTERVAL_MASK;
-import static org.apache.lucene.codecs.lucene50.Lucene50DocValuesConsumer.BLOCK_INTERVAL_SHIFT;
-import static org.apache.lucene.codecs.lucene50.Lucene50DocValuesConsumer.CONST_COMPRESSED;
-import static org.apache.lucene.codecs.lucene50.Lucene50DocValuesConsumer.DELTA_COMPRESSED;
-import static org.apache.lucene.codecs.lucene50.Lucene50DocValuesConsumer.GCD_COMPRESSED;
-import static org.apache.lucene.codecs.lucene50.Lucene50DocValuesConsumer.INTERVAL_COUNT;
-import static org.apache.lucene.codecs.lucene50.Lucene50DocValuesConsumer.INTERVAL_MASK;
-import static org.apache.lucene.codecs.lucene50.Lucene50DocValuesConsumer.INTERVAL_SHIFT;
-import static org.apache.lucene.codecs.lucene50.Lucene50DocValuesConsumer.MONOTONIC_COMPRESSED;
-import static org.apache.lucene.codecs.lucene50.Lucene50DocValuesConsumer.REVERSE_INTERVAL_MASK;
-import static org.apache.lucene.codecs.lucene50.Lucene50DocValuesConsumer.REVERSE_INTERVAL_SHIFT;
-import static org.apache.lucene.codecs.lucene50.Lucene50DocValuesConsumer.SORTED_SINGLE_VALUED;
-import static org.apache.lucene.codecs.lucene50.Lucene50DocValuesConsumer.SORTED_WITH_ADDRESSES;
-import static org.apache.lucene.codecs.lucene50.Lucene50DocValuesConsumer.TABLE_COMPRESSED;
-
-import java.io.Closeable; // javadocs
+import java.io.Closeable;
import java.io.IOException;
import java.util.ArrayList;
import java.util.Collection;
@@ -52,8 +32,7 @@ import org.apache.lucene.codecs.DocValuesProducer;
import org.apache.lucene.index.BinaryDocValues;
import org.apache.lucene.index.CorruptIndexException;
import org.apache.lucene.index.DocValues;
-import org.apache.lucene.index.DocsAndPositionsEnum;
-import org.apache.lucene.index.DocsEnum;
+import org.apache.lucene.index.PostingsEnum;
import org.apache.lucene.index.FieldInfo;
import org.apache.lucene.index.FieldInfos;
import org.apache.lucene.index.IndexFileNames;
@@ -78,6 +57,26 @@ import org.apache.lucene.util.RamUsageEstimator;
import org.apache.lucene.util.packed.DirectReader;
import org.apache.lucene.util.packed.MonotonicBlockPackedReader;
+import static org.apache.lucene.codecs.lucene50.Lucene50DocValuesConsumer.ALL_LIVE;
+import static org.apache.lucene.codecs.lucene50.Lucene50DocValuesConsumer.ALL_MISSING;
+import static org.apache.lucene.codecs.lucene50.Lucene50DocValuesConsumer.BINARY_FIXED_UNCOMPRESSED;
+import static org.apache.lucene.codecs.lucene50.Lucene50DocValuesConsumer.BINARY_PREFIX_COMPRESSED;
+import static org.apache.lucene.codecs.lucene50.Lucene50DocValuesConsumer.BINARY_VARIABLE_UNCOMPRESSED;
+import static org.apache.lucene.codecs.lucene50.Lucene50DocValuesConsumer.BLOCK_INTERVAL_MASK;
+import static org.apache.lucene.codecs.lucene50.Lucene50DocValuesConsumer.BLOCK_INTERVAL_SHIFT;
+import static org.apache.lucene.codecs.lucene50.Lucene50DocValuesConsumer.CONST_COMPRESSED;
+import static org.apache.lucene.codecs.lucene50.Lucene50DocValuesConsumer.DELTA_COMPRESSED;
+import static org.apache.lucene.codecs.lucene50.Lucene50DocValuesConsumer.GCD_COMPRESSED;
+import static org.apache.lucene.codecs.lucene50.Lucene50DocValuesConsumer.INTERVAL_COUNT;
+import static org.apache.lucene.codecs.lucene50.Lucene50DocValuesConsumer.INTERVAL_MASK;
+import static org.apache.lucene.codecs.lucene50.Lucene50DocValuesConsumer.INTERVAL_SHIFT;
+import static org.apache.lucene.codecs.lucene50.Lucene50DocValuesConsumer.MONOTONIC_COMPRESSED;
+import static org.apache.lucene.codecs.lucene50.Lucene50DocValuesConsumer.REVERSE_INTERVAL_MASK;
+import static org.apache.lucene.codecs.lucene50.Lucene50DocValuesConsumer.REVERSE_INTERVAL_SHIFT;
+import static org.apache.lucene.codecs.lucene50.Lucene50DocValuesConsumer.SORTED_SINGLE_VALUED;
+import static org.apache.lucene.codecs.lucene50.Lucene50DocValuesConsumer.SORTED_WITH_ADDRESSES;
+import static org.apache.lucene.codecs.lucene50.Lucene50DocValuesConsumer.TABLE_COMPRESSED;
+
/** reader for {@link Lucene50DocValuesFormat} */
class Lucene50DocValuesProducer extends DocValuesProducer implements Closeable {
private final Map numerics = new HashMap<>();
@@ -1141,14 +1140,10 @@ class Lucene50DocValuesProducer extends DocValuesProducer implements Closeable {
}
@Override
- public DocsEnum docs(Bits liveDocs, DocsEnum reuse, int flags) throws IOException {
- throw new UnsupportedOperationException();
- }
-
- @Override
- public DocsAndPositionsEnum docsAndPositions(Bits liveDocs, DocsAndPositionsEnum reuse, int flags) throws IOException {
+ public PostingsEnum postings(Bits liveDocs, PostingsEnum reuse, int flags) throws IOException {
throw new UnsupportedOperationException();
}
+
}
}
}
diff --git a/lucene/core/src/java/org/apache/lucene/codecs/lucene50/Lucene50PostingsFormat.java b/lucene/core/src/java/org/apache/lucene/codecs/lucene50/Lucene50PostingsFormat.java
index bcb352b79a9..b4337e321ec 100644
--- a/lucene/core/src/java/org/apache/lucene/codecs/lucene50/Lucene50PostingsFormat.java
+++ b/lucene/core/src/java/org/apache/lucene/codecs/lucene50/Lucene50PostingsFormat.java
@@ -30,7 +30,7 @@ import org.apache.lucene.codecs.PostingsReaderBase;
import org.apache.lucene.codecs.PostingsWriterBase;
import org.apache.lucene.codecs.blocktree.BlockTreeTermsReader;
import org.apache.lucene.codecs.blocktree.BlockTreeTermsWriter;
-import org.apache.lucene.index.DocsEnum;
+import org.apache.lucene.index.PostingsEnum;
import org.apache.lucene.index.IndexOptions;
import org.apache.lucene.index.SegmentReadState;
import org.apache.lucene.index.SegmentWriteState;
@@ -141,7 +141,7 @@ import org.apache.lucene.util.packed.PackedInts;
* determined by the largest integer. Smaller block size result in smaller variance among width
* of integers hence smaller indexes. Larger block size result in more efficient bulk i/o hence
* better acceleration. This value should always be a multiple of 64, currently fixed as 128 as
- * a tradeoff. It is also the skip interval used to accelerate {@link DocsEnum#advance(int)}.
+ * a tradeoff. It is also the skip interval used to accelerate {@link org.apache.lucene.index.PostingsEnum#advance(int)}.
*
DocFPDelta determines the position of this term's TermFreqs within the .doc file.
* In particular, it is the difference of file offset between this term's
* data and previous term's data (or zero, for the first term in the block).On disk it is
diff --git a/lucene/core/src/java/org/apache/lucene/codecs/lucene50/Lucene50PostingsReader.java b/lucene/core/src/java/org/apache/lucene/codecs/lucene50/Lucene50PostingsReader.java
index 10ad4136eac..2f1ee90458e 100644
--- a/lucene/core/src/java/org/apache/lucene/codecs/lucene50/Lucene50PostingsReader.java
+++ b/lucene/core/src/java/org/apache/lucene/codecs/lucene50/Lucene50PostingsReader.java
@@ -24,11 +24,11 @@ import org.apache.lucene.codecs.BlockTermState;
import org.apache.lucene.codecs.CodecUtil;
import org.apache.lucene.codecs.PostingsReaderBase;
import org.apache.lucene.codecs.lucene50.Lucene50PostingsFormat.IntBlockTermState;
-import org.apache.lucene.index.DocsAndPositionsEnum;
import org.apache.lucene.index.DocsEnum;
import org.apache.lucene.index.FieldInfo;
import org.apache.lucene.index.IndexFileNames;
import org.apache.lucene.index.IndexOptions;
+import org.apache.lucene.index.PostingsEnum;
import org.apache.lucene.index.SegmentReadState;
import org.apache.lucene.store.DataInput;
import org.apache.lucene.store.IndexInput;
@@ -193,39 +193,38 @@ public final class Lucene50PostingsReader extends PostingsReaderBase {
}
@Override
- public DocsEnum docs(FieldInfo fieldInfo, BlockTermState termState, Bits liveDocs, DocsEnum reuse, int flags) throws IOException {
- BlockDocsEnum docsEnum;
- if (reuse instanceof BlockDocsEnum) {
- docsEnum = (BlockDocsEnum) reuse;
- if (!docsEnum.canReuse(docIn, fieldInfo)) {
+ public PostingsEnum postings(FieldInfo fieldInfo, BlockTermState termState, Bits liveDocs, PostingsEnum reuse, int flags) throws IOException {
+
+ if ((flags & PostingsEnum.FLAG_POSITIONS) < PostingsEnum.FLAG_POSITIONS) {
+ BlockDocsEnum docsEnum;
+ if (reuse instanceof BlockDocsEnum) {
+ docsEnum = (BlockDocsEnum) reuse;
+ if (!docsEnum.canReuse(docIn, fieldInfo)) {
+ docsEnum = new BlockDocsEnum(fieldInfo);
+ }
+ } else {
docsEnum = new BlockDocsEnum(fieldInfo);
}
- } else {
- docsEnum = new BlockDocsEnum(fieldInfo);
+ return docsEnum.reset(liveDocs, (IntBlockTermState) termState, flags);
}
- return docsEnum.reset(liveDocs, (IntBlockTermState) termState, flags);
- }
-
- // TODO: specialize to liveDocs vs not
-
- @Override
- public DocsAndPositionsEnum docsAndPositions(FieldInfo fieldInfo, BlockTermState termState, Bits liveDocs,
- DocsAndPositionsEnum reuse, int flags)
- throws IOException {
+ boolean indexHasPositions = fieldInfo.getIndexOptions().compareTo(IndexOptions.DOCS_AND_FREQS_AND_POSITIONS) >= 0;
boolean indexHasOffsets = fieldInfo.getIndexOptions().compareTo(IndexOptions.DOCS_AND_FREQS_AND_POSITIONS_AND_OFFSETS) >= 0;
boolean indexHasPayloads = fieldInfo.hasPayloads();
- if ((!indexHasOffsets || (flags & DocsAndPositionsEnum.FLAG_OFFSETS) == 0) &&
- (!indexHasPayloads || (flags & DocsAndPositionsEnum.FLAG_PAYLOADS) == 0)) {
- BlockDocsAndPositionsEnum docsAndPositionsEnum;
- if (reuse instanceof BlockDocsAndPositionsEnum) {
- docsAndPositionsEnum = (BlockDocsAndPositionsEnum) reuse;
+ if (!indexHasPositions)
+ return null;
+
+ if ((!indexHasOffsets || (flags & PostingsEnum.FLAG_OFFSETS) == 0) &&
+ (!indexHasPayloads || (flags & PostingsEnum.FLAG_PAYLOADS) == 0)) {
+ BlockPostingsEnum docsAndPositionsEnum;
+ if (reuse instanceof BlockPostingsEnum) {
+ docsAndPositionsEnum = (BlockPostingsEnum) reuse;
if (!docsAndPositionsEnum.canReuse(docIn, fieldInfo)) {
- docsAndPositionsEnum = new BlockDocsAndPositionsEnum(fieldInfo);
+ docsAndPositionsEnum = new BlockPostingsEnum(fieldInfo);
}
} else {
- docsAndPositionsEnum = new BlockDocsAndPositionsEnum(fieldInfo);
+ docsAndPositionsEnum = new BlockPostingsEnum(fieldInfo);
}
return docsAndPositionsEnum.reset(liveDocs, (IntBlockTermState) termState);
} else {
@@ -302,7 +301,7 @@ public final class Lucene50PostingsReader extends PostingsReaderBase {
indexHasPayloads == fieldInfo.hasPayloads();
}
- public DocsEnum reset(Bits liveDocs, IntBlockTermState termState, int flags) throws IOException {
+ public PostingsEnum reset(Bits liveDocs, IntBlockTermState termState, int flags) throws IOException {
this.liveDocs = liveDocs;
docFreq = termState.docFreq;
@@ -319,7 +318,7 @@ public final class Lucene50PostingsReader extends PostingsReaderBase {
}
doc = -1;
- this.needsFreq = (flags & DocsEnum.FLAG_FREQS) != 0;
+ this.needsFreq = (flags & PostingsEnum.FLAG_FREQS) != 0;
if (indexHasFreq == false || needsFreq == false) {
Arrays.fill(freqBuffer, 1);
}
@@ -336,6 +335,12 @@ public final class Lucene50PostingsReader extends PostingsReaderBase {
return freq;
}
+ @Override
+ public int nextPosition() throws IOException {
+ assert false; // shouldn't be calling nextPosition() on this
+ return -1;
+ }
+
@Override
public int docID() {
return doc;
@@ -472,7 +477,7 @@ public final class Lucene50PostingsReader extends PostingsReaderBase {
}
- final class BlockDocsAndPositionsEnum extends DocsAndPositionsEnum {
+ final class BlockPostingsEnum extends PostingsEnum {
private final byte[] encoded;
@@ -535,7 +540,7 @@ public final class Lucene50PostingsReader extends PostingsReaderBase {
private Bits liveDocs;
private int singletonDocID; // docid when there is a single pulsed posting, otherwise -1
- public BlockDocsAndPositionsEnum(FieldInfo fieldInfo) throws IOException {
+ public BlockPostingsEnum(FieldInfo fieldInfo) throws IOException {
this.startDocIn = Lucene50PostingsReader.this.docIn;
this.docIn = null;
this.posIn = Lucene50PostingsReader.this.posIn.clone();
@@ -550,7 +555,7 @@ public final class Lucene50PostingsReader extends PostingsReaderBase {
indexHasPayloads == fieldInfo.hasPayloads();
}
- public DocsAndPositionsEnum reset(Bits liveDocs, IntBlockTermState termState) throws IOException {
+ public PostingsEnum reset(Bits liveDocs, IntBlockTermState termState) throws IOException {
this.liveDocs = liveDocs;
docFreq = termState.docFreq;
@@ -769,6 +774,9 @@ public final class Lucene50PostingsReader extends PostingsReaderBase {
@Override
public int nextPosition() throws IOException {
+
+ assert posPendingCount > 0;
+
if (posPendingFP != -1) {
posIn.seek(posPendingFP);
posPendingFP = -1;
@@ -813,7 +821,7 @@ public final class Lucene50PostingsReader extends PostingsReaderBase {
}
// Also handles payloads + offsets
- final class EverythingEnum extends DocsAndPositionsEnum {
+ final class EverythingEnum extends PostingsEnum {
private final byte[] encoded;
@@ -960,8 +968,8 @@ public final class Lucene50PostingsReader extends PostingsReaderBase {
lastPosBlockFP = posTermStartFP + termState.lastPosBlockOffset;
}
- this.needsOffsets = (flags & DocsAndPositionsEnum.FLAG_OFFSETS) != 0;
- this.needsPayloads = (flags & DocsAndPositionsEnum.FLAG_PAYLOADS) != 0;
+ this.needsOffsets = (flags & PostingsEnum.FLAG_OFFSETS) != 0;
+ this.needsPayloads = (flags & PostingsEnum.FLAG_PAYLOADS) != 0;
doc = -1;
accum = 0;
@@ -1228,6 +1236,8 @@ public final class Lucene50PostingsReader extends PostingsReaderBase {
@Override
public int nextPosition() throws IOException {
+ assert posPendingCount > 0;
+
if (posPendingFP != -1) {
posIn.seek(posPendingFP);
posPendingFP = -1;
diff --git a/lucene/core/src/java/org/apache/lucene/index/BufferedUpdatesStream.java b/lucene/core/src/java/org/apache/lucene/index/BufferedUpdatesStream.java
index 5b993e21c52..8d9fad15996 100644
--- a/lucene/core/src/java/org/apache/lucene/index/BufferedUpdatesStream.java
+++ b/lucene/core/src/java/org/apache/lucene/index/BufferedUpdatesStream.java
@@ -374,7 +374,7 @@ class BufferedUpdatesStream implements Accountable {
final int startDelCount;
TermsEnum termsEnum;
- DocsEnum docsEnum;
+ PostingsEnum postingsEnum;
BytesRef term;
boolean any;
@@ -562,12 +562,12 @@ class BufferedUpdatesStream implements Accountable {
if (state.delGen < delGen) {
// we don't need term frequencies for this
- state.docsEnum = state.termsEnum.docs(state.rld.getLiveDocs(), state.docsEnum, DocsEnum.FLAG_NONE);
+ state.postingsEnum = state.termsEnum.postings(state.rld.getLiveDocs(), state.postingsEnum, PostingsEnum.FLAG_NONE);
- assert state.docsEnum != null;
+ assert state.postingsEnum != null;
while (true) {
- final int docID = state.docsEnum.nextDoc();
+ final int docID = state.postingsEnum.nextDoc();
if (docID == DocIdSetIterator.NO_MORE_DOCS) {
break;
}
@@ -623,7 +623,7 @@ class BufferedUpdatesStream implements Accountable {
String currentField = null;
TermsEnum termsEnum = null;
- DocsEnum docsEnum = null;
+ PostingsEnum postingsEnum = null;
for (DocValuesUpdate update : updates) {
Term term = update.term;
@@ -658,14 +658,14 @@ class BufferedUpdatesStream implements Accountable {
if (termsEnum.seekExact(term.bytes())) {
// we don't need term frequencies for this
- docsEnum = termsEnum.docs(segState.rld.getLiveDocs(), docsEnum, DocsEnum.FLAG_NONE);
+ postingsEnum = termsEnum.postings(segState.rld.getLiveDocs(), postingsEnum, PostingsEnum.FLAG_NONE);
DocValuesFieldUpdates dvUpdates = dvUpdatesContainer.getUpdates(update.field, update.type);
if (dvUpdates == null) {
dvUpdates = dvUpdatesContainer.newUpdates(update.field, update.type, segState.reader.maxDoc());
}
int doc;
- while ((doc = docsEnum.nextDoc()) != DocIdSetIterator.NO_MORE_DOCS) {
+ while ((doc = postingsEnum.nextDoc()) != DocIdSetIterator.NO_MORE_DOCS) {
if (doc >= limit) {
break; // no more docs that can be updated for this term
}
diff --git a/lucene/core/src/java/org/apache/lucene/index/CheckIndex.java b/lucene/core/src/java/org/apache/lucene/index/CheckIndex.java
index 8963535f980..bc60bde217e 100644
--- a/lucene/core/src/java/org/apache/lucene/index/CheckIndex.java
+++ b/lucene/core/src/java/org/apache/lucene/index/CheckIndex.java
@@ -882,9 +882,9 @@ public class CheckIndex implements Closeable {
final Status.TermIndexStatus status = new Status.TermIndexStatus();
int computedFieldCount = 0;
- DocsEnum docs = null;
- DocsEnum docsAndFreqs = null;
- DocsAndPositionsEnum postings = null;
+ PostingsEnum docs = null;
+ PostingsEnum docsAndFreqs = null;
+ PostingsEnum postings = null;
String lastField = null;
for (String field : fields) {
@@ -1026,8 +1026,8 @@ public class CheckIndex implements Closeable {
}
sumDocFreq += docFreq;
- docs = termsEnum.docs(liveDocs, docs);
- postings = termsEnum.docsAndPositions(liveDocs, postings);
+ docs = termsEnum.postings(liveDocs, docs);
+ postings = termsEnum.postings(liveDocs, postings, PostingsEnum.FLAG_ALL);
if (hasFreqs == false) {
if (termsEnum.totalTermFreq() != -1) {
@@ -1051,7 +1051,7 @@ public class CheckIndex implements Closeable {
}
}
- final DocsEnum docs2;
+ final PostingsEnum docs2;
if (postings != null) {
docs2 = postings;
} else {
@@ -1152,7 +1152,7 @@ public class CheckIndex implements Closeable {
// Re-count if there are deleted docs:
if (liveDocs != null) {
if (hasFreqs) {
- final DocsEnum docsNoDel = termsEnum.docs(null, docsAndFreqs);
+ final PostingsEnum docsNoDel = termsEnum.postings(null, docsAndFreqs);
docCount = 0;
totalTermFreq = 0;
while(docsNoDel.nextDoc() != DocIdSetIterator.NO_MORE_DOCS) {
@@ -1161,7 +1161,7 @@ public class CheckIndex implements Closeable {
totalTermFreq += docsNoDel.freq();
}
} else {
- final DocsEnum docsNoDel = termsEnum.docs(null, docs, DocsEnum.FLAG_NONE);
+ final PostingsEnum docsNoDel = termsEnum.postings(null, docs, PostingsEnum.FLAG_NONE);
docCount = 0;
totalTermFreq = -1;
while(docsNoDel.nextDoc() != DocIdSetIterator.NO_MORE_DOCS) {
@@ -1188,7 +1188,7 @@ public class CheckIndex implements Closeable {
if (hasPositions) {
for(int idx=0;idx<7;idx++) {
final int skipDocID = (int) (((idx+1)*(long) maxDoc)/8);
- postings = termsEnum.docsAndPositions(liveDocs, postings);
+ postings = termsEnum.postings(liveDocs, postings, PostingsEnum.FLAG_ALL);
final int docID = postings.advance(skipDocID);
if (docID == DocIdSetIterator.NO_MORE_DOCS) {
break;
@@ -1247,7 +1247,7 @@ public class CheckIndex implements Closeable {
} else {
for(int idx=0;idx<7;idx++) {
final int skipDocID = (int) (((idx+1)*(long) maxDoc)/8);
- docs = termsEnum.docs(liveDocs, docs, DocsEnum.FLAG_NONE);
+ docs = termsEnum.postings(liveDocs, docs, PostingsEnum.FLAG_NONE);
final int docID = docs.advance(skipDocID);
if (docID == DocIdSetIterator.NO_MORE_DOCS) {
break;
@@ -1315,7 +1315,7 @@ public class CheckIndex implements Closeable {
}
int expectedDocFreq = termsEnum.docFreq();
- DocsEnum d = termsEnum.docs(null, null, DocsEnum.FLAG_NONE);
+ PostingsEnum d = termsEnum.postings(null, null, PostingsEnum.FLAG_NONE);
int docFreq = 0;
while (d.nextDoc() != DocIdSetIterator.NO_MORE_DOCS) {
docFreq++;
@@ -1356,7 +1356,7 @@ public class CheckIndex implements Closeable {
throw new RuntimeException("seek to existing term " + seekTerms[i] + " failed");
}
- docs = termsEnum.docs(liveDocs, docs, DocsEnum.FLAG_NONE);
+ docs = termsEnum.postings(liveDocs, docs, PostingsEnum.FLAG_NONE);
if (docs == null) {
throw new RuntimeException("null DocsEnum from to existing term " + seekTerms[i]);
}
@@ -1374,7 +1374,7 @@ public class CheckIndex implements Closeable {
}
totDocFreq += termsEnum.docFreq();
- docs = termsEnum.docs(null, docs, DocsEnum.FLAG_NONE);
+ docs = termsEnum.postings(null, docs, PostingsEnum.FLAG_NONE);
if (docs == null) {
throw new RuntimeException("null DocsEnum from to existing term " + seekTerms[i]);
}
@@ -1806,12 +1806,12 @@ public class CheckIndex implements Closeable {
infoStream.print(" test: term vectors........");
}
- DocsEnum docs = null;
- DocsAndPositionsEnum postings = null;
+ PostingsEnum docs = null;
+ PostingsEnum postings = null;
// Only used if crossCheckTermVectors is true:
- DocsEnum postingsDocs = null;
- DocsAndPositionsEnum postingsPostings = null;
+ PostingsEnum postingsDocs = null;
+ PostingsEnum postingsPostings = null;
final Bits liveDocs = reader.getLiveDocs();
@@ -1878,16 +1878,16 @@ public class CheckIndex implements Closeable {
while ((term = termsEnum.next()) != null) {
if (hasProx) {
- postings = termsEnum.docsAndPositions(null, postings);
+ postings = termsEnum.postings(null, postings, PostingsEnum.FLAG_ALL);
assert postings != null;
docs = null;
} else {
- docs = termsEnum.docs(null, docs);
+ docs = termsEnum.postings(null, docs);
assert docs != null;
postings = null;
}
- final DocsEnum docs2;
+ final PostingsEnum docs2;
if (hasProx) {
assert postings != null;
docs2 = postings;
@@ -1896,14 +1896,14 @@ public class CheckIndex implements Closeable {
docs2 = docs;
}
- final DocsEnum postingsDocs2;
+ final PostingsEnum postingsDocs2;
if (!postingsTermsEnum.seekExact(term)) {
throw new RuntimeException("vector term=" + term + " field=" + field + " does not exist in postings; doc=" + j);
}
- postingsPostings = postingsTermsEnum.docsAndPositions(null, postingsPostings);
+ postingsPostings = postingsTermsEnum.postings(null, postingsPostings, PostingsEnum.FLAG_ALL);
if (postingsPostings == null) {
// Term vectors were indexed w/ pos but postings were not
- postingsDocs = postingsTermsEnum.docs(null, postingsDocs);
+ postingsDocs = postingsTermsEnum.postings(null, postingsDocs);
if (postingsDocs == null) {
throw new RuntimeException("vector term=" + term + " field=" + field + " does not exist in postings; doc=" + j);
}
diff --git a/lucene/core/src/java/org/apache/lucene/index/DocsAndPositionsEnum.java b/lucene/core/src/java/org/apache/lucene/index/DocsAndPositionsEnum.java
deleted file mode 100644
index 60ac2bbdef1..00000000000
--- a/lucene/core/src/java/org/apache/lucene/index/DocsAndPositionsEnum.java
+++ /dev/null
@@ -1,62 +0,0 @@
-package org.apache.lucene.index;
-
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements. See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-import java.io.IOException;
-
-import org.apache.lucene.util.Bits; // javadocs
-import org.apache.lucene.util.BytesRef;
-
-/** Also iterates through positions. */
-public abstract class DocsAndPositionsEnum extends DocsEnum {
-
- /** Flag to pass to {@link TermsEnum#docsAndPositions(Bits,DocsAndPositionsEnum,int)}
- * if you require offsets in the returned enum. */
- public static final int FLAG_OFFSETS = 0x1;
-
- /** Flag to pass to {@link TermsEnum#docsAndPositions(Bits,DocsAndPositionsEnum,int)}
- * if you require payloads in the returned enum. */
- public static final int FLAG_PAYLOADS = 0x2;
-
- /** Sole constructor. (For invocation by subclass
- * constructors, typically implicit.) */
- protected DocsAndPositionsEnum() {
- }
-
- /** Returns the next position. You should only call this
- * up to {@link DocsEnum#freq()} times else
- * the behavior is not defined. If positions were not
- * indexed this will return -1; this only happens if
- * offsets were indexed and you passed needsOffset=true
- * when pulling the enum. */
- public abstract int nextPosition() throws IOException;
-
- /** Returns start offset for the current position, or -1
- * if offsets were not indexed. */
- public abstract int startOffset() throws IOException;
-
- /** Returns end offset for the current position, or -1 if
- * offsets were not indexed. */
- public abstract int endOffset() throws IOException;
-
- /** Returns the payload at this position, or null if no
- * payload was indexed. You should not modify anything
- * (neither members of the returned BytesRef nor bytes
- * in the byte[]). */
- public abstract BytesRef getPayload() throws IOException;
-}
diff --git a/lucene/core/src/java/org/apache/lucene/index/DocsEnum.java b/lucene/core/src/java/org/apache/lucene/index/DocsEnum.java
index bfc00a30ad9..d29cf2fb9f0 100644
--- a/lucene/core/src/java/org/apache/lucene/index/DocsEnum.java
+++ b/lucene/core/src/java/org/apache/lucene/index/DocsEnum.java
@@ -19,49 +19,52 @@ package org.apache.lucene.index;
import java.io.IOException;
-import org.apache.lucene.search.DocIdSetIterator;
-import org.apache.lucene.util.AttributeSource;
-import org.apache.lucene.util.Bits; // javadocs
+import org.apache.lucene.util.BytesRef;
-/** Iterates through the documents and term freqs.
- * NOTE: you must first call {@link #nextDoc} before using
- * any of the per-doc methods. */
-public abstract class DocsEnum extends DocIdSetIterator {
-
- /**
- * Flag to pass to {@link TermsEnum#docs(Bits,DocsEnum,int)} if you don't
- * require term frequencies in the returned enum. When passed to
- * {@link TermsEnum#docsAndPositions(Bits,DocsAndPositionsEnum,int)} means
- * that no offsets and payloads will be returned.
- */
- public static final int FLAG_NONE = 0x0;
+/**
+ * Convenience class returning empty values for positions, offsets and payloads
+ */
+public abstract class DocsEnum extends PostingsEnum {
- /** Flag to pass to {@link TermsEnum#docs(Bits,DocsEnum,int)}
- * if you require term frequencies in the returned enum. */
- public static final int FLAG_FREQS = 0x1;
-
- private AttributeSource atts = null;
-
- /** Sole constructor. (For invocation by subclass
+ /** Sole constructor. (For invocation by subclass
* constructors, typically implicit.) */
protected DocsEnum() {
+ super();
}
/**
- * Returns term frequency in the current document, or 1 if the field was
- * indexed with {@link IndexOptions#DOCS}. Do not call this before
- * {@link #nextDoc} is first called, nor after {@link #nextDoc} returns
- * {@link DocIdSetIterator#NO_MORE_DOCS}.
- *
- *
- * NOTE: if the {@link DocsEnum} was obtain with {@link #FLAG_NONE},
- * the result of this method is undefined.
+ * @return -1, indicating no positions are available
+ * @throws IOException if a low-level IO exception occurred
*/
- public abstract int freq() throws IOException;
-
- /** Returns the related attributes. */
- public AttributeSource attributes() {
- if (atts == null) atts = new AttributeSource();
- return atts;
+ @Override
+ public int nextPosition() throws IOException {
+ return -1;
+ }
+
+ /**
+ * @return -1, indicating no offsets are available
+ * @throws IOException if a low-level IO exception occurred
+ */
+ @Override
+ public int startOffset() throws IOException {
+ return -1;
+ }
+
+ /**
+ * @return -1, indicating no offsets are available
+ * @throws IOException if a low-level IO exception occurred
+ */
+ @Override
+ public int endOffset() throws IOException {
+ return -1;
+ }
+
+ /**
+ * @return null, indicating no payloads are available
+ * @throws IOException if a low-level IO exception occurred
+ */
+ @Override
+ public BytesRef getPayload() throws IOException {
+ return null;
}
}
diff --git a/lucene/core/src/java/org/apache/lucene/index/FilterLeafReader.java b/lucene/core/src/java/org/apache/lucene/index/FilterLeafReader.java
index 47422a99828..547bc10a259 100644
--- a/lucene/core/src/java/org/apache/lucene/index/FilterLeafReader.java
+++ b/lucene/core/src/java/org/apache/lucene/index/FilterLeafReader.java
@@ -215,26 +215,22 @@ public class FilterLeafReader extends LeafReader {
}
@Override
- public DocsEnum docs(Bits liveDocs, DocsEnum reuse, int flags) throws IOException {
- return in.docs(liveDocs, reuse, flags);
+ public PostingsEnum postings(Bits liveDocs, PostingsEnum reuse, int flags) throws IOException {
+ return in.postings(liveDocs, reuse, flags);
}
- @Override
- public DocsAndPositionsEnum docsAndPositions(Bits liveDocs, DocsAndPositionsEnum reuse, int flags) throws IOException {
- return in.docsAndPositions(liveDocs, reuse, flags);
- }
}
- /** Base class for filtering {@link DocsEnum} implementations. */
- public static class FilterDocsEnum extends DocsEnum {
+ /** Base class for filtering {@link PostingsEnum} implementations. */
+ public static class FilterDocsEnum extends PostingsEnum {
/** The underlying DocsEnum instance. */
- protected final DocsEnum in;
+ protected final PostingsEnum in;
/**
* Create a new FilterDocsEnum
* @param in the underlying DocsEnum instance.
*/
- public FilterDocsEnum(DocsEnum in) {
+ public FilterDocsEnum(PostingsEnum in) {
if (in == null) {
throw new NullPointerException("incoming DocsEnum cannot be null");
}
@@ -266,53 +262,6 @@ public class FilterLeafReader extends LeafReader {
return in.advance(target);
}
- @Override
- public long cost() {
- return in.cost();
- }
- }
-
- /** Base class for filtering {@link DocsAndPositionsEnum} implementations. */
- public static class FilterDocsAndPositionsEnum extends DocsAndPositionsEnum {
- /** The underlying DocsAndPositionsEnum instance. */
- protected final DocsAndPositionsEnum in;
-
- /**
- * Create a new FilterDocsAndPositionsEnum
- * @param in the underlying DocsAndPositionsEnum instance.
- */
- public FilterDocsAndPositionsEnum(DocsAndPositionsEnum in) {
- if (in == null) {
- throw new NullPointerException("incoming DocsAndPositionsEnum cannot be null");
- }
- this.in = in;
- }
-
- @Override
- public AttributeSource attributes() {
- return in.attributes();
- }
-
- @Override
- public int docID() {
- return in.docID();
- }
-
- @Override
- public int freq() throws IOException {
- return in.freq();
- }
-
- @Override
- public int nextDoc() throws IOException {
- return in.nextDoc();
- }
-
- @Override
- public int advance(int target) throws IOException {
- return in.advance(target);
- }
-
@Override
public int nextPosition() throws IOException {
return in.nextPosition();
@@ -332,7 +281,7 @@ public class FilterLeafReader extends LeafReader {
public BytesRef getPayload() throws IOException {
return in.getPayload();
}
-
+
@Override
public long cost() {
return in.cost();
diff --git a/lucene/core/src/java/org/apache/lucene/index/FilteredTermsEnum.java b/lucene/core/src/java/org/apache/lucene/index/FilteredTermsEnum.java
index b6bfcc41532..85bde392e74 100644
--- a/lucene/core/src/java/org/apache/lucene/index/FilteredTermsEnum.java
+++ b/lucene/core/src/java/org/apache/lucene/index/FilteredTermsEnum.java
@@ -179,13 +179,8 @@ public abstract class FilteredTermsEnum extends TermsEnum {
}
@Override
- public DocsEnum docs(Bits bits, DocsEnum reuse, int flags) throws IOException {
- return tenum.docs(bits, reuse, flags);
- }
-
- @Override
- public DocsAndPositionsEnum docsAndPositions(Bits bits, DocsAndPositionsEnum reuse, int flags) throws IOException {
- return tenum.docsAndPositions(bits, reuse, flags);
+ public PostingsEnum postings(Bits bits, PostingsEnum reuse, int flags) throws IOException {
+ return tenum.postings(bits, reuse, flags);
}
/** This enum does not support seeking!
diff --git a/lucene/core/src/java/org/apache/lucene/index/FreqProxFields.java b/lucene/core/src/java/org/apache/lucene/index/FreqProxFields.java
index fe5d31f8f0a..b7a3f403ede 100644
--- a/lucene/core/src/java/org/apache/lucene/index/FreqProxFields.java
+++ b/lucene/core/src/java/org/apache/lucene/index/FreqProxFields.java
@@ -24,7 +24,7 @@ import java.util.List;
import java.util.Map;
import org.apache.lucene.index.FreqProxTermsWriterPerField.FreqProxPostingsArray;
-import org.apache.lucene.util.AttributeSource; // javadocs
+import org.apache.lucene.util.AttributeSource;
import org.apache.lucene.util.Bits;
import org.apache.lucene.util.BytesRef;
import org.apache.lucene.util.BytesRefBuilder;
@@ -230,14 +230,41 @@ class FreqProxFields extends Fields {
}
@Override
- public DocsEnum docs(Bits liveDocs, DocsEnum reuse, int flags) {
+ public PostingsEnum postings(Bits liveDocs, PostingsEnum reuse, int flags) {
if (liveDocs != null) {
throw new IllegalArgumentException("liveDocs must be null");
}
+ if ((flags & PostingsEnum.FLAG_POSITIONS) >= PostingsEnum.FLAG_POSITIONS) {
+ FreqProxPostingsEnum posEnum;
+
+ if (!terms.hasProx) {
+ // Caller wants positions but we didn't index them;
+ // don't lie:
+ throw new IllegalArgumentException("did not index positions");
+ }
+
+ if (!terms.hasOffsets && (flags & PostingsEnum.FLAG_OFFSETS) == PostingsEnum.FLAG_OFFSETS) {
+ // Caller wants offsets but we didn't index them;
+ // don't lie:
+ throw new IllegalArgumentException("did not index offsets");
+ }
+
+ if (reuse instanceof FreqProxPostingsEnum) {
+ posEnum = (FreqProxPostingsEnum) reuse;
+ if (posEnum.postingsArray != postingsArray) {
+ posEnum = new FreqProxPostingsEnum(terms, postingsArray);
+ }
+ } else {
+ posEnum = new FreqProxPostingsEnum(terms, postingsArray);
+ }
+ posEnum.reset(sortedTermIDs[ord]);
+ return posEnum;
+ }
+
FreqProxDocsEnum docsEnum;
- if (!terms.hasFreq && (flags & DocsEnum.FLAG_FREQS) != 0) {
+ if (!terms.hasFreq && (flags & PostingsEnum.FLAG_FREQS) != 0) {
// Caller wants freqs but we didn't index them;
// don't lie:
throw new IllegalArgumentException("did not index freq");
@@ -255,37 +282,6 @@ class FreqProxFields extends Fields {
return docsEnum;
}
- @Override
- public DocsAndPositionsEnum docsAndPositions(Bits liveDocs, DocsAndPositionsEnum reuse, int flags) {
- if (liveDocs != null) {
- throw new IllegalArgumentException("liveDocs must be null");
- }
- FreqProxDocsAndPositionsEnum posEnum;
-
- if (!terms.hasProx) {
- // Caller wants positions but we didn't index them;
- // don't lie:
- throw new IllegalArgumentException("did not index positions");
- }
-
- if (!terms.hasOffsets && (flags & DocsAndPositionsEnum.FLAG_OFFSETS) != 0) {
- // Caller wants offsets but we didn't index them;
- // don't lie:
- throw new IllegalArgumentException("did not index offsets");
- }
-
- if (reuse instanceof FreqProxDocsAndPositionsEnum) {
- posEnum = (FreqProxDocsAndPositionsEnum) reuse;
- if (posEnum.postingsArray != postingsArray) {
- posEnum = new FreqProxDocsAndPositionsEnum(terms, postingsArray);
- }
- } else {
- posEnum = new FreqProxDocsAndPositionsEnum(terms, postingsArray);
- }
- posEnum.reset(sortedTermIDs[ord]);
- return posEnum;
- }
-
/**
* Expert: Returns the TermsEnums internal state to position the TermsEnum
* without re-seeking the term dictionary.
@@ -347,6 +343,12 @@ class FreqProxFields extends Fields {
}
}
+ @Override
+ public int nextPosition() throws IOException {
+ assert false : "Shouldn't be calling nextPosition on DocsEnum";
+ return -1;
+ }
+
@Override
public int nextDoc() throws IOException {
if (reader.eof()) {
@@ -389,7 +391,7 @@ class FreqProxFields extends Fields {
}
}
- private static class FreqProxDocsAndPositionsEnum extends DocsAndPositionsEnum {
+ private static class FreqProxPostingsEnum extends PostingsEnum {
final FreqProxTermsWriterPerField terms;
final FreqProxPostingsArray postingsArray;
@@ -407,7 +409,7 @@ class FreqProxFields extends Fields {
boolean hasPayload;
BytesRefBuilder payload = new BytesRefBuilder();
- public FreqProxDocsAndPositionsEnum(FreqProxTermsWriterPerField terms, FreqProxPostingsArray postingsArray) {
+ public FreqProxPostingsEnum(FreqProxTermsWriterPerField terms, FreqProxPostingsArray postingsArray) {
this.terms = terms;
this.postingsArray = postingsArray;
this.readOffsets = terms.hasOffsets;
diff --git a/lucene/core/src/java/org/apache/lucene/index/FreqProxTermsWriter.java b/lucene/core/src/java/org/apache/lucene/index/FreqProxTermsWriter.java
index 8e98fbd6ac9..44d20d0db92 100644
--- a/lucene/core/src/java/org/apache/lucene/index/FreqProxTermsWriter.java
+++ b/lucene/core/src/java/org/apache/lucene/index/FreqProxTermsWriter.java
@@ -43,7 +43,7 @@ final class FreqProxTermsWriter extends TermsHash {
Collections.sort(deleteTerms);
String lastField = null;
TermsEnum termsEnum = null;
- DocsEnum docsEnum = null;
+ PostingsEnum postingsEnum = null;
for(Term deleteTerm : deleteTerms) {
if (deleteTerm.field().equals(lastField) == false) {
lastField = deleteTerm.field();
@@ -56,11 +56,11 @@ final class FreqProxTermsWriter extends TermsHash {
}
if (termsEnum != null && termsEnum.seekExact(deleteTerm.bytes())) {
- docsEnum = termsEnum.docs(null, docsEnum, 0);
+ postingsEnum = termsEnum.postings(null, postingsEnum, 0);
int delDocLimit = segDeletes.get(deleteTerm);
- assert delDocLimit < DocsEnum.NO_MORE_DOCS;
+ assert delDocLimit < PostingsEnum.NO_MORE_DOCS;
while (true) {
- int doc = docsEnum.nextDoc();
+ int doc = postingsEnum.nextDoc();
if (doc < delDocLimit) {
if (state.liveDocs == null) {
state.liveDocs = state.segmentInfo.getCodec().liveDocsFormat().newLiveDocs(state.segmentInfo.getDocCount());
diff --git a/lucene/core/src/java/org/apache/lucene/index/LeafReader.java b/lucene/core/src/java/org/apache/lucene/index/LeafReader.java
index 2f9c60422ee..ce181251f0e 100644
--- a/lucene/core/src/java/org/apache/lucene/index/LeafReader.java
+++ b/lucene/core/src/java/org/apache/lucene/index/LeafReader.java
@@ -17,11 +17,11 @@ package org.apache.lucene.index;
* limitations under the License.
*/
-import java.io.IOException;
-
import org.apache.lucene.index.IndexReader.ReaderClosedListener;
import org.apache.lucene.util.Bits;
+import java.io.IOException;
+
/** {@code LeafReader} is an abstract class, providing an interface for accessing an
index. Search of an index is done entirely through this abstract interface,
so that any subclass which implements it is searchable. IndexReaders implemented
@@ -205,38 +205,25 @@ public abstract class LeafReader extends IndexReader {
return fields().terms(field);
}
- /** Returns {@link DocsEnum} for the specified term.
+ /** Returns {@link PostingsEnum} for the specified term.
* This will return null if either the field or
* term does not exist.
- * @see TermsEnum#docs(Bits, DocsEnum) */
- public final DocsEnum termDocsEnum(Term term) throws IOException {
+ * @see TermsEnum#postings(Bits, PostingsEnum) */
+ public final PostingsEnum termDocsEnum(Term term, int flags) throws IOException {
assert term.field() != null;
assert term.bytes() != null;
final Terms terms = terms(term.field());
if (terms != null) {
final TermsEnum termsEnum = terms.iterator(null);
if (termsEnum.seekExact(term.bytes())) {
- return termsEnum.docs(getLiveDocs(), null);
+ return termsEnum.postings(getLiveDocs(), null, flags);
}
}
return null;
}
- /** Returns {@link DocsAndPositionsEnum} for the specified
- * term. This will return null if the
- * field or term does not exist or positions weren't indexed.
- * @see TermsEnum#docsAndPositions(Bits, DocsAndPositionsEnum) */
- public final DocsAndPositionsEnum termPositionsEnum(Term term) throws IOException {
- assert term.field() != null;
- assert term.bytes() != null;
- final Terms terms = terms(term.field());
- if (terms != null) {
- final TermsEnum termsEnum = terms.iterator(null);
- if (termsEnum.seekExact(term.bytes())) {
- return termsEnum.docsAndPositions(getLiveDocs(), null);
- }
- }
- return null;
+ public final PostingsEnum termDocsEnum(Term term) throws IOException {
+ return termDocsEnum(term, PostingsEnum.FLAG_FREQS);
}
/** Returns {@link NumericDocValues} for this field, or
diff --git a/lucene/core/src/java/org/apache/lucene/index/MappedMultiFields.java b/lucene/core/src/java/org/apache/lucene/index/MappedMultiFields.java
index fad0eed43d5..85fc6145b8d 100644
--- a/lucene/core/src/java/org/apache/lucene/index/MappedMultiFields.java
+++ b/lucene/core/src/java/org/apache/lucene/index/MappedMultiFields.java
@@ -103,10 +103,23 @@ public class MappedMultiFields extends FilterFields {
}
@Override
- public DocsEnum docs(Bits liveDocs, DocsEnum reuse, int flags) throws IOException {
+ public PostingsEnum postings(Bits liveDocs, PostingsEnum reuse, int flags) throws IOException {
if (liveDocs != null) {
throw new IllegalArgumentException("liveDocs must be null");
}
+
+ MappingMultiPostingsEnum mappingDocsAndPositionsEnum;
+ if (reuse instanceof MappingMultiPostingsEnum) {
+ mappingDocsAndPositionsEnum = (MappingMultiPostingsEnum) reuse;
+ } else {
+ mappingDocsAndPositionsEnum = new MappingMultiPostingsEnum(mergeState);
+ }
+
+ MultiPostingsEnum docsAndPositionsEnum = (MultiPostingsEnum) in.postings(liveDocs, mappingDocsAndPositionsEnum.multiDocsAndPositionsEnum, flags);
+ mappingDocsAndPositionsEnum.reset(docsAndPositionsEnum);
+ return mappingDocsAndPositionsEnum;
+
+/*
MappingMultiDocsEnum mappingDocsEnum;
if (reuse instanceof MappingMultiDocsEnum) {
mappingDocsEnum = (MappingMultiDocsEnum) reuse;
@@ -116,24 +129,7 @@ public class MappedMultiFields extends FilterFields {
MultiDocsEnum docsEnum = (MultiDocsEnum) in.docs(liveDocs, mappingDocsEnum.multiDocsEnum, flags);
mappingDocsEnum.reset(docsEnum);
- return mappingDocsEnum;
- }
-
- @Override
- public DocsAndPositionsEnum docsAndPositions(Bits liveDocs, DocsAndPositionsEnum reuse, int flags) throws IOException {
- if (liveDocs != null) {
- throw new IllegalArgumentException("liveDocs must be null");
- }
- MappingMultiDocsAndPositionsEnum mappingDocsAndPositionsEnum;
- if (reuse instanceof MappingMultiDocsAndPositionsEnum) {
- mappingDocsAndPositionsEnum = (MappingMultiDocsAndPositionsEnum) reuse;
- } else {
- mappingDocsAndPositionsEnum = new MappingMultiDocsAndPositionsEnum(mergeState);
- }
-
- MultiDocsAndPositionsEnum docsAndPositionsEnum = (MultiDocsAndPositionsEnum) in.docsAndPositions(liveDocs, mappingDocsAndPositionsEnum.multiDocsAndPositionsEnum, flags);
- mappingDocsAndPositionsEnum.reset(docsAndPositionsEnum);
- return mappingDocsAndPositionsEnum;
+ return mappingDocsEnum;*/
}
}
}
diff --git a/lucene/core/src/java/org/apache/lucene/index/MappingMultiDocsEnum.java b/lucene/core/src/java/org/apache/lucene/index/MappingMultiDocsEnum.java
deleted file mode 100644
index 2aa9e5f8d23..00000000000
--- a/lucene/core/src/java/org/apache/lucene/index/MappingMultiDocsEnum.java
+++ /dev/null
@@ -1,121 +0,0 @@
-package org.apache.lucene.index;
-
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements. See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-import org.apache.lucene.index.MultiDocsEnum.EnumWithSlice;
-
-import java.io.IOException;
-
-/**
- * Exposes flex API, merged from flex API of sub-segments,
- * remapping docIDs (this is used for segment merging).
- *
- * @lucene.experimental
- */
-
-final class MappingMultiDocsEnum extends DocsEnum {
- private MultiDocsEnum.EnumWithSlice[] subs;
- int numSubs;
- int upto;
- MergeState.DocMap currentMap;
- DocsEnum current;
- int currentBase;
- int doc = -1;
- private final MergeState mergeState;
- MultiDocsEnum multiDocsEnum;
-
- /** Sole constructor. */
- public MappingMultiDocsEnum(MergeState mergeState) {
- this.mergeState = mergeState;
- }
-
- MappingMultiDocsEnum reset(MultiDocsEnum docsEnum) {
- this.numSubs = docsEnum.getNumSubs();
- this.subs = docsEnum.getSubs();
- this.multiDocsEnum = docsEnum;
- upto = -1;
- current = null;
- return this;
- }
-
- /** How many sub-readers we are merging.
- * @see #getSubs */
- public int getNumSubs() {
- return numSubs;
- }
-
- /** Returns sub-readers we are merging. */
- public EnumWithSlice[] getSubs() {
- return subs;
- }
-
- @Override
- public int freq() throws IOException {
- return current.freq();
- }
-
- @Override
- public int docID() {
- return doc;
- }
-
- @Override
- public int advance(int target) {
- throw new UnsupportedOperationException();
- }
-
- @Override
- public int nextDoc() throws IOException {
- while(true) {
- if (current == null) {
- if (upto == numSubs-1) {
- return this.doc = NO_MORE_DOCS;
- } else {
- upto++;
- final int reader = subs[upto].slice.readerIndex;
- current = subs[upto].docsEnum;
- currentBase = mergeState.docBase[reader];
- currentMap = mergeState.docMaps[reader];
- assert currentMap.maxDoc() == subs[upto].slice.length: "readerIndex=" + reader + " subs.len=" + subs.length + " len1=" + currentMap.maxDoc() + " vs " + subs[upto].slice.length;
- }
- }
-
- int doc = current.nextDoc();
- if (doc != NO_MORE_DOCS) {
- // compact deletions
- doc = currentMap.get(doc);
- if (doc == -1) {
- continue;
- }
- return this.doc = currentBase + doc;
- } else {
- current = null;
- }
- }
- }
-
- @Override
- public long cost() {
- long cost = 0;
- for (EnumWithSlice enumWithSlice : subs) {
- cost += enumWithSlice.docsEnum.cost();
- }
- return cost;
- }
-}
-
diff --git a/lucene/core/src/java/org/apache/lucene/index/MappingMultiDocsAndPositionsEnum.java b/lucene/core/src/java/org/apache/lucene/index/MappingMultiPostingsEnum.java
similarity index 85%
rename from lucene/core/src/java/org/apache/lucene/index/MappingMultiDocsAndPositionsEnum.java
rename to lucene/core/src/java/org/apache/lucene/index/MappingMultiPostingsEnum.java
index 8fd316a5ca8..45f9a3ba5dc 100644
--- a/lucene/core/src/java/org/apache/lucene/index/MappingMultiDocsAndPositionsEnum.java
+++ b/lucene/core/src/java/org/apache/lucene/index/MappingMultiPostingsEnum.java
@@ -17,11 +17,11 @@ package org.apache.lucene.index;
* limitations under the License.
*/
-import org.apache.lucene.index.MultiDocsAndPositionsEnum.EnumWithSlice;
-import org.apache.lucene.util.BytesRef;
-
import java.io.IOException;
+import org.apache.lucene.index.MultiPostingsEnum.EnumWithSlice;
+import org.apache.lucene.util.BytesRef;
+
/**
* Exposes flex API, merged from flex API of sub-segments,
* remapping docIDs (this is used for segment merging).
@@ -29,23 +29,23 @@ import java.io.IOException;
* @lucene.experimental
*/
-final class MappingMultiDocsAndPositionsEnum extends DocsAndPositionsEnum {
- private MultiDocsAndPositionsEnum.EnumWithSlice[] subs;
+final class MappingMultiPostingsEnum extends PostingsEnum {
+ private MultiPostingsEnum.EnumWithSlice[] subs;
int numSubs;
int upto;
MergeState.DocMap currentMap;
- DocsAndPositionsEnum current;
+ PostingsEnum current;
int currentBase;
int doc = -1;
private MergeState mergeState;
- MultiDocsAndPositionsEnum multiDocsAndPositionsEnum;
+ MultiPostingsEnum multiDocsAndPositionsEnum;
/** Sole constructor. */
- public MappingMultiDocsAndPositionsEnum(MergeState mergeState) {
+ public MappingMultiPostingsEnum(MergeState mergeState) {
this.mergeState = mergeState;
}
- MappingMultiDocsAndPositionsEnum reset(MultiDocsAndPositionsEnum postingsEnum) {
+ MappingMultiPostingsEnum reset(MultiPostingsEnum postingsEnum) {
this.numSubs = postingsEnum.getNumSubs();
this.subs = postingsEnum.getSubs();
upto = -1;
@@ -89,7 +89,7 @@ final class MappingMultiDocsAndPositionsEnum extends DocsAndPositionsEnum {
} else {
upto++;
final int reader = subs[upto].slice.readerIndex;
- current = subs[upto].docsAndPositionsEnum;
+ current = subs[upto].postingsEnum;
currentBase = mergeState.docBase[reader];
currentMap = mergeState.docMaps[reader];
}
@@ -133,7 +133,7 @@ final class MappingMultiDocsAndPositionsEnum extends DocsAndPositionsEnum {
public long cost() {
long cost = 0;
for (EnumWithSlice enumWithSlice : subs) {
- cost += enumWithSlice.docsAndPositionsEnum.cost();
+ cost += enumWithSlice.postingsEnum.cost();
}
return cost;
}
diff --git a/lucene/core/src/java/org/apache/lucene/index/MultiDocsEnum.java b/lucene/core/src/java/org/apache/lucene/index/MultiDocsEnum.java
deleted file mode 100644
index 082d266f0a7..00000000000
--- a/lucene/core/src/java/org/apache/lucene/index/MultiDocsEnum.java
+++ /dev/null
@@ -1,176 +0,0 @@
-package org.apache.lucene.index;
-
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements. See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-
-import java.io.IOException;
-import java.util.Arrays;
-
-/**
- * Exposes {@link DocsEnum}, merged from {@link DocsEnum}
- * API of sub-segments.
- *
- * @lucene.experimental
- */
-
-public final class MultiDocsEnum extends DocsEnum {
- private final MultiTermsEnum parent;
- final DocsEnum[] subDocsEnum;
- private final EnumWithSlice[] subs;
- int numSubs;
- int upto;
- DocsEnum current;
- int currentBase;
- int doc = -1;
-
- /** Sole constructor
- * @param parent The {@link MultiTermsEnum} that created us.
- * @param subReaderCount How many sub-readers are being merged. */
- public MultiDocsEnum(MultiTermsEnum parent, int subReaderCount) {
- this.parent = parent;
- subDocsEnum = new DocsEnum[subReaderCount];
- this.subs = new EnumWithSlice[subReaderCount];
- for (int i = 0; i < subs.length; i++) {
- subs[i] = new EnumWithSlice();
- }
- }
-
- MultiDocsEnum reset(final EnumWithSlice[] subs, final int numSubs) {
- this.numSubs = numSubs;
-
- for(int i=0;i<numSubs;i++) {
- this.subs[i].docsEnum = subs[i].docsEnum;
- this.subs[i].slice = subs[i].slice;
- }
- upto = -1;
- doc = -1;
- current = null;
- return this;
- }
-
- /** Returns {@code true} if this instance can be reused by
- * the provided {@link MultiTermsEnum}. */
- public boolean canReuse(MultiTermsEnum parent) {
- return this.parent == parent;
- }
-
- /** How many sub-readers we are merging.
- * @see #getSubs */
- public int getNumSubs() {
- return numSubs;
- }
-
- /** Returns sub-readers we are merging. */
- public EnumWithSlice[] getSubs() {
- return subs;
- }
-
- @Override
- public int freq() throws IOException {
- return current.freq();
- }
-
- @Override
- public int docID() {
- return doc;
- }
-
- @Override
- public int advance(int target) throws IOException {
- assert target > doc;
- while(true) {
- if (current != null) {
- final int doc;
- if (target < currentBase) {
- // target was in the previous slice but there was no matching doc after it
- doc = current.nextDoc();
- } else {
- doc = current.advance(target-currentBase);
- }
- if (doc == NO_MORE_DOCS) {
- current = null;
- } else {
- return this.doc = doc + currentBase;
- }
- } else if (upto == numSubs-1) {
- return this.doc = NO_MORE_DOCS;
- } else {
- upto++;
- current = subs[upto].docsEnum;
- currentBase = subs[upto].slice.start;
- }
- }
- }
-
- @Override
- public int nextDoc() throws IOException {
- while(true) {
- if (current == null) {
- if (upto == numSubs-1) {
- return this.doc = NO_MORE_DOCS;
- } else {
- upto++;
- current = subs[upto].docsEnum;
- currentBase = subs[upto].slice.start;
- }
- }
-
- final int doc = current.nextDoc();
- if (doc != NO_MORE_DOCS) {
- return this.doc = currentBase + doc;
- } else {
- current = null;
- }
- }
- }
-
- @Override
- public long cost() {
- long cost = 0;
- for (int i = 0; i < numSubs; i++) {
- cost += subs[i].docsEnum.cost();
- }
- return cost;
- }
-
- // TODO: implement bulk read more efficiently than super
- /** Holds a {@link DocsEnum} along with the
- * corresponding {@link ReaderSlice}. */
- public final static class EnumWithSlice {
- EnumWithSlice() {
- }
-
- /** {@link DocsEnum} of this sub-reader. */
- public DocsEnum docsEnum;
-
- /** {@link ReaderSlice} describing how this sub-reader
- * fits into the composite reader. */
- public ReaderSlice slice;
-
- @Override
- public String toString() {
- return slice.toString()+":"+docsEnum;
- }
- }
-
- @Override
- public String toString() {
- return "MultiDocsEnum(" + Arrays.toString(getSubs()) + ")";
- }
-}
-
diff --git a/lucene/core/src/java/org/apache/lucene/index/MultiFields.java b/lucene/core/src/java/org/apache/lucene/index/MultiFields.java
index 8a6dd0cfe1f..55f0c1b29fe 100644
--- a/lucene/core/src/java/org/apache/lucene/index/MultiFields.java
+++ b/lucene/core/src/java/org/apache/lucene/index/MultiFields.java
@@ -120,55 +120,55 @@ public final class MultiFields extends Fields {
return getFields(r).terms(field);
}
- /** Returns {@link DocsEnum} for the specified field and
+ /** Returns {@link PostingsEnum} for the specified field and
* term. This will return null if the field or term does
* not exist. */
- public static DocsEnum getTermDocsEnum(IndexReader r, Bits liveDocs, String field, BytesRef term) throws IOException {
- return getTermDocsEnum(r, liveDocs, field, term, DocsEnum.FLAG_FREQS);
+ public static PostingsEnum getTermDocsEnum(IndexReader r, Bits liveDocs, String field, BytesRef term) throws IOException {
+ return getTermDocsEnum(r, liveDocs, field, term, PostingsEnum.FLAG_FREQS);
}
- /** Returns {@link DocsEnum} for the specified field and
+ /** Returns {@link PostingsEnum} for the specified field and
* term, with control over whether freqs are required.
* Some codecs may be able to optimize their
* implementation when freqs are not required. This will
* return null if the field or term does not exist. See {@link
- * TermsEnum#docs(Bits,DocsEnum,int)}.*/
- public static DocsEnum getTermDocsEnum(IndexReader r, Bits liveDocs, String field, BytesRef term, int flags) throws IOException {
+ * TermsEnum#postings(Bits, PostingsEnum,int)}.*/
+ public static PostingsEnum getTermDocsEnum(IndexReader r, Bits liveDocs, String field, BytesRef term, int flags) throws IOException {
assert field != null;
assert term != null;
final Terms terms = getTerms(r, field);
if (terms != null) {
final TermsEnum termsEnum = terms.iterator(null);
if (termsEnum.seekExact(term)) {
- return termsEnum.docs(liveDocs, null, flags);
+ return termsEnum.postings(liveDocs, null, flags);
}
}
return null;
}
- /** Returns {@link DocsAndPositionsEnum} for the specified
+ /** Returns {@link PostingsEnum} for the specified
* field and term. This will return null if the field or
* term does not exist or positions were not indexed.
* @see #getTermPositionsEnum(IndexReader, Bits, String, BytesRef, int) */
- public static DocsAndPositionsEnum getTermPositionsEnum(IndexReader r, Bits liveDocs, String field, BytesRef term) throws IOException {
- return getTermPositionsEnum(r, liveDocs, field, term, DocsAndPositionsEnum.FLAG_OFFSETS | DocsAndPositionsEnum.FLAG_PAYLOADS);
+ public static PostingsEnum getTermPositionsEnum(IndexReader r, Bits liveDocs, String field, BytesRef term) throws IOException {
+ return getTermPositionsEnum(r, liveDocs, field, term, PostingsEnum.FLAG_OFFSETS | PostingsEnum.FLAG_PAYLOADS);
}
- /** Returns {@link DocsAndPositionsEnum} for the specified
+ /** Returns {@link PostingsEnum} for the specified
* field and term, with control over whether offsets and payloads are
* required. Some codecs may be able to optimize
* their implementation when offsets and/or payloads are not
* required. This will return null if the field or term does not
* exist or positions were not indexed. See {@link
- * TermsEnum#docsAndPositions(Bits,DocsAndPositionsEnum,int)}. */
- public static DocsAndPositionsEnum getTermPositionsEnum(IndexReader r, Bits liveDocs, String field, BytesRef term, int flags) throws IOException {
+ * TermsEnum#postings(Bits, PostingsEnum,int)}. */
+ public static PostingsEnum getTermPositionsEnum(IndexReader r, Bits liveDocs, String field, BytesRef term, int flags) throws IOException {
assert field != null;
assert term != null;
final Terms terms = getTerms(r, field);
if (terms != null) {
final TermsEnum termsEnum = terms.iterator(null);
if (termsEnum.seekExact(term)) {
- return termsEnum.docsAndPositions(liveDocs, null, flags);
+ return termsEnum.postings(liveDocs, null, flags);
}
}
return null;
diff --git a/lucene/core/src/java/org/apache/lucene/index/MultiDocsAndPositionsEnum.java b/lucene/core/src/java/org/apache/lucene/index/MultiPostingsEnum.java
similarity index 81%
rename from lucene/core/src/java/org/apache/lucene/index/MultiDocsAndPositionsEnum.java
rename to lucene/core/src/java/org/apache/lucene/index/MultiPostingsEnum.java
index 33e2127bacc..27fc9adb763 100644
--- a/lucene/core/src/java/org/apache/lucene/index/MultiDocsAndPositionsEnum.java
+++ b/lucene/core/src/java/org/apache/lucene/index/MultiPostingsEnum.java
@@ -17,31 +17,34 @@ package org.apache.lucene.index;
* limitations under the License.
*/
-import org.apache.lucene.util.BytesRef;
-
import java.io.IOException;
import java.util.Arrays;
+import org.apache.lucene.util.BytesRef;
+
/**
- * Exposes flex API, merged from flex API of sub-segments.
+ * Exposes {@link PostingsEnum}, merged from {@link PostingsEnum}
+ * API of sub-segments.
*
* @lucene.experimental
*/
-public final class MultiDocsAndPositionsEnum extends DocsAndPositionsEnum {
+public final class MultiPostingsEnum extends PostingsEnum {
private final MultiTermsEnum parent;
- final DocsAndPositionsEnum[] subDocsAndPositionsEnum;
+ final PostingsEnum[] subPostingsEnums;
private final EnumWithSlice[] subs;
int numSubs;
int upto;
- DocsAndPositionsEnum current;
+ PostingsEnum current;
int currentBase;
int doc = -1;
- /** Sole constructor. */
- public MultiDocsAndPositionsEnum(MultiTermsEnum parent, int subReaderCount) {
+ /** Sole constructor.
+ * @param parent The {@link MultiTermsEnum} that created us.
+ * @param subReaderCount How many sub-readers are being merged. */
+ public MultiPostingsEnum(MultiTermsEnum parent, int subReaderCount) {
this.parent = parent;
- subDocsAndPositionsEnum = new DocsAndPositionsEnum[subReaderCount];
+ subPostingsEnums = new PostingsEnum[subReaderCount];
this.subs = new EnumWithSlice[subReaderCount];
for (int i = 0; i < subs.length; i++) {
subs[i] = new EnumWithSlice();
@@ -55,10 +58,10 @@ public final class MultiDocsAndPositionsEnum extends DocsAndPositionsEnum {
}
/** Rre-use and reset this instance on the provided slices. */
- public MultiDocsAndPositionsEnum reset(final EnumWithSlice[] subs, final int numSubs) {
+ public MultiPostingsEnum reset(final EnumWithSlice[] subs, final int numSubs) {
this.numSubs = numSubs;
for(int i=0;i= FLAG_POSITIONS);
+ }
+
+ private AttributeSource atts = null;
+
+ /** Sole constructor. (For invocation by subclass
+ * constructors, typically implicit.) */
+ protected PostingsEnum() {
+ }
+
+ /**
+ * Returns term frequency in the current document, or 1 if the field was
+ * indexed with {@link IndexOptions#DOCS}. Do not call this before
+ * {@link #nextDoc} is first called, nor after {@link #nextDoc} returns
+ * {@link DocIdSetIterator#NO_MORE_DOCS}.
+ *
+ *
+ * NOTE: if the {@link PostingsEnum} was obtained with {@link #FLAG_NONE},
+ * the result of this method is undefined.
+ */
+ public abstract int freq() throws IOException;
+
+ /** Returns the related attributes. */
+ public AttributeSource attributes() {
+ if (atts == null) atts = new AttributeSource();
+ return atts;
+ }
+
+ /**
+ * Returns the next position. If there are no more
+ * positions, or the iterator does not support positions,
+ * this will return PostingsEnum.NO_MORE_POSITIONS */
+ public abstract int nextPosition() throws IOException;
+
+ /** Returns start offset for the current position, or -1
+ * if offsets were not indexed. */
+ public abstract int startOffset() throws IOException;
+
+ /** Returns end offset for the current position, or -1 if
+ * offsets were not indexed. */
+ public abstract int endOffset() throws IOException;
+
+ /** Returns the payload at this position, or null if no
+ * payload was indexed. You should not modify anything
+ * (neither members of the returned BytesRef nor bytes
+ * in the byte[]). */
+ public abstract BytesRef getPayload() throws IOException;
+
+}
diff --git a/lucene/core/src/java/org/apache/lucene/index/SortedDocValuesTermsEnum.java b/lucene/core/src/java/org/apache/lucene/index/SortedDocValuesTermsEnum.java
index 24e7d04e46d..f32f6902e22 100644
--- a/lucene/core/src/java/org/apache/lucene/index/SortedDocValuesTermsEnum.java
+++ b/lucene/core/src/java/org/apache/lucene/index/SortedDocValuesTermsEnum.java
@@ -106,12 +106,7 @@ class SortedDocValuesTermsEnum extends TermsEnum {
}
@Override
- public DocsEnum docs(Bits liveDocs, DocsEnum reuse, int flags) throws IOException {
- throw new UnsupportedOperationException();
- }
-
- @Override
- public DocsAndPositionsEnum docsAndPositions(Bits liveDocs, DocsAndPositionsEnum reuse, int flags) throws IOException {
+ public PostingsEnum postings(Bits liveDocs, PostingsEnum reuse, int flags) throws IOException {
throw new UnsupportedOperationException();
}
diff --git a/lucene/core/src/java/org/apache/lucene/index/SortedSetDocValuesTermsEnum.java b/lucene/core/src/java/org/apache/lucene/index/SortedSetDocValuesTermsEnum.java
index 8f52d00ddb2..d0d9ee77bfc 100644
--- a/lucene/core/src/java/org/apache/lucene/index/SortedSetDocValuesTermsEnum.java
+++ b/lucene/core/src/java/org/apache/lucene/index/SortedSetDocValuesTermsEnum.java
@@ -17,12 +17,12 @@ package org.apache.lucene.index;
* limitations under the License.
*/
-import java.io.IOException;
-
import org.apache.lucene.util.Bits;
import org.apache.lucene.util.BytesRef;
import org.apache.lucene.util.BytesRefBuilder;
+import java.io.IOException;
+
/** Implements a {@link TermsEnum} wrapping a provided
* {@link SortedSetDocValues}. */
@@ -106,12 +106,7 @@ class SortedSetDocValuesTermsEnum extends TermsEnum {
}
@Override
- public DocsEnum docs(Bits liveDocs, DocsEnum reuse, int flags) throws IOException {
- throw new UnsupportedOperationException();
- }
-
- @Override
- public DocsAndPositionsEnum docsAndPositions(Bits liveDocs, DocsAndPositionsEnum reuse, int flags) throws IOException {
+ public PostingsEnum postings(Bits liveDocs, PostingsEnum reuse, int flags) throws IOException {
throw new UnsupportedOperationException();
}
diff --git a/lucene/core/src/java/org/apache/lucene/index/TermContext.java b/lucene/core/src/java/org/apache/lucene/index/TermContext.java
index ada4fc1a8a3..e44f0e6aafa 100644
--- a/lucene/core/src/java/org/apache/lucene/index/TermContext.java
+++ b/lucene/core/src/java/org/apache/lucene/index/TermContext.java
@@ -17,11 +17,11 @@ package org.apache.lucene.index;
* limitations under the License.
*/
+import org.apache.lucene.util.BytesRef;
+
import java.io.IOException;
import java.util.Arrays;
-import org.apache.lucene.util.BytesRef;
-
/**
* Maintains a {@link IndexReader} {@link TermState} view over
* {@link IndexReader} instances containing a single term. The
diff --git a/lucene/core/src/java/org/apache/lucene/index/Terms.java b/lucene/core/src/java/org/apache/lucene/index/Terms.java
index a3109affb2e..955197c0d6a 100644
--- a/lucene/core/src/java/org/apache/lucene/index/Terms.java
+++ b/lucene/core/src/java/org/apache/lucene/index/Terms.java
@@ -111,7 +111,7 @@ public abstract class Terms {
public abstract int getDocCount() throws IOException;
/** Returns true if documents in this field store
- * per-document term frequency ({@link DocsEnum#freq}). */
+ * per-document term frequency ({@link PostingsEnum#freq}). */
public abstract boolean hasFreqs();
/** Returns true if documents in this field store offsets. */
diff --git a/lucene/core/src/java/org/apache/lucene/index/TermsEnum.java b/lucene/core/src/java/org/apache/lucene/index/TermsEnum.java
index 895018be0da..8dfc1e22f50 100644
--- a/lucene/core/src/java/org/apache/lucene/index/TermsEnum.java
+++ b/lucene/core/src/java/org/apache/lucene/index/TermsEnum.java
@@ -27,9 +27,9 @@ import org.apache.lucene.util.BytesRefIterator;
/** Iterator to seek ({@link #seekCeil(BytesRef)}, {@link
* #seekExact(BytesRef)}) or step through ({@link
* #next} terms to obtain frequency information ({@link
- * #docFreq}), {@link DocsEnum} or {@link
- * DocsAndPositionsEnum} for the current term ({@link
- * #docs}.
+ * #docFreq}), {@link PostingsEnum} or {@link
+ * PostingsEnum} for the current term ({@link
+ * #postings}.
*
*
Term enumerations are always ordered by
* BytesRef.compareTo, which is Unicode sort
@@ -138,57 +138,30 @@ public abstract class TermsEnum implements BytesRefIterator {
* deleted documents into account. */
public abstract long totalTermFreq() throws IOException;
- /** Get {@link DocsEnum} for the current term. Do not
+ /** Get {@link PostingsEnum} for the current term. Do not
* call this when the enum is unpositioned. This method
* will not return null.
*
* @param liveDocs unset bits are documents that should not
* be returned
- * @param reuse pass a prior DocsEnum for possible reuse */
- public final DocsEnum docs(Bits liveDocs, DocsEnum reuse) throws IOException {
- return docs(liveDocs, reuse, DocsEnum.FLAG_FREQS);
+ * @param reuse pass a prior PostingsEnum for possible reuse */
+ public final PostingsEnum postings(Bits liveDocs, PostingsEnum reuse) throws IOException {
+ return postings(liveDocs, reuse, PostingsEnum.FLAG_FREQS);
}
- /** Get {@link DocsEnum} for the current term, with
- * control over whether freqs are required. Do not
- * call this when the enum is unpositioned. This method
- * will not return null.
+ /** Get {@link PostingsEnum} for the current term, with
+ * control over whether freqs, positions, offsets or payloads
+ * are required. Do not call this when the enum is
+ * unpositioned. This method may return null if the postings
+ * information required is not available from the index
*
* @param liveDocs unset bits are documents that should not
* be returned
- * @param reuse pass a prior DocsEnum for possible reuse
+ * @param reuse pass a prior PostingsEnum for possible reuse
* @param flags specifies which optional per-document values
- * you require; see {@link DocsEnum#FLAG_FREQS}
- * @see #docs(Bits, DocsEnum, int) */
- public abstract DocsEnum docs(Bits liveDocs, DocsEnum reuse, int flags) throws IOException;
-
- /** Get {@link DocsAndPositionsEnum} for the current term.
- * Do not call this when the enum is unpositioned. This
- * method will return null if positions were not
- * indexed.
- *
- * @param liveDocs unset bits are documents that should not
- * be returned
- * @param reuse pass a prior DocsAndPositionsEnum for possible reuse
- * @see #docsAndPositions(Bits, DocsAndPositionsEnum, int) */
- public final DocsAndPositionsEnum docsAndPositions(Bits liveDocs, DocsAndPositionsEnum reuse) throws IOException {
- return docsAndPositions(liveDocs, reuse, DocsAndPositionsEnum.FLAG_OFFSETS | DocsAndPositionsEnum.FLAG_PAYLOADS);
- }
-
- /** Get {@link DocsAndPositionsEnum} for the current term,
- * with control over whether offsets and payloads are
- * required. Some codecs may be able to optimize their
- * implementation when offsets and/or payloads are not required.
- * Do not call this when the enum is unpositioned. This
- * will return null if positions were not indexed.
-
- * @param liveDocs unset bits are documents that should not
- * be returned
- * @param reuse pass a prior DocsAndPositionsEnum for possible reuse
- * @param flags specifies which optional per-position values you
- * require; see {@link DocsAndPositionsEnum#FLAG_OFFSETS} and
- * {@link DocsAndPositionsEnum#FLAG_PAYLOADS}. */
- public abstract DocsAndPositionsEnum docsAndPositions(Bits liveDocs, DocsAndPositionsEnum reuse, int flags) throws IOException;
+ * you require; see {@link PostingsEnum#FLAG_FREQS}
+ * @see #postings(Bits, PostingsEnum, int) */
+ public abstract PostingsEnum postings(Bits liveDocs, PostingsEnum reuse, int flags) throws IOException;
/**
* Expert: Returns the TermsEnums internal state to position the TermsEnum
@@ -245,12 +218,7 @@ public abstract class TermsEnum implements BytesRefIterator {
}
@Override
- public DocsEnum docs(Bits liveDocs, DocsEnum reuse, int flags) {
- throw new IllegalStateException("this method should never be called");
- }
-
- @Override
- public DocsAndPositionsEnum docsAndPositions(Bits liveDocs, DocsAndPositionsEnum reuse, int flags) {
+ public PostingsEnum postings(Bits liveDocs, PostingsEnum reuse, int flags) {
throw new IllegalStateException("this method should never be called");
}
@@ -273,5 +241,6 @@ public abstract class TermsEnum implements BytesRefIterator {
public void seekExact(BytesRef term, TermState state) {
throw new IllegalStateException("this method should never be called");
}
+
};
}
diff --git a/lucene/core/src/java/org/apache/lucene/search/CachingCollector.java b/lucene/core/src/java/org/apache/lucene/search/CachingCollector.java
index 140d6d60bc5..4ea0fad0f36 100644
--- a/lucene/core/src/java/org/apache/lucene/search/CachingCollector.java
+++ b/lucene/core/src/java/org/apache/lucene/search/CachingCollector.java
@@ -17,15 +17,16 @@ package org.apache.lucene.search;
* limitations under the License.
*/
-import org.apache.lucene.index.LeafReaderContext;
-import org.apache.lucene.util.ArrayUtil;
-import org.apache.lucene.util.RamUsageEstimator;
-
import java.io.IOException;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.List;
+import org.apache.lucene.index.LeafReaderContext;
+import org.apache.lucene.util.ArrayUtil;
+import org.apache.lucene.util.BytesRef;
+import org.apache.lucene.util.RamUsageEstimator;
+
/**
* Caches all docs, and optionally also scores, coming from
* a search, and is then able to replay them to another
@@ -73,11 +74,32 @@ public abstract class CachingCollector extends FilterCollector {
@Override
public final int freq() { throw new UnsupportedOperationException(); }
+ @Override
+ public int nextPosition() throws IOException {
+ throw new UnsupportedOperationException();
+ }
+
+ @Override
+ public int startOffset() throws IOException {
+ throw new UnsupportedOperationException();
+ }
+
+ @Override
+ public int endOffset() throws IOException {
+ throw new UnsupportedOperationException();
+ }
+
+ @Override
+ public BytesRef getPayload() throws IOException {
+ throw new UnsupportedOperationException();
+ }
+
@Override
public final int nextDoc() { throw new UnsupportedOperationException(); }
@Override
public long cost() { return 1; }
+
}
private static class NoScoreCachingCollector extends CachingCollector {
diff --git a/lucene/core/src/java/org/apache/lucene/search/Collector.java b/lucene/core/src/java/org/apache/lucene/search/Collector.java
index e4560da33f1..664035345da 100644
--- a/lucene/core/src/java/org/apache/lucene/search/Collector.java
+++ b/lucene/core/src/java/org/apache/lucene/search/Collector.java
@@ -72,7 +72,7 @@ public interface Collector {
* next atomic reader context
*/
LeafCollector getLeafCollector(LeafReaderContext context) throws IOException;
-
+
/**
* Indicates if document scores are needed by this collector.
*
diff --git a/lucene/core/src/java/org/apache/lucene/search/ConjunctionScorer.java b/lucene/core/src/java/org/apache/lucene/search/ConjunctionScorer.java
index 3e811873618..93781066ca7 100644
--- a/lucene/core/src/java/org/apache/lucene/search/ConjunctionScorer.java
+++ b/lucene/core/src/java/org/apache/lucene/search/ConjunctionScorer.java
@@ -23,9 +23,11 @@ import java.util.Collection;
import java.util.Comparator;
import org.apache.lucene.util.ArrayUtil;
+import org.apache.lucene.util.BytesRef;
/** Scorer for conjunctions, sets of queries, all of which are required. */
class ConjunctionScorer extends Scorer {
+
protected int lastDoc = -1;
protected final DocsAndFreqs[] docsAndFreqs;
private final DocsAndFreqs lead;
@@ -34,7 +36,7 @@ class ConjunctionScorer extends Scorer {
ConjunctionScorer(Weight weight, Scorer[] scorers) {
this(weight, scorers, 1f);
}
-
+
ConjunctionScorer(Weight weight, Scorer[] scorers, float coord) {
super(weight);
this.coord = coord;
@@ -109,12 +111,32 @@ class ConjunctionScorer extends Scorer {
}
return sum * coord;
}
-
+
@Override
public int freq() {
return docsAndFreqs.length;
}
+ @Override
+ public int nextPosition() throws IOException {
+ return -1;
+ }
+
+ @Override
+ public int startOffset() throws IOException {
+ return -1;
+ }
+
+ @Override
+ public int endOffset() throws IOException {
+ return -1;
+ }
+
+ @Override
+ public BytesRef getPayload() throws IOException {
+ return null;
+ }
+
@Override
public long cost() {
return lead.scorer.cost();
@@ -133,7 +155,7 @@ class ConjunctionScorer extends Scorer {
final long cost;
final Scorer scorer;
int doc = -1;
-
+
DocsAndFreqs(Scorer scorer) {
this.scorer = scorer;
this.cost = scorer.cost();
diff --git a/lucene/core/src/java/org/apache/lucene/search/ConstantScoreQuery.java b/lucene/core/src/java/org/apache/lucene/search/ConstantScoreQuery.java
index bf50a49abf4..9ba923e89b6 100644
--- a/lucene/core/src/java/org/apache/lucene/search/ConstantScoreQuery.java
+++ b/lucene/core/src/java/org/apache/lucene/search/ConstantScoreQuery.java
@@ -17,17 +17,18 @@ package org.apache.lucene.search;
* limitations under the License.
*/
-import org.apache.lucene.index.LeafReaderContext;
-import org.apache.lucene.index.IndexReader;
-import org.apache.lucene.index.Term;
-import org.apache.lucene.util.Bits;
-import org.apache.lucene.util.ToStringUtils;
-
import java.io.IOException;
import java.util.Collection;
import java.util.Collections;
import java.util.Set;
+import org.apache.lucene.index.IndexReader;
+import org.apache.lucene.index.LeafReaderContext;
+import org.apache.lucene.index.Term;
+import org.apache.lucene.util.Bits;
+import org.apache.lucene.util.BytesRef;
+import org.apache.lucene.util.ToStringUtils;
+
/**
* A query that wraps another query or a filter and simply returns a constant score equal to the
* query boost for every document that matches the filter or query.
@@ -135,7 +136,6 @@ public class ConstantScoreQuery extends Query {
@Override
public BulkScorer bulkScorer(LeafReaderContext context, Bits acceptDocs, boolean needsScores) throws IOException {
- final DocIdSetIterator disi;
if (filter != null) {
assert query == null;
return super.bulkScorer(context, acceptDocs, needsScores);
@@ -151,23 +151,26 @@ public class ConstantScoreQuery extends Query {
@Override
public Scorer scorer(LeafReaderContext context, Bits acceptDocs, boolean needsScores) throws IOException {
- final DocIdSetIterator disi;
if (filter != null) {
assert query == null;
final DocIdSet dis = filter.getDocIdSet(context, acceptDocs);
if (dis == null) {
return null;
}
- disi = dis.iterator();
+ final DocIdSetIterator disi = dis.iterator();
+ if (disi == null)
+ return null;
+ return new ConstantDocIdSetIteratorScorer(disi, this, queryWeight);
} else {
assert query != null && innerWeight != null;
- disi = innerWeight.scorer(context, acceptDocs, false);
+ Scorer scorer = innerWeight.scorer(context, acceptDocs, false);
+ if (scorer == null) {
+ return null;
+ }
+ return new ConstantScoreScorer(scorer, queryWeight);
}
- if (disi == null) {
- return null;
- }
- return new ConstantScorer(disi, this, queryWeight);
+
}
@Override
@@ -216,7 +219,7 @@ public class ConstantScoreQuery extends Query {
@Override
public void setScorer(Scorer scorer) throws IOException {
// we must wrap again here, but using the scorer passed in as parameter:
- in.setScorer(new ConstantScorer(scorer, weight, theScore));
+ in.setScorer(new ConstantScoreScorer(scorer, theScore));
}
};
}
@@ -227,11 +230,40 @@ public class ConstantScoreQuery extends Query {
}
}
- protected class ConstantScorer extends Scorer {
+ protected class ConstantScoreScorer extends FilterScorer {
+
+ private final float score;
+
+ public ConstantScoreScorer(Scorer wrapped, float score) {
+ super(wrapped);
+ this.score = score;
+ }
+
+ @Override
+ public int freq() throws IOException {
+ return 1;
+ }
+
+ @Override
+ public float score() throws IOException {
+ return score;
+ }
+
+ @Override
+ public Collection<ChildScorer> getChildren() {
+ if (query != null) {
+ return Collections.singletonList(new ChildScorer(in, "constant"));
+ } else {
+ return Collections.emptyList();
+ }
+ }
+ }
+
+ protected class ConstantDocIdSetIteratorScorer extends Scorer {
final DocIdSetIterator docIdSetIterator;
final float theScore;
- public ConstantScorer(DocIdSetIterator docIdSetIterator, Weight w, float theScore) {
+ public ConstantDocIdSetIteratorScorer(DocIdSetIterator docIdSetIterator, Weight w, float theScore) {
super(w);
this.theScore = theScore;
this.docIdSetIterator = docIdSetIterator;
@@ -258,11 +290,31 @@ public class ConstantScoreQuery extends Query {
return 1;
}
+ @Override
+ public int nextPosition() throws IOException {
+ return -1;
+ }
+
+ @Override
+ public int startOffset() throws IOException {
+ return -1;
+ }
+
+ @Override
+ public int endOffset() throws IOException {
+ return -1;
+ }
+
+ @Override
+ public BytesRef getPayload() throws IOException {
+ return null;
+ }
+
@Override
public int advance(int target) throws IOException {
return docIdSetIterator.advance(target);
}
-
+
@Override
public long cost() {
return docIdSetIterator.cost();
diff --git a/lucene/core/src/java/org/apache/lucene/search/DisjunctionScorer.java b/lucene/core/src/java/org/apache/lucene/search/DisjunctionScorer.java
index ce46c2d952c..4955f332ec0 100644
--- a/lucene/core/src/java/org/apache/lucene/search/DisjunctionScorer.java
+++ b/lucene/core/src/java/org/apache/lucene/search/DisjunctionScorer.java
@@ -22,15 +22,18 @@ import java.util.ArrayList;
import java.util.Collection;
import org.apache.lucene.search.ScorerPriorityQueue.ScorerWrapper;
+import org.apache.lucene.util.BytesRef;
/**
* Base class for Scorers that score disjunctions.
*/
abstract class DisjunctionScorer extends Scorer {
+
private final ScorerPriorityQueue subScorers;
/** The document number of the current match. */
protected int doc = -1;
+ protected int numScorers;
/** Number of matching scorers for the current match. */
private int freq = -1;
/** Linked list of scorers which are on the current doc */
@@ -56,6 +59,26 @@ abstract class DisjunctionScorer extends Scorer {
return children;
}
+ @Override
+ public int nextPosition() throws IOException {
+ return -1;
+ }
+
+ @Override
+ public int startOffset() throws IOException {
+ return -1;
+ }
+
+ @Override
+ public int endOffset() throws IOException {
+ return -1;
+ }
+
+ @Override
+ public BytesRef getPayload() throws IOException {
+ return null;
+ }
+
@Override
public final long cost() {
long sum = 0;
diff --git a/lucene/core/src/java/org/apache/lucene/search/DocValuesTermsFilter.java b/lucene/core/src/java/org/apache/lucene/search/DocValuesTermsFilter.java
index 464e5b22197..f4bc6e1bba8 100644
--- a/lucene/core/src/java/org/apache/lucene/search/DocValuesTermsFilter.java
+++ b/lucene/core/src/java/org/apache/lucene/search/DocValuesTermsFilter.java
@@ -21,7 +21,6 @@ import java.io.IOException;
import org.apache.lucene.index.LeafReaderContext;
import org.apache.lucene.index.DocValues;
-import org.apache.lucene.index.DocsEnum; // javadoc @link
import org.apache.lucene.index.IndexReader;
import org.apache.lucene.index.SortedDocValues;
import org.apache.lucene.util.Bits;
@@ -63,7 +62,7 @@ import org.apache.lucene.util.FixedBitSet;
*
* In contrast, TermsFilter builds up an {@link FixedBitSet},
* keyed by docID, every time it's created, by enumerating
- * through all matching docs using {@link DocsEnum} to seek
+ * through all matching docs using {@link org.apache.lucene.index.PostingsEnum} to seek
* and scan through each term's docID list. While there is
* no linear scan of all docIDs, besides the allocation of
* the underlying array in the {@link FixedBitSet}, this
diff --git a/lucene/core/src/java/org/apache/lucene/search/ExactPhraseScorer.java b/lucene/core/src/java/org/apache/lucene/search/ExactPhraseScorer.java
index 5de511ce232..9e994650fe0 100644
--- a/lucene/core/src/java/org/apache/lucene/search/ExactPhraseScorer.java
+++ b/lucene/core/src/java/org/apache/lucene/search/ExactPhraseScorer.java
@@ -22,6 +22,7 @@ import java.util.Arrays;
import org.apache.lucene.index.*;
import org.apache.lucene.search.similarities.Similarity;
+import org.apache.lucene.util.BytesRef;
final class ExactPhraseScorer extends Scorer {
private final int endMinus1;
@@ -35,21 +36,21 @@ final class ExactPhraseScorer extends Scorer {
private final long cost;
private final static class ChunkState {
- final DocsAndPositionsEnum posEnum;
+ final PostingsEnum posEnum;
final int offset;
int posUpto;
int posLimit;
int pos;
int lastPos;
- public ChunkState(DocsAndPositionsEnum posEnum, int offset) {
+ public ChunkState(PostingsEnum posEnum, int offset) {
this.posEnum = posEnum;
this.offset = offset;
}
}
private final ChunkState[] chunkStates;
- private final DocsAndPositionsEnum lead;
+ private final PostingsEnum lead;
private int docID = -1;
private int freq;
@@ -81,7 +82,7 @@ final class ExactPhraseScorer extends Scorer {
// TODO: don't dup this logic from conjunctionscorer :)
advanceHead: for(;;) {
for (int i = 1; i < chunkStates.length; i++) {
- final DocsAndPositionsEnum de = chunkStates[i].posEnum;
+ final PostingsEnum de = chunkStates[i].posEnum;
if (de.docID() < doc) {
int d = de.advance(doc);
@@ -126,6 +127,26 @@ final class ExactPhraseScorer extends Scorer {
return freq;
}
+ @Override
+ public int nextPosition() throws IOException {
+ return -1;
+ }
+
+ @Override
+ public int startOffset() throws IOException {
+ return -1;
+ }
+
+ @Override
+ public int endOffset() throws IOException {
+ return -1;
+ }
+
+ @Override
+ public BytesRef getPayload() throws IOException {
+ return null;
+ }
+
@Override
public int docID() {
return docID;
diff --git a/lucene/core/src/java/org/apache/lucene/search/FakeScorer.java b/lucene/core/src/java/org/apache/lucene/search/FakeScorer.java
index e2a50c8f37d..8c7f419046b 100644
--- a/lucene/core/src/java/org/apache/lucene/search/FakeScorer.java
+++ b/lucene/core/src/java/org/apache/lucene/search/FakeScorer.java
@@ -17,8 +17,11 @@ package org.apache.lucene.search;
* limitations under the License.
*/
+import java.io.IOException;
import java.util.Collection;
+import org.apache.lucene.util.BytesRef;
+
/** Used by {@link BulkScorer}s that need to pass a {@link
* Scorer} to {@link LeafCollector#setScorer}. */
final class FakeScorer extends Scorer {
@@ -45,6 +48,26 @@ final class FakeScorer extends Scorer {
return freq;
}
+ @Override
+ public int nextPosition() throws IOException {
+ throw new UnsupportedOperationException("FakeScorer doesn't support nextPosition()");
+ }
+
+ @Override
+ public int startOffset() throws IOException {
+ throw new UnsupportedOperationException("FakeScorer doesn't support startOffset()");
+ }
+
+ @Override
+ public int endOffset() throws IOException {
+ throw new UnsupportedOperationException("FakeScorer doesn't support endOffset()");
+ }
+
+ @Override
+ public BytesRef getPayload() throws IOException {
+ throw new UnsupportedOperationException("FakeScorer doesn't support getPayload()");
+ }
+
@Override
public int nextDoc() {
throw new UnsupportedOperationException("FakeScorer doesn't support nextDoc()");
diff --git a/lucene/core/src/java/org/apache/lucene/search/FilterScorer.java b/lucene/core/src/java/org/apache/lucene/search/FilterScorer.java
index 88881bdcab9..5ff8206b3f0 100644
--- a/lucene/core/src/java/org/apache/lucene/search/FilterScorer.java
+++ b/lucene/core/src/java/org/apache/lucene/search/FilterScorer.java
@@ -18,9 +18,9 @@ package org.apache.lucene.search;
*/
import java.io.IOException;
-import java.util.Collection;
import org.apache.lucene.util.AttributeSource;
+import org.apache.lucene.util.BytesRef;
/**
* A {@code FilterScorer} contains another {@code Scorer}, which it
@@ -32,13 +32,27 @@ import org.apache.lucene.util.AttributeSource;
* further override some of these methods and may also provide additional
* methods and fields.
*/
-abstract class FilterScorer extends Scorer {
+public abstract class FilterScorer extends Scorer {
protected final Scorer in;
-
+
+ /**
+ * Create a new FilterScorer
+ * @param in the {@link Scorer} to wrap
+ */
public FilterScorer(Scorer in) {
super(in.weight);
this.in = in;
}
+
+ /**
+ * Create a new FilterScorer with a specific weight
+ * @param in the {@link Scorer} to wrap
+ * @param weight a {@link Weight}
+ */
+ public FilterScorer(Scorer in, Weight weight) {
+ super(weight);
+ this.in = in;
+ }
@Override
public float score() throws IOException {
@@ -60,6 +74,11 @@ abstract class FilterScorer extends Scorer {
return in.nextDoc();
}
+ @Override
+ public int nextPosition() throws IOException {
+ return in.nextPosition();
+ }
+
@Override
public int advance(int target) throws IOException {
return in.advance(target);
@@ -70,6 +89,21 @@ abstract class FilterScorer extends Scorer {
return in.cost();
}
+ @Override
+ public int startOffset() throws IOException {
+ return in.startOffset();
+ }
+
+ @Override
+ public int endOffset() throws IOException {
+ return in.endOffset();
+ }
+
+ @Override
+ public BytesRef getPayload() throws IOException {
+ return in.getPayload();
+ }
+
@Override
public AttributeSource attributes() {
return in.attributes();
diff --git a/lucene/core/src/java/org/apache/lucene/search/FilteredQuery.java b/lucene/core/src/java/org/apache/lucene/search/FilteredQuery.java
index dae42947aec..d2740e60cdd 100644
--- a/lucene/core/src/java/org/apache/lucene/search/FilteredQuery.java
+++ b/lucene/core/src/java/org/apache/lucene/search/FilteredQuery.java
@@ -17,17 +17,17 @@ package org.apache.lucene.search;
* limitations under the License.
*/
-import org.apache.lucene.index.LeafReaderContext;
-import org.apache.lucene.index.IndexReader;
-import org.apache.lucene.index.Term;
-import org.apache.lucene.util.Bits;
-import org.apache.lucene.util.ToStringUtils;
-
import java.io.IOException;
import java.util.Collection;
import java.util.Collections;
import java.util.Set;
+import org.apache.lucene.index.IndexReader;
+import org.apache.lucene.index.LeafReaderContext;
+import org.apache.lucene.index.Term;
+import org.apache.lucene.util.Bits;
+import org.apache.lucene.util.ToStringUtils;
+
/**
* A query that applies a filter to the results of another query.
@@ -143,6 +143,7 @@ public class FilteredQuery extends Query {
}
return strategy.filteredBulkScorer(context, weight, filterDocIdSet, needsScores);
+
}
};
}
@@ -153,13 +154,13 @@ public class FilteredQuery extends Query {
* than document scoring or if the filter has a linear running time to compute
* the next matching doc like exact geo distances.
*/
- private static final class QueryFirstScorer extends Scorer {
+ private static final class QueryFirstScorer extends FilterScorer {
private final Scorer scorer;
private int scorerDoc = -1;
private final Bits filterBits;
protected QueryFirstScorer(Weight weight, Bits filterBits, Scorer other) {
- super(weight);
+ super(other, weight);
this.scorer = other;
this.filterBits = filterBits;
}
@@ -184,29 +185,16 @@ public class FilteredQuery extends Query {
return scorerDoc = doc;
}
}
-
@Override
public int docID() {
return scorerDoc;
}
-
- @Override
- public float score() throws IOException {
- return scorer.score();
- }
-
- @Override
- public int freq() throws IOException { return scorer.freq(); }
-
+
@Override
public Collection<ChildScorer> getChildren() {
return Collections.singleton(new ChildScorer(scorer, "FILTERED"));
}
- @Override
- public long cost() {
- return scorer.cost();
- }
}
private static class QueryFirstBulkScorer extends BulkScorer {
@@ -254,7 +242,7 @@ public class FilteredQuery extends Query {
* jumping past the target document. When both land on the same document, it's
* collected.
*/
- private static final class LeapFrogScorer extends Scorer {
+ private static final class LeapFrogScorer extends FilterScorer {
private final DocIdSetIterator secondary;
private final DocIdSetIterator primary;
private final Scorer scorer;
@@ -262,7 +250,7 @@ public class FilteredQuery extends Query {
private int secondaryDoc = -1;
protected LeapFrogScorer(Weight weight, DocIdSetIterator primary, DocIdSetIterator secondary, Scorer scorer) {
- super(weight);
+ super(scorer, weight);
this.primary = primary;
this.secondary = secondary;
this.scorer = scorer;
@@ -302,17 +290,7 @@ public class FilteredQuery extends Query {
public final int docID() {
return secondaryDoc;
}
-
- @Override
- public final float score() throws IOException {
- return scorer.score();
- }
-
- @Override
- public final int freq() throws IOException {
- return scorer.freq();
- }
-
+
@Override
public final Collection<ChildScorer> getChildren() {
return Collections.singleton(new ChildScorer(scorer, "FILTERED"));
@@ -489,6 +467,7 @@ public class FilteredQuery extends Query {
// ignore scoreDocsInOrder:
return new Weight.DefaultBulkScorer(scorer);
}
+
}
/**
@@ -595,8 +574,7 @@ public class FilteredQuery extends Query {
return LEAP_FROG_QUERY_FIRST_STRATEGY.filteredScorer(context, weight, docIdSet, needsScores);
}
final Scorer scorer = weight.scorer(context, null, needsScores);
- return scorer == null ? null : new QueryFirstScorer(weight,
- filterAcceptDocs, scorer);
+ return scorer == null ? null : new QueryFirstScorer(weight, filterAcceptDocs, scorer);
}
@Override
diff --git a/lucene/core/src/java/org/apache/lucene/search/FuzzyTermsEnum.java b/lucene/core/src/java/org/apache/lucene/search/FuzzyTermsEnum.java
index 319996645b8..60621186654 100644
--- a/lucene/core/src/java/org/apache/lucene/search/FuzzyTermsEnum.java
+++ b/lucene/core/src/java/org/apache/lucene/search/FuzzyTermsEnum.java
@@ -22,8 +22,7 @@ import java.util.ArrayList;
import java.util.Comparator;
import java.util.List;
-import org.apache.lucene.index.DocsAndPositionsEnum;
-import org.apache.lucene.index.DocsEnum;
+import org.apache.lucene.index.PostingsEnum;
import org.apache.lucene.index.FilteredTermsEnum;
import org.apache.lucene.index.Term;
import org.apache.lucene.index.TermState;
@@ -266,14 +265,8 @@ public class FuzzyTermsEnum extends TermsEnum {
}
@Override
- public DocsEnum docs(Bits liveDocs, DocsEnum reuse, int flags) throws IOException {
- return actualEnum.docs(liveDocs, reuse, flags);
- }
-
- @Override
- public DocsAndPositionsEnum docsAndPositions(Bits liveDocs,
- DocsAndPositionsEnum reuse, int flags) throws IOException {
- return actualEnum.docsAndPositions(liveDocs, reuse, flags);
+ public PostingsEnum postings(Bits liveDocs, PostingsEnum reuse, int flags) throws IOException {
+ return actualEnum.postings(liveDocs, reuse, flags);
}
@Override
diff --git a/lucene/core/src/java/org/apache/lucene/search/MatchAllDocsQuery.java b/lucene/core/src/java/org/apache/lucene/search/MatchAllDocsQuery.java
index 0d9028ae69c..aee68872d14 100644
--- a/lucene/core/src/java/org/apache/lucene/search/MatchAllDocsQuery.java
+++ b/lucene/core/src/java/org/apache/lucene/search/MatchAllDocsQuery.java
@@ -17,14 +17,15 @@ package org.apache.lucene.search;
* limitations under the License.
*/
-import org.apache.lucene.index.LeafReaderContext;
-import org.apache.lucene.index.IndexReader;
-import org.apache.lucene.index.Term;
-import org.apache.lucene.util.ToStringUtils;
-import org.apache.lucene.util.Bits;
-
-import java.util.Set;
import java.io.IOException;
+import java.util.Set;
+
+import org.apache.lucene.index.IndexReader;
+import org.apache.lucene.index.LeafReaderContext;
+import org.apache.lucene.index.Term;
+import org.apache.lucene.util.Bits;
+import org.apache.lucene.util.BytesRef;
+import org.apache.lucene.util.ToStringUtils;
/**
* A query that matches all documents.
@@ -72,6 +73,26 @@ public class MatchAllDocsQuery extends Query {
return 1;
}
+ @Override
+ public int nextPosition() throws IOException {
+ return -1;
+ }
+
+ @Override
+ public int startOffset() throws IOException {
+ return -1;
+ }
+
+ @Override
+ public int endOffset() throws IOException {
+ return -1;
+ }
+
+ @Override
+ public BytesRef getPayload() throws IOException {
+ return null;
+ }
+
@Override
public int advance(int target) throws IOException {
doc = target-1;
diff --git a/lucene/core/src/java/org/apache/lucene/search/MinShouldMatchSumScorer.java b/lucene/core/src/java/org/apache/lucene/search/MinShouldMatchSumScorer.java
index 27873216959..fe8b2498c13 100644
--- a/lucene/core/src/java/org/apache/lucene/search/MinShouldMatchSumScorer.java
+++ b/lucene/core/src/java/org/apache/lucene/search/MinShouldMatchSumScorer.java
@@ -17,10 +17,6 @@ package org.apache.lucene.search;
* limitations under the License.
*/
-import static org.apache.lucene.search.ScorerPriorityQueue.leftNode;
-import static org.apache.lucene.search.ScorerPriorityQueue.parentNode;
-import static org.apache.lucene.search.ScorerPriorityQueue.rightNode;
-
import java.io.IOException;
import java.util.ArrayList;
import java.util.Collection;
@@ -28,8 +24,13 @@ import java.util.Collections;
import java.util.List;
import org.apache.lucene.search.ScorerPriorityQueue.ScorerWrapper;
+import org.apache.lucene.util.BytesRef;
import org.apache.lucene.util.PriorityQueue;
+import static org.apache.lucene.search.ScorerPriorityQueue.leftNode;
+import static org.apache.lucene.search.ScorerPriorityQueue.parentNode;
+import static org.apache.lucene.search.ScorerPriorityQueue.rightNode;
+
/**
* A {@link Scorer} for {@link BooleanQuery} when
* {@link BooleanQuery#setMinimumNumberShouldMatch(int) minShouldMatch} is
@@ -229,6 +230,26 @@ final class MinShouldMatchSumScorer extends Scorer {
}
}
+ @Override
+ public int nextPosition() throws IOException {
+ return -1;
+ }
+
+ @Override
+ public int startOffset() throws IOException {
+ return -1;
+ }
+
+ @Override
+ public int endOffset() throws IOException {
+ return -1;
+ }
+
+ @Override
+ public BytesRef getPayload() throws IOException {
+ return null;
+ }
+
/** Advance tail to the lead until there is a match. */
private int doNext() throws IOException {
while (freq < minShouldMatch) {
diff --git a/lucene/core/src/java/org/apache/lucene/search/MultiCollector.java b/lucene/core/src/java/org/apache/lucene/search/MultiCollector.java
index 627ed835823..5e7a5f62208 100644
--- a/lucene/core/src/java/org/apache/lucene/search/MultiCollector.java
+++ b/lucene/core/src/java/org/apache/lucene/search/MultiCollector.java
@@ -111,7 +111,6 @@ public class MultiCollector implements Collector {
return new MultiLeafCollector(leafCollectors);
}
-
private static class MultiLeafCollector implements LeafCollector {
private final LeafCollector[] collectors;
diff --git a/lucene/core/src/java/org/apache/lucene/search/MultiPhraseQuery.java b/lucene/core/src/java/org/apache/lucene/search/MultiPhraseQuery.java
index 0b84a341406..4061e045ef2 100644
--- a/lucene/core/src/java/org/apache/lucene/search/MultiPhraseQuery.java
+++ b/lucene/core/src/java/org/apache/lucene/search/MultiPhraseQuery.java
@@ -21,9 +21,8 @@ import java.io.IOException;
import java.util.*;
import org.apache.lucene.index.LeafReaderContext;
-import org.apache.lucene.index.DocsAndPositionsEnum;
import org.apache.lucene.index.LeafReader;
-import org.apache.lucene.index.DocsEnum;
+import org.apache.lucene.index.PostingsEnum;
import org.apache.lucene.index.IndexReader;
import org.apache.lucene.index.IndexReaderContext;
import org.apache.lucene.index.Term;
@@ -197,11 +196,11 @@ public class MultiPhraseQuery extends Query {
for (int pos=0; pos<postingsFreqs.length; pos++) {
Term[] terms = termArrays.get(pos);
- final DocsAndPositionsEnum postingsEnum;
+ final PostingsEnum postingsEnum;
int docFreq;
if (terms.length > 1) {
- postingsEnum = new UnionDocsAndPositionsEnum(liveDocs, context, terms, termContexts, termsEnum);
+ postingsEnum = new UnionPostingsEnum(liveDocs, context, terms, termContexts, termsEnum);
// coarse -- this overcounts since a given doc can
// have more than one term:
@@ -229,11 +228,11 @@ public class MultiPhraseQuery extends Query {
return null;
}
termsEnum.seekExact(term.bytes(), termState);
- postingsEnum = termsEnum.docsAndPositions(liveDocs, null, DocsEnum.FLAG_NONE);
+ postingsEnum = termsEnum.postings(liveDocs, null, PostingsEnum.FLAG_POSITIONS);
if (postingsEnum == null) {
// term does exist, but has no positions
- assert termsEnum.docs(liveDocs, null, DocsEnum.FLAG_NONE) != null: "termstate found but no term exists in reader";
+ assert termsEnum.postings(liveDocs, null, PostingsEnum.FLAG_NONE) != null: "termstate found but no term exists in reader";
throw new IllegalStateException("field \"" + term.field() + "\" was indexed without position data; cannot run PhraseQuery (term=" + term.text() + ")");
}
@@ -408,15 +407,15 @@ public class MultiPhraseQuery extends Query {
*/
// TODO: if ever we allow subclassing of the *PhraseScorer
-class UnionDocsAndPositionsEnum extends DocsAndPositionsEnum {
+class UnionPostingsEnum extends PostingsEnum {
- private static final class DocsQueue extends PriorityQueue<DocsAndPositionsEnum> {
- DocsQueue(List<DocsAndPositionsEnum> docsEnums) throws IOException {
- super(docsEnums.size());
+ private static final class DocsQueue extends PriorityQueue<PostingsEnum> {
+ DocsQueue(List<PostingsEnum> postingsEnums) throws IOException {
+ super(postingsEnums.size());
- Iterator<DocsAndPositionsEnum> i = docsEnums.iterator();
+ Iterator<PostingsEnum> i = postingsEnums.iterator();
while (i.hasNext()) {
- DocsAndPositionsEnum postings = i.next();
+ PostingsEnum postings = i.next();
if (postings.nextDoc() != DocIdSetIterator.NO_MORE_DOCS) {
add(postings);
}
@@ -424,7 +423,7 @@ class UnionDocsAndPositionsEnum extends DocsAndPositionsEnum {
}
@Override
- public final boolean lessThan(DocsAndPositionsEnum a, DocsAndPositionsEnum b) {
+ public final boolean lessThan(PostingsEnum a, PostingsEnum b) {
return a.docID() < b.docID();
}
}
@@ -473,8 +472,8 @@ class UnionDocsAndPositionsEnum extends DocsAndPositionsEnum {
private IntQueue _posList;
private long cost;
- public UnionDocsAndPositionsEnum(Bits liveDocs, LeafReaderContext context, Term[] terms, Map<Term,TermContext> termContexts, TermsEnum termsEnum) throws IOException {
- List<DocsAndPositionsEnum> docsEnums = new LinkedList<>();
+ public UnionPostingsEnum(Bits liveDocs, LeafReaderContext context, Term[] terms, Map<Term,TermContext> termContexts, TermsEnum termsEnum) throws IOException {
+ List<PostingsEnum> postingsEnums = new LinkedList<>();
for (int i = 0; i < terms.length; i++) {
final Term term = terms[i];
TermState termState = termContexts.get(term).get(context.ord);
@@ -483,16 +482,16 @@ class UnionDocsAndPositionsEnum extends DocsAndPositionsEnum {
continue;
}
termsEnum.seekExact(term.bytes(), termState);
- DocsAndPositionsEnum postings = termsEnum.docsAndPositions(liveDocs, null, DocsEnum.FLAG_NONE);
+ PostingsEnum postings = termsEnum.postings(liveDocs, null, PostingsEnum.FLAG_POSITIONS);
if (postings == null) {
// term does exist, but has no positions
throw new IllegalStateException("field \"" + term.field() + "\" was indexed without position data; cannot run PhraseQuery (term=" + term.text() + ")");
}
cost += postings.cost();
- docsEnums.add(postings);
+ postingsEnums.add(postings);
}
- _queue = new DocsQueue(docsEnums);
+ _queue = new DocsQueue(postingsEnums);
_posList = new IntQueue();
}
@@ -509,7 +508,7 @@ class UnionDocsAndPositionsEnum extends DocsAndPositionsEnum {
_doc = _queue.top().docID();
// merge sort all positions together
- DocsAndPositionsEnum postings;
+ PostingsEnum postings;
do {
postings = _queue.top();
@@ -554,7 +553,7 @@ class UnionDocsAndPositionsEnum extends DocsAndPositionsEnum {
@Override
public final int advance(int target) throws IOException {
while (_queue.top() != null && target > _queue.top().docID()) {
- DocsAndPositionsEnum postings = _queue.pop();
+ PostingsEnum postings = _queue.pop();
if (postings.advance(target) != NO_MORE_DOCS) {
_queue.add(postings);
}
diff --git a/lucene/core/src/java/org/apache/lucene/search/MultiTermQueryWrapperFilter.java b/lucene/core/src/java/org/apache/lucene/search/MultiTermQueryWrapperFilter.java
index f2301b20963..8a7ef23df1f 100644
--- a/lucene/core/src/java/org/apache/lucene/search/MultiTermQueryWrapperFilter.java
+++ b/lucene/core/src/java/org/apache/lucene/search/MultiTermQueryWrapperFilter.java
@@ -19,9 +19,7 @@ package org.apache.lucene.search;
import java.io.IOException;
-import org.apache.lucene.index.DocsEnum;
-import org.apache.lucene.index.Fields;
-import org.apache.lucene.index.LeafReader;
+import org.apache.lucene.index.PostingsEnum;
import org.apache.lucene.index.LeafReaderContext;
import org.apache.lucene.index.Terms;
import org.apache.lucene.index.TermsEnum;
@@ -94,9 +92,9 @@ public class MultiTermQueryWrapperFilter<Q extends MultiTermQuery> extends Filte
assert termsEnum != null;
BitDocIdSet.Builder builder = new BitDocIdSet.Builder(context.reader().maxDoc());
- DocsEnum docs = null;
+ PostingsEnum docs = null;
while (termsEnum.next() != null) {
- docs = termsEnum.docs(acceptDocs, docs, DocsEnum.FLAG_NONE);
+ docs = termsEnum.postings(acceptDocs, docs, PostingsEnum.FLAG_NONE);
builder.or(docs);
}
return builder.build();
diff --git a/lucene/core/src/java/org/apache/lucene/search/PhrasePositions.java b/lucene/core/src/java/org/apache/lucene/search/PhrasePositions.java
index c975b01e5ff..bfc692c4c28 100644
--- a/lucene/core/src/java/org/apache/lucene/search/PhrasePositions.java
+++ b/lucene/core/src/java/org/apache/lucene/search/PhrasePositions.java
@@ -29,13 +29,13 @@ final class PhrasePositions {
int count; // remaining pos in this doc
int offset; // position in phrase
final int ord; // unique across all PhrasePositions instances
- final DocsAndPositionsEnum postings; // stream of docs & positions
+ final PostingsEnum postings; // stream of docs & positions
PhrasePositions next; // used to make lists
int rptGroup = -1; // >=0 indicates that this is a repeating PP
int rptInd; // index in the rptGroup
final Term[] terms; // for repetitions initialization
- PhrasePositions(DocsAndPositionsEnum postings, int o, int ord, Term[] terms) {
+ PhrasePositions(PostingsEnum postings, int o, int ord, Term[] terms) {
this.postings = postings;
offset = o;
this.ord = ord;
diff --git a/lucene/core/src/java/org/apache/lucene/search/PhraseQuery.java b/lucene/core/src/java/org/apache/lucene/search/PhraseQuery.java
index 5bdae58dbd7..e28f8295960 100644
--- a/lucene/core/src/java/org/apache/lucene/search/PhraseQuery.java
+++ b/lucene/core/src/java/org/apache/lucene/search/PhraseQuery.java
@@ -22,19 +22,18 @@ import java.util.ArrayList;
import java.util.Arrays;
import java.util.Set;
-import org.apache.lucene.index.LeafReaderContext;
-import org.apache.lucene.index.DocsAndPositionsEnum;
-import org.apache.lucene.index.DocsEnum;
+import org.apache.lucene.index.PostingsEnum;
import org.apache.lucene.index.IndexReader;
-import org.apache.lucene.index.LeafReader;
import org.apache.lucene.index.IndexReaderContext;
+import org.apache.lucene.index.LeafReader;
+import org.apache.lucene.index.LeafReaderContext;
import org.apache.lucene.index.Term;
import org.apache.lucene.index.TermContext;
import org.apache.lucene.index.TermState;
import org.apache.lucene.index.Terms;
import org.apache.lucene.index.TermsEnum;
-import org.apache.lucene.search.similarities.Similarity.SimScorer;
import org.apache.lucene.search.similarities.Similarity;
+import org.apache.lucene.search.similarities.Similarity.SimScorer;
import org.apache.lucene.util.ArrayUtil;
import org.apache.lucene.util.Bits;
import org.apache.lucene.util.ToStringUtils;
@@ -138,13 +137,13 @@ public class PhraseQuery extends Query {
}
static class PostingsAndFreq implements Comparable {
- final DocsAndPositionsEnum postings;
+ final PostingsEnum postings;
final int docFreq;
final int position;
final Term[] terms;
final int nTerms; // for faster comparisons
- public PostingsAndFreq(DocsAndPositionsEnum postings, int docFreq, int position, Term... terms) {
+ public PostingsAndFreq(PostingsEnum postings, int docFreq, int position, Term... terms) {
this.postings = postings;
this.docFreq = docFreq;
this.position = position;
@@ -267,7 +266,7 @@ public class PhraseQuery extends Query {
return null;
}
te.seekExact(t.bytes(), state);
- DocsAndPositionsEnum postingsEnum = te.docsAndPositions(liveDocs, null, DocsEnum.FLAG_NONE);
+ PostingsEnum postingsEnum = te.postings(liveDocs, null, PostingsEnum.FLAG_POSITIONS);
// PhraseQuery on a field that did not index
// positions.
@@ -276,7 +275,7 @@ public class PhraseQuery extends Query {
// term does exist, but has no positions
throw new IllegalStateException("field \"" + t.field() + "\" was indexed without position data; cannot run PhraseQuery (term=" + t.text() + ")");
}
- postingsFreqs[i] = new PostingsAndFreq(postingsEnum, te.docFreq(), positions.get(i).intValue(), t);
+ postingsFreqs[i] = new PostingsAndFreq(postingsEnum, te.docFreq(), positions.get(i), t);
}
// sort by increasing docFreq order
diff --git a/lucene/core/src/java/org/apache/lucene/search/QueryRescorer.java b/lucene/core/src/java/org/apache/lucene/search/QueryRescorer.java
index c5386df6757..e659868e9c5 100644
--- a/lucene/core/src/java/org/apache/lucene/search/QueryRescorer.java
+++ b/lucene/core/src/java/org/apache/lucene/search/QueryRescorer.java
@@ -17,13 +17,14 @@ package org.apache.lucene.search;
* limitations under the License.
*/
+import org.apache.lucene.index.PostingsEnum;
+import org.apache.lucene.index.LeafReaderContext;
+
import java.io.IOException;
import java.util.Arrays;
import java.util.Comparator;
import java.util.List;
-import org.apache.lucene.index.LeafReaderContext;
-
/** A {@link Rescorer} that uses a provided Query to assign
* scores to the first-pass hits.
*
diff --git a/lucene/core/src/java/org/apache/lucene/search/QueryWrapperFilter.java b/lucene/core/src/java/org/apache/lucene/search/QueryWrapperFilter.java
index dac98a3895e..0b7a599b413 100644
--- a/lucene/core/src/java/org/apache/lucene/search/QueryWrapperFilter.java
+++ b/lucene/core/src/java/org/apache/lucene/search/QueryWrapperFilter.java
@@ -17,11 +17,12 @@ package org.apache.lucene.search;
* limitations under the License.
*/
-import java.io.IOException;
-
+import org.apache.lucene.index.PostingsEnum;
import org.apache.lucene.index.LeafReaderContext;
import org.apache.lucene.util.Bits;
+import java.io.IOException;
+
/**
* Constrains search results to only match those which also match a provided
* query.
diff --git a/lucene/core/src/java/org/apache/lucene/search/ReqExclScorer.java b/lucene/core/src/java/org/apache/lucene/search/ReqExclScorer.java
index 4e2a5f104e8..cf6aec1cc3b 100644
--- a/lucene/core/src/java/org/apache/lucene/search/ReqExclScorer.java
+++ b/lucene/core/src/java/org/apache/lucene/search/ReqExclScorer.java
@@ -27,7 +27,7 @@ import java.util.Collections;
* This Scorer implements {@link Scorer#advance(int)},
* and it uses the skipTo() on the given scorers.
*/
-class ReqExclScorer extends Scorer {
+class ReqExclScorer extends FilterScorer {
private Scorer reqScorer;
private DocIdSetIterator exclDisi;
private int doc = -1;
@@ -37,7 +37,7 @@ class ReqExclScorer extends Scorer {
* @param exclDisi indicates exclusion.
*/
public ReqExclScorer(Scorer reqScorer, DocIdSetIterator exclDisi) {
- super(reqScorer.weight);
+ super(reqScorer);
this.reqScorer = reqScorer;
this.exclDisi = exclDisi;
}
@@ -103,11 +103,6 @@ class ReqExclScorer extends Scorer {
public float score() throws IOException {
return reqScorer.score(); // reqScorer may be null when next() or skipTo() already return false
}
-
- @Override
- public int freq() throws IOException {
- return reqScorer.freq();
- }
@Override
public Collection getChildren() {
@@ -129,8 +124,4 @@ class ReqExclScorer extends Scorer {
return doc = toNonExcluded();
}
- @Override
- public long cost() {
- return reqScorer.cost();
- }
}
diff --git a/lucene/core/src/java/org/apache/lucene/search/ReqOptSumScorer.java b/lucene/core/src/java/org/apache/lucene/search/ReqOptSumScorer.java
index d7b4d86bb82..0d1b82f2464 100644
--- a/lucene/core/src/java/org/apache/lucene/search/ReqOptSumScorer.java
+++ b/lucene/core/src/java/org/apache/lucene/search/ReqOptSumScorer.java
@@ -20,6 +20,8 @@ import java.io.IOException;
import java.util.ArrayList;
import java.util.Collection;
+import org.apache.lucene.util.BytesRef;
+
/** A Scorer for queries with a required part and an optional part.
* Delays skipTo() on the optional part until a score() is needed.
*
@@ -92,6 +94,26 @@ class ReqOptSumScorer extends Scorer {
return (optScorer != null && optScorer.docID() == reqScorer.docID()) ? 2 : 1;
}
+ @Override
+ public int nextPosition() throws IOException {
+ return -1;
+ }
+
+ @Override
+ public int startOffset() throws IOException {
+ return -1;
+ }
+
+ @Override
+ public int endOffset() throws IOException {
+ return -1;
+ }
+
+ @Override
+ public BytesRef getPayload() throws IOException {
+ return null;
+ }
+
@Override
public Collection getChildren() {
ArrayList children = new ArrayList<>(2);
diff --git a/lucene/core/src/java/org/apache/lucene/search/Scorer.java b/lucene/core/src/java/org/apache/lucene/search/Scorer.java
index 929d3b9a65f..bf6502ef74c 100644
--- a/lucene/core/src/java/org/apache/lucene/search/Scorer.java
+++ b/lucene/core/src/java/org/apache/lucene/search/Scorer.java
@@ -21,7 +21,7 @@ import java.io.IOException;
import java.util.Collection;
import java.util.Collections;
-import org.apache.lucene.index.DocsEnum;
+import org.apache.lucene.index.PostingsEnum;
/**
* Expert: Common scoring functionality for different types of queries.
@@ -41,7 +41,7 @@ import org.apache.lucene.index.DocsEnum;
* TopScoreDocCollector}) will not properly collect hits
* with these scores.
*/
-public abstract class Scorer extends DocsEnum {
+public abstract class Scorer extends PostingsEnum {
/** the Scorer's parent Weight. in some cases this may be null */
// TODO can we clean this up?
protected final Weight weight;
diff --git a/lucene/core/src/java/org/apache/lucene/search/SloppyPhraseScorer.java b/lucene/core/src/java/org/apache/lucene/search/SloppyPhraseScorer.java
index 5bebeb2ee99..805113b72cf 100644
--- a/lucene/core/src/java/org/apache/lucene/search/SloppyPhraseScorer.java
+++ b/lucene/core/src/java/org/apache/lucene/search/SloppyPhraseScorer.java
@@ -27,6 +27,7 @@ import java.util.LinkedHashMap;
import org.apache.lucene.index.Term;
import org.apache.lucene.search.similarities.Similarity;
+import org.apache.lucene.util.BytesRef;
import org.apache.lucene.util.FixedBitSet;
final class SloppyPhraseScorer extends Scorer {
@@ -527,7 +528,27 @@ final class SloppyPhraseScorer extends Scorer {
public int freq() {
return numMatches;
}
-
+
+ @Override
+ public int nextPosition() throws IOException {
+ return -1;
+ }
+
+ @Override
+ public int startOffset() throws IOException {
+ return -1;
+ }
+
+ @Override
+ public int endOffset() throws IOException {
+ return -1;
+ }
+
+ @Override
+ public BytesRef getPayload() throws IOException {
+ return null;
+ }
+
float sloppyFreq() {
return sloppyFreq;
}
diff --git a/lucene/core/src/java/org/apache/lucene/search/TermQuery.java b/lucene/core/src/java/org/apache/lucene/search/TermQuery.java
index 34abd914fac..e371ecfa272 100644
--- a/lucene/core/src/java/org/apache/lucene/search/TermQuery.java
+++ b/lucene/core/src/java/org/apache/lucene/search/TermQuery.java
@@ -20,60 +20,64 @@ package org.apache.lucene.search;
import java.io.IOException;
import java.util.Set;
-import org.apache.lucene.index.LeafReaderContext;
-import org.apache.lucene.index.DocsEnum;
-import org.apache.lucene.index.LeafReader;
import org.apache.lucene.index.IndexReaderContext;
+import org.apache.lucene.index.LeafReader;
+import org.apache.lucene.index.LeafReaderContext;
+import org.apache.lucene.index.PostingsEnum;
import org.apache.lucene.index.ReaderUtil;
import org.apache.lucene.index.Term;
import org.apache.lucene.index.TermContext;
import org.apache.lucene.index.TermState;
import org.apache.lucene.index.TermsEnum;
-import org.apache.lucene.search.similarities.Similarity.SimScorer;
import org.apache.lucene.search.similarities.Similarity;
+import org.apache.lucene.search.similarities.Similarity.SimScorer;
import org.apache.lucene.util.Bits;
import org.apache.lucene.util.ToStringUtils;
-/** A Query that matches documents containing a term.
- This may be combined with other terms with a {@link BooleanQuery}.
- */
+/**
+ * A Query that matches documents containing a term. This may be combined with
+ * other terms with a {@link BooleanQuery}.
+ */
public class TermQuery extends Query {
private final Term term;
private final int docFreq;
private final TermContext perReaderTermState;
-
+
final class TermWeight extends Weight {
private final Similarity similarity;
private final Similarity.SimWeight stats;
private final TermContext termStates;
-
+
public TermWeight(IndexSearcher searcher, TermContext termStates)
- throws IOException {
+ throws IOException {
assert termStates != null : "TermContext must not be null";
this.termStates = termStates;
this.similarity = searcher.getSimilarity();
- this.stats = similarity.computeWeight(
- getBoost(),
- searcher.collectionStatistics(term.field()),
+ this.stats = similarity.computeWeight(getBoost(),
+ searcher.collectionStatistics(term.field()),
searcher.termStatistics(term, termStates));
}
-
+
@Override
- public String toString() { return "weight(" + TermQuery.this + ")"; }
-
+ public String toString() {
+ return "weight(" + TermQuery.this + ")";
+ }
+
@Override
- public Query getQuery() { return TermQuery.this; }
-
+ public Query getQuery() {
+ return TermQuery.this;
+ }
+
@Override
public float getValueForNormalization() {
return stats.getValueForNormalization();
}
-
+
@Override
public void normalize(float queryNorm, float topLevelBoost) {
stats.normalize(queryNorm, topLevelBoost);
}
-
+
@Override
public Scorer scorer(LeafReaderContext context, Bits acceptDocs, boolean needsScores) throws IOException {
assert termStates.topReaderContext == ReaderUtil.getTopLevelContext(context) : "The top-reader used to create Weight (" + termStates.topReaderContext + ") is not the same as the current reader's top-reader (" + ReaderUtil.getTopLevelContext(context);
@@ -81,7 +85,7 @@ public class TermQuery extends Query {
if (termsEnum == null) {
return null;
}
- DocsEnum docs = termsEnum.docs(acceptDocs, null, needsScores ? DocsEnum.FLAG_FREQS : DocsEnum.FLAG_NONE);
+ PostingsEnum docs = termsEnum.postings(acceptDocs, null, needsScores ? PostingsEnum.FLAG_FREQS : PostingsEnum.FLAG_NONE);
assert docs != null;
return new TermScorer(this, docs, similarity.simScorer(stats, context));
}
@@ -96,15 +100,18 @@ public class TermQuery extends Query {
assert termNotInReader(context.reader(), term) : "no termstate found but term exists in reader term=" + term;
return null;
}
- //System.out.println("LD=" + reader.getLiveDocs() + " set?=" + (reader.getLiveDocs() != null ? reader.getLiveDocs().get(0) : "null"));
- final TermsEnum termsEnum = context.reader().terms(term.field()).iterator(null);
+ // System.out.println("LD=" + reader.getLiveDocs() + " set?=" +
+ // (reader.getLiveDocs() != null ? reader.getLiveDocs().get(0) : "null"));
+ final TermsEnum termsEnum = context.reader().terms(term.field())
+ .iterator(null);
termsEnum.seekExact(term.bytes(), state);
return termsEnum;
}
private boolean termNotInReader(LeafReader reader, Term term) throws IOException {
// only called from assert
- //System.out.println("TQ.termNotInReader reader=" + reader + " term=" + field + ":" + bytes.utf8ToString());
+ // System.out.println("TQ.termNotInReader reader=" + reader + " term=" +
+ // field + ":" + bytes.utf8ToString());
return reader.docFreq(term) == 0;
}
@@ -117,69 +124,76 @@ public class TermQuery extends Query {
float freq = scorer.freq();
SimScorer docScorer = similarity.simScorer(stats, context);
ComplexExplanation result = new ComplexExplanation();
- result.setDescription("weight("+getQuery()+" in "+doc+") [" + similarity.getClass().getSimpleName() + "], result of:");
- Explanation scoreExplanation = docScorer.explain(doc, new Explanation(freq, "termFreq=" + freq));
+ result.setDescription("weight(" + getQuery() + " in " + doc + ") ["
+ + similarity.getClass().getSimpleName() + "], result of:");
+ Explanation scoreExplanation = docScorer.explain(doc,
+ new Explanation(freq, "termFreq=" + freq));
result.addDetail(scoreExplanation);
result.setValue(scoreExplanation.getValue());
result.setMatch(true);
return result;
}
}
- return new ComplexExplanation(false, 0.0f, "no matching term");
+ return new ComplexExplanation(false, 0.0f, "no matching term");
}
}
-
+
/** Constructs a query for the term t. */
public TermQuery(Term t) {
this(t, -1);
}
-
- /** Expert: constructs a TermQuery that will use the
- * provided docFreq instead of looking up the docFreq
- * against the searcher. */
+
+ /**
+ * Expert: constructs a TermQuery that will use the provided docFreq instead
+ * of looking up the docFreq against the searcher.
+ */
public TermQuery(Term t, int docFreq) {
term = t;
this.docFreq = docFreq;
perReaderTermState = null;
}
- /** Expert: constructs a TermQuery that will use the
- * provided docFreq instead of looking up the docFreq
- * against the searcher. */
+ /**
+ * Expert: constructs a TermQuery that will use the provided docFreq instead
+ * of looking up the docFreq against the searcher.
+ */
public TermQuery(Term t, TermContext states) {
assert states != null;
term = t;
docFreq = states.docFreq();
perReaderTermState = states;
}
-
+
/** Returns the term of this query. */
- public Term getTerm() { return term; }
-
+ public Term getTerm() {
+ return term;
+ }
+
@Override
public Weight createWeight(IndexSearcher searcher) throws IOException {
final IndexReaderContext context = searcher.getTopReaderContext();
final TermContext termState;
- if (perReaderTermState == null || perReaderTermState.topReaderContext != context) {
- // make TermQuery single-pass if we don't have a PRTS or if the context differs!
+ if (perReaderTermState == null
+ || perReaderTermState.topReaderContext != context) {
+ // make TermQuery single-pass if we don't have a PRTS or if the context
+ // differs!
termState = TermContext.build(context, term);
} else {
- // PRTS was pre-build for this IS
- termState = this.perReaderTermState;
+ // PRTS was pre-build for this IS
+ termState = this.perReaderTermState;
}
-
+
// we must not ignore the given docFreq - if set use the given value (lie)
- if (docFreq != -1)
- termState.setDocFreq(docFreq);
+ if (docFreq != -1) termState.setDocFreq(docFreq);
return new TermWeight(searcher, termState);
}
-
+
@Override
public void extractTerms(Set terms) {
terms.add(getTerm());
}
-
+
/** Prints a user-readable version of this query. */
@Override
public String toString(String field) {
@@ -192,21 +206,20 @@ public class TermQuery extends Query {
buffer.append(ToStringUtils.boost(getBoost()));
return buffer.toString();
}
-
+
/** Returns true iff o is equal to this. */
@Override
public boolean equals(Object o) {
- if (!(o instanceof TermQuery))
- return false;
- TermQuery other = (TermQuery)o;
+ if (!(o instanceof TermQuery)) return false;
+ TermQuery other = (TermQuery) o;
return (this.getBoost() == other.getBoost())
- && this.term.equals(other.term);
+ && this.term.equals(other.term);
}
-
- /** Returns a hash code value for this object.*/
+
+ /** Returns a hash code value for this object. */
@Override
public int hashCode() {
return Float.floatToIntBits(getBoost()) ^ term.hashCode();
}
-
+
}
diff --git a/lucene/core/src/java/org/apache/lucene/search/TermScorer.java b/lucene/core/src/java/org/apache/lucene/search/TermScorer.java
index 66975240c21..70808359bf4 100644
--- a/lucene/core/src/java/org/apache/lucene/search/TermScorer.java
+++ b/lucene/core/src/java/org/apache/lucene/search/TermScorer.java
@@ -19,78 +19,99 @@ package org.apache.lucene.search;
import java.io.IOException;
-import org.apache.lucene.index.DocsEnum;
+import org.apache.lucene.index.PostingsEnum;
import org.apache.lucene.search.similarities.Similarity;
+import org.apache.lucene.util.BytesRef;
/** Expert: A Scorer for documents matching a Term.
*/
final class TermScorer extends Scorer {
- private final DocsEnum docsEnum;
+ private final PostingsEnum postingsEnum;
private final Similarity.SimScorer docScorer;
-
+
/**
* Construct a TermScorer.
- *
+ *
* @param weight
* The weight of the Term in the query.
* @param td
* An iterator over the documents matching the Term.
* @param docScorer
- * The Similarity.SimScorer implementation
+ * The Similarity.SimScorer implementation
* to be used for score computations.
*/
- TermScorer(Weight weight, DocsEnum td, Similarity.SimScorer docScorer) {
+ TermScorer(Weight weight, PostingsEnum td, Similarity.SimScorer docScorer) {
super(weight);
this.docScorer = docScorer;
- this.docsEnum = td;
+ this.postingsEnum = td;
}
@Override
public int docID() {
- return docsEnum.docID();
+ return postingsEnum.docID();
}
@Override
public int freq() throws IOException {
- return docsEnum.freq();
+ return postingsEnum.freq();
+ }
+
+ @Override
+ public int nextPosition() throws IOException {
+ return postingsEnum.nextPosition();
+ }
+
+ @Override
+ public int startOffset() throws IOException {
+ return postingsEnum.startOffset();
+ }
+
+ @Override
+ public int endOffset() throws IOException {
+ return postingsEnum.endOffset();
+ }
+
+ @Override
+ public BytesRef getPayload() throws IOException {
+ return postingsEnum.getPayload();
}
/**
* Advances to the next document matching the query.
- *
+ *
* @return the document matching the query or NO_MORE_DOCS if there are no more documents.
*/
@Override
public int nextDoc() throws IOException {
- return docsEnum.nextDoc();
+ return postingsEnum.nextDoc();
}
-
+
@Override
public float score() throws IOException {
assert docID() != NO_MORE_DOCS;
- return docScorer.score(docsEnum.docID(), docsEnum.freq());
+ return docScorer.score(postingsEnum.docID(), postingsEnum.freq());
}
/**
* Advances to the first match beyond the current whose document number is
* greater than or equal to a given target.
- * The implementation uses {@link DocsEnum#advance(int)}.
- *
+ * The implementation uses {@link org.apache.lucene.index.PostingsEnum#advance(int)}.
+ *
* @param target
* The target document number.
* @return the matching document or NO_MORE_DOCS if none exist.
*/
@Override
public int advance(int target) throws IOException {
- return docsEnum.advance(target);
+ return postingsEnum.advance(target);
}
-
+
@Override
public long cost() {
- return docsEnum.cost();
+ return postingsEnum.cost();
}
/** Returns a string representation of this TermScorer. */
@Override
- public String toString() { return "scorer(" + weight + ")"; }
+ public String toString() { return "scorer(" + weight + ")[" + super.toString() + "]"; }
}
diff --git a/lucene/core/src/java/org/apache/lucene/search/TimeLimitingCollector.java b/lucene/core/src/java/org/apache/lucene/search/TimeLimitingCollector.java
index 07674e94ef4..2d4e0263e73 100644
--- a/lucene/core/src/java/org/apache/lucene/search/TimeLimitingCollector.java
+++ b/lucene/core/src/java/org/apache/lucene/search/TimeLimitingCollector.java
@@ -17,12 +17,12 @@ package org.apache.lucene.search;
* limitations under the License.
*/
+import java.io.IOException;
+
import org.apache.lucene.index.LeafReaderContext;
import org.apache.lucene.util.Counter;
import org.apache.lucene.util.ThreadInterruptedException;
-import java.io.IOException;
-
/**
* The {@link TimeLimitingCollector} is used to timeout search requests that
* take longer than the maximum allowed search time limit. After this time is
@@ -156,7 +156,7 @@ public class TimeLimitingCollector implements Collector {
};
}
-
+
@Override
public boolean needsScores() {
return collector.needsScores();
diff --git a/lucene/core/src/java/org/apache/lucene/search/TotalHitCountCollector.java b/lucene/core/src/java/org/apache/lucene/search/TotalHitCountCollector.java
index 577f05ec3ce..59b548f2188 100644
--- a/lucene/core/src/java/org/apache/lucene/search/TotalHitCountCollector.java
+++ b/lucene/core/src/java/org/apache/lucene/search/TotalHitCountCollector.java
@@ -17,7 +17,6 @@ package org.apache.lucene.search;
* limitations under the License.
*/
-
/**
* Just counts the total number of hits.
*/
diff --git a/lucene/core/src/java/org/apache/lucene/search/Weight.java b/lucene/core/src/java/org/apache/lucene/search/Weight.java
index efe7241e7a2..4522287a4ac 100644
--- a/lucene/core/src/java/org/apache/lucene/search/Weight.java
+++ b/lucene/core/src/java/org/apache/lucene/search/Weight.java
@@ -19,8 +19,8 @@ package org.apache.lucene.search;
import java.io.IOException;
+import org.apache.lucene.index.IndexReaderContext;
import org.apache.lucene.index.LeafReaderContext;
-import org.apache.lucene.index.IndexReaderContext; // javadocs
import org.apache.lucene.search.similarities.Similarity;
import org.apache.lucene.util.Bits;
@@ -187,4 +187,5 @@ public abstract class Weight {
}
}
}
+
}
diff --git a/lucene/core/src/java/org/apache/lucene/search/payloads/PayloadNearQuery.java b/lucene/core/src/java/org/apache/lucene/search/payloads/PayloadNearQuery.java
index 0ab3b8941c6..be12f19849d 100644
--- a/lucene/core/src/java/org/apache/lucene/search/payloads/PayloadNearQuery.java
+++ b/lucene/core/src/java/org/apache/lucene/search/payloads/PayloadNearQuery.java
@@ -17,11 +17,15 @@ package org.apache.lucene.search.payloads;
* limitations under the License.
*/
+import java.io.IOException;
+import java.util.Collection;
+import java.util.Iterator;
+
import org.apache.lucene.index.LeafReaderContext;
import org.apache.lucene.search.ComplexExplanation;
import org.apache.lucene.search.Explanation;
-import org.apache.lucene.search.Scorer;
import org.apache.lucene.search.IndexSearcher;
+import org.apache.lucene.search.Scorer;
import org.apache.lucene.search.Weight;
import org.apache.lucene.search.similarities.DefaultSimilarity;
import org.apache.lucene.search.similarities.Similarity;
@@ -37,10 +41,6 @@ import org.apache.lucene.util.Bits;
import org.apache.lucene.util.BytesRef;
import org.apache.lucene.util.ToStringUtils;
-import java.io.IOException;
-import java.util.Collection;
-import java.util.Iterator;
-
/**
* This class is very similar to
* {@link org.apache.lucene.search.spans.SpanNearQuery} except that it factors
diff --git a/lucene/core/src/java/org/apache/lucene/search/payloads/PayloadTermQuery.java b/lucene/core/src/java/org/apache/lucene/search/payloads/PayloadTermQuery.java
index 0f7c9148f7a..0bab87093ab 100644
--- a/lucene/core/src/java/org/apache/lucene/search/payloads/PayloadTermQuery.java
+++ b/lucene/core/src/java/org/apache/lucene/search/payloads/PayloadTermQuery.java
@@ -17,27 +17,27 @@ package org.apache.lucene.search.payloads;
* limitations under the License.
*/
+import java.io.IOException;
+
import org.apache.lucene.index.LeafReaderContext;
+import org.apache.lucene.index.PostingsEnum;
import org.apache.lucene.index.Term;
-import org.apache.lucene.index.DocsAndPositionsEnum;
+import org.apache.lucene.search.ComplexExplanation;
+import org.apache.lucene.search.Explanation;
import org.apache.lucene.search.IndexSearcher;
import org.apache.lucene.search.Scorer;
import org.apache.lucene.search.Weight;
-import org.apache.lucene.search.Explanation;
-import org.apache.lucene.search.ComplexExplanation;
import org.apache.lucene.search.similarities.DefaultSimilarity;
import org.apache.lucene.search.similarities.Similarity;
import org.apache.lucene.search.similarities.Similarity.SimScorer;
import org.apache.lucene.search.spans.SpanQuery;
-import org.apache.lucene.search.spans.TermSpans;
+import org.apache.lucene.search.spans.SpanScorer;
import org.apache.lucene.search.spans.SpanTermQuery;
import org.apache.lucene.search.spans.SpanWeight;
-import org.apache.lucene.search.spans.SpanScorer;
+import org.apache.lucene.search.spans.TermSpans;
import org.apache.lucene.util.Bits;
import org.apache.lucene.util.BytesRef;
-import java.io.IOException;
-
/**
* This class is very similar to
* {@link org.apache.lucene.search.spans.SpanTermQuery} except that it factors
@@ -120,7 +120,7 @@ public class PayloadTermQuery extends SpanTermQuery {
protected void processPayload(Similarity similarity) throws IOException {
if (termSpans.isPayloadAvailable()) {
- final DocsAndPositionsEnum postings = termSpans.getPostings();
+ final PostingsEnum postings = termSpans.getPostings();
payload = postings.getPayload();
if (payload != null) {
payloadScore = function.currentScore(doc, term.field(),
diff --git a/lucene/core/src/java/org/apache/lucene/search/similarities/Similarity.java b/lucene/core/src/java/org/apache/lucene/search/similarities/Similarity.java
index ea45f696960..beb7b90498d 100644
--- a/lucene/core/src/java/org/apache/lucene/search/similarities/Similarity.java
+++ b/lucene/core/src/java/org/apache/lucene/search/similarities/Similarity.java
@@ -17,10 +17,8 @@ package org.apache.lucene.search.similarities;
* limitations under the License.
*/
-import java.io.IOException;
-
-import org.apache.lucene.index.LeafReaderContext;
import org.apache.lucene.index.FieldInvertState;
+import org.apache.lucene.index.LeafReaderContext;
import org.apache.lucene.search.BooleanQuery;
import org.apache.lucene.search.CollectionStatistics;
import org.apache.lucene.search.Explanation;
@@ -28,9 +26,11 @@ import org.apache.lucene.search.IndexSearcher;
import org.apache.lucene.search.PhraseQuery;
import org.apache.lucene.search.TermQuery;
import org.apache.lucene.search.TermStatistics;
-import org.apache.lucene.search.spans.SpanQuery; // javadoc
+import org.apache.lucene.search.spans.SpanQuery;
import org.apache.lucene.util.BytesRef;
-import org.apache.lucene.util.SmallFloat; // javadoc
+import org.apache.lucene.util.SmallFloat;
+
+import java.io.IOException;
/**
* Similarity defines the components of Lucene scoring.
diff --git a/lucene/core/src/java/org/apache/lucene/search/spans/SpanScorer.java b/lucene/core/src/java/org/apache/lucene/search/spans/SpanScorer.java
index 74a098d9222..35cf7b94bce 100644
--- a/lucene/core/src/java/org/apache/lucene/search/spans/SpanScorer.java
+++ b/lucene/core/src/java/org/apache/lucene/search/spans/SpanScorer.java
@@ -19,9 +19,10 @@ package org.apache.lucene.search.spans;
import java.io.IOException;
-import org.apache.lucene.search.Weight;
import org.apache.lucene.search.Scorer;
+import org.apache.lucene.search.Weight;
import org.apache.lucene.search.similarities.Similarity;
+import org.apache.lucene.util.BytesRef;
/**
* Public for extension only.
@@ -96,16 +97,37 @@ public class SpanScorer extends Scorer {
public int freq() throws IOException {
return numMatches;
}
-
+
+ @Override
+ public int nextPosition() throws IOException {
+ throw new UnsupportedOperationException("SpanQueries do not support nextPosition() iteration");
+ }
+
+ @Override
+ public int startOffset() throws IOException {
+ throw new UnsupportedOperationException();
+ }
+
+ @Override
+ public int endOffset() throws IOException {
+ throw new UnsupportedOperationException();
+ }
+
+ @Override
+ public BytesRef getPayload() throws IOException {
+ throw new UnsupportedOperationException();
+ }
+
/** Returns the intermediate "sloppy freq" adjusted for edit distance
* @lucene.internal */
// only public so .payloads can see it.
public float sloppyFreq() throws IOException {
return freq;
}
-
+
@Override
public long cost() {
return spans.cost();
}
+
}
diff --git a/lucene/core/src/java/org/apache/lucene/search/spans/SpanTermQuery.java b/lucene/core/src/java/org/apache/lucene/search/spans/SpanTermQuery.java
index c6dab4e04a1..a8b7c5f758f 100644
--- a/lucene/core/src/java/org/apache/lucene/search/spans/SpanTermQuery.java
+++ b/lucene/core/src/java/org/apache/lucene/search/spans/SpanTermQuery.java
@@ -17,10 +17,13 @@ package org.apache.lucene.search.spans;
* limitations under the License.
*/
+import java.io.IOException;
+import java.util.Map;
+import java.util.Set;
+
+import org.apache.lucene.index.PostingsEnum;
import org.apache.lucene.index.LeafReaderContext;
-import org.apache.lucene.index.Fields;
import org.apache.lucene.index.Term;
-import org.apache.lucene.index.DocsAndPositionsEnum;
import org.apache.lucene.index.TermContext;
import org.apache.lucene.index.TermState;
import org.apache.lucene.index.Terms;
@@ -28,10 +31,6 @@ import org.apache.lucene.index.TermsEnum;
import org.apache.lucene.util.Bits;
import org.apache.lucene.util.ToStringUtils;
-import java.io.IOException;
-import java.util.Map;
-import java.util.Set;
-
/** Matches spans containing a term. */
public class SpanTermQuery extends SpanQuery {
protected Term term;
@@ -115,7 +114,7 @@ public class SpanTermQuery extends SpanQuery {
final TermsEnum termsEnum = context.reader().terms(term.field()).iterator(null);
termsEnum.seekExact(term.bytes(), state);
- final DocsAndPositionsEnum postings = termsEnum.docsAndPositions(acceptDocs, null, DocsAndPositionsEnum.FLAG_PAYLOADS);
+ final PostingsEnum postings = termsEnum.postings(acceptDocs, null, PostingsEnum.FLAG_PAYLOADS);
if (postings != null) {
return new TermSpans(postings, term);
diff --git a/lucene/core/src/java/org/apache/lucene/search/spans/SpanWeight.java b/lucene/core/src/java/org/apache/lucene/search/spans/SpanWeight.java
index 444929e0f9f..c76e062c1eb 100644
--- a/lucene/core/src/java/org/apache/lucene/search/spans/SpanWeight.java
+++ b/lucene/core/src/java/org/apache/lucene/search/spans/SpanWeight.java
@@ -17,20 +17,26 @@ package org.apache.lucene.search.spans;
* limitations under the License.
*/
-import org.apache.lucene.index.LeafReaderContext;
-import org.apache.lucene.index.IndexReaderContext;
-import org.apache.lucene.index.Term;
-import org.apache.lucene.index.TermContext;
-import org.apache.lucene.search.*;
-import org.apache.lucene.search.similarities.Similarity;
-import org.apache.lucene.search.similarities.Similarity.SimScorer;
-import org.apache.lucene.util.Bits;
-
import java.io.IOException;
import java.util.HashMap;
import java.util.Map;
import java.util.TreeSet;
+import org.apache.lucene.index.IndexReaderContext;
+import org.apache.lucene.index.LeafReaderContext;
+import org.apache.lucene.index.Term;
+import org.apache.lucene.index.TermContext;
+import org.apache.lucene.search.ComplexExplanation;
+import org.apache.lucene.search.Explanation;
+import org.apache.lucene.search.IndexSearcher;
+import org.apache.lucene.search.Query;
+import org.apache.lucene.search.Scorer;
+import org.apache.lucene.search.TermStatistics;
+import org.apache.lucene.search.Weight;
+import org.apache.lucene.search.similarities.Similarity;
+import org.apache.lucene.search.similarities.Similarity.SimScorer;
+import org.apache.lucene.util.Bits;
+
/**
* Expert-only. Public for use by other weight implementations
*/
diff --git a/lucene/core/src/java/org/apache/lucene/search/spans/TermSpans.java b/lucene/core/src/java/org/apache/lucene/search/spans/TermSpans.java
index d4974a517f5..bca88de5ef2 100644
--- a/lucene/core/src/java/org/apache/lucene/search/spans/TermSpans.java
+++ b/lucene/core/src/java/org/apache/lucene/search/spans/TermSpans.java
@@ -17,7 +17,7 @@ package org.apache.lucene.search.spans;
import org.apache.lucene.index.Term;
-import org.apache.lucene.index.DocsAndPositionsEnum;
+import org.apache.lucene.index.PostingsEnum;
import org.apache.lucene.search.DocIdSetIterator;
import org.apache.lucene.util.BytesRef;
@@ -30,7 +30,7 @@ import java.util.Collection;
* Public for extension only
*/
public class TermSpans extends Spans {
- protected final DocsAndPositionsEnum postings;
+ protected final PostingsEnum postings;
protected final Term term;
protected int doc;
protected int freq;
@@ -38,7 +38,7 @@ public class TermSpans extends Spans {
protected int position;
protected boolean readPayload;
- public TermSpans(DocsAndPositionsEnum postings, Term term) {
+ public TermSpans(PostingsEnum postings, Term term) {
this.postings = postings;
this.term = term;
doc = -1;
@@ -132,7 +132,7 @@ public class TermSpans extends Spans {
(doc == -1 ? "START" : (doc == Integer.MAX_VALUE) ? "END" : doc + "-" + position);
}
- public DocsAndPositionsEnum getPostings() {
+ public PostingsEnum getPostings() {
return postings;
}
diff --git a/lucene/core/src/test/org/apache/lucene/TestSearchForDuplicates.java b/lucene/core/src/test/org/apache/lucene/TestSearchForDuplicates.java
index 4747557f8fc..c276f9ffa50 100644
--- a/lucene/core/src/test/org/apache/lucene/TestSearchForDuplicates.java
+++ b/lucene/core/src/test/org/apache/lucene/TestSearchForDuplicates.java
@@ -22,11 +22,28 @@ import java.io.PrintWriter;
import java.io.StringWriter;
import java.util.Random;
-import org.apache.lucene.store.*;
-import org.apache.lucene.document.*;
-import org.apache.lucene.analysis.*;
-import org.apache.lucene.index.*;
-import org.apache.lucene.search.*;
+import org.apache.lucene.analysis.Analyzer;
+import org.apache.lucene.analysis.MockAnalyzer;
+import org.apache.lucene.document.Document;
+import org.apache.lucene.document.Field;
+import org.apache.lucene.document.IntField;
+import org.apache.lucene.document.NumericDocValuesField;
+import org.apache.lucene.index.DirectoryReader;
+import org.apache.lucene.index.IndexReader;
+import org.apache.lucene.index.IndexWriter;
+import org.apache.lucene.index.IndexWriterConfig;
+import org.apache.lucene.index.MergePolicy;
+import org.apache.lucene.index.StoredDocument;
+import org.apache.lucene.index.Term;
+import org.apache.lucene.search.BooleanClause;
+import org.apache.lucene.search.BooleanQuery;
+import org.apache.lucene.search.IndexSearcher;
+import org.apache.lucene.search.Query;
+import org.apache.lucene.search.ScoreDoc;
+import org.apache.lucene.search.Sort;
+import org.apache.lucene.search.SortField;
+import org.apache.lucene.search.TermQuery;
+import org.apache.lucene.store.Directory;
import org.apache.lucene.util.LuceneTestCase;
public class TestSearchForDuplicates extends LuceneTestCase {
diff --git a/lucene/core/src/test/org/apache/lucene/analysis/TestCachingTokenFilter.java b/lucene/core/src/test/org/apache/lucene/analysis/TestCachingTokenFilter.java
index a4310c4e2aa..10619a30d06 100644
--- a/lucene/core/src/test/org/apache/lucene/analysis/TestCachingTokenFilter.java
+++ b/lucene/core/src/test/org/apache/lucene/analysis/TestCachingTokenFilter.java
@@ -25,9 +25,9 @@ import org.apache.lucene.analysis.tokenattributes.CharTermAttribute;
import org.apache.lucene.analysis.tokenattributes.OffsetAttribute;
import org.apache.lucene.document.Document;
import org.apache.lucene.document.TextField;
-import org.apache.lucene.index.DocsAndPositionsEnum;
import org.apache.lucene.index.IndexReader;
import org.apache.lucene.index.MultiFields;
+import org.apache.lucene.index.PostingsEnum;
import org.apache.lucene.index.RandomIndexWriter;
import org.apache.lucene.search.DocIdSetIterator;
import org.apache.lucene.store.Directory;
@@ -84,7 +84,7 @@ public class TestCachingTokenFilter extends BaseTokenStreamTestCase {
writer.addDocument(doc);
IndexReader reader = writer.getReader();
- DocsAndPositionsEnum termPositions = MultiFields.getTermPositionsEnum(reader,
+ PostingsEnum termPositions = MultiFields.getTermPositionsEnum(reader,
MultiFields.getLiveDocs(reader),
"preanalyzed",
new BytesRef("term1"));
diff --git a/lucene/core/src/test/org/apache/lucene/analysis/TestMockAnalyzer.java b/lucene/core/src/test/org/apache/lucene/analysis/TestMockAnalyzer.java
index 0a67974e2ee..2e4254a00a5 100644
--- a/lucene/core/src/test/org/apache/lucene/analysis/TestMockAnalyzer.java
+++ b/lucene/core/src/test/org/apache/lucene/analysis/TestMockAnalyzer.java
@@ -25,7 +25,7 @@ import java.util.Random;
import org.apache.lucene.document.Document;
import org.apache.lucene.document.Field;
import org.apache.lucene.document.FieldType;
-import org.apache.lucene.index.DocsAndPositionsEnum;
+import org.apache.lucene.index.PostingsEnum;
import org.apache.lucene.index.Fields;
import org.apache.lucene.index.IndexOptions;
import org.apache.lucene.index.LeafReader;
@@ -321,7 +321,7 @@ public class TestMockAnalyzer extends BaseTokenStreamTestCase {
final Terms terms = fields.terms("f");
final TermsEnum te = terms.iterator(null);
assertEquals(new BytesRef("a"), te.next());
- final DocsAndPositionsEnum dpe = te.docsAndPositions(null, null);
+ final PostingsEnum dpe = te.postings(null, null, PostingsEnum.FLAG_ALL);
assertEquals(0, dpe.nextDoc());
assertEquals(2, dpe.freq());
assertEquals(0, dpe.nextPosition());
diff --git a/lucene/core/src/test/org/apache/lucene/codecs/asserting/TestAssertingDocValuesFormat.java b/lucene/core/src/test/org/apache/lucene/codecs/asserting/TestAssertingDocValuesFormat.java
index c15ae9be08f..b6a2c619468 100644
--- a/lucene/core/src/test/org/apache/lucene/codecs/asserting/TestAssertingDocValuesFormat.java
+++ b/lucene/core/src/test/org/apache/lucene/codecs/asserting/TestAssertingDocValuesFormat.java
@@ -27,5 +27,10 @@ public class TestAssertingDocValuesFormat extends BasePostingsFormatTestCase {
@Override
protected Codec getCodec() {
return codec;
- }
+ }
+
+ @Override
+ protected boolean isPostingsEnumReuseImplemented() {
+ return false;
+ }
}
diff --git a/lucene/core/src/test/org/apache/lucene/codecs/asserting/TestAssertingPostingsFormat.java b/lucene/core/src/test/org/apache/lucene/codecs/asserting/TestAssertingPostingsFormat.java
index 05879b4438e..7c78596bc4a 100644
--- a/lucene/core/src/test/org/apache/lucene/codecs/asserting/TestAssertingPostingsFormat.java
+++ b/lucene/core/src/test/org/apache/lucene/codecs/asserting/TestAssertingPostingsFormat.java
@@ -27,5 +27,10 @@ public class TestAssertingPostingsFormat extends BasePostingsFormatTestCase {
@Override
protected Codec getCodec() {
return codec;
- }
+ }
+
+ @Override
+ protected boolean isPostingsEnumReuseImplemented() {
+ return false;
+ }
}
diff --git a/lucene/core/src/test/org/apache/lucene/codecs/lucene50/TestBlockPostingsFormat3.java b/lucene/core/src/test/org/apache/lucene/codecs/lucene50/TestBlockPostingsFormat3.java
index 195746cd53c..ed1dc0ad2b3 100644
--- a/lucene/core/src/test/org/apache/lucene/codecs/lucene50/TestBlockPostingsFormat3.java
+++ b/lucene/core/src/test/org/apache/lucene/codecs/lucene50/TestBlockPostingsFormat3.java
@@ -33,8 +33,7 @@ import org.apache.lucene.document.Field;
import org.apache.lucene.document.FieldType;
import org.apache.lucene.document.TextField;
import org.apache.lucene.index.DirectoryReader;
-import org.apache.lucene.index.DocsAndPositionsEnum;
-import org.apache.lucene.index.DocsEnum;
+import org.apache.lucene.index.PostingsEnum;
import org.apache.lucene.index.IndexOptions;
import org.apache.lucene.index.IndexWriter;
import org.apache.lucene.index.IndexWriterConfig.OpenMode;
@@ -284,93 +283,93 @@ public class TestBlockPostingsFormat3 extends LuceneTestCase {
public void assertTermsEnum(TermsEnum leftTermsEnum, TermsEnum rightTermsEnum, boolean deep) throws Exception {
BytesRef term;
Bits randomBits = new RandomBits(MAXDOC, random().nextDouble(), random());
- DocsAndPositionsEnum leftPositions = null;
- DocsAndPositionsEnum rightPositions = null;
- DocsEnum leftDocs = null;
- DocsEnum rightDocs = null;
+ PostingsEnum leftPositions = null;
+ PostingsEnum rightPositions = null;
+ PostingsEnum leftDocs = null;
+ PostingsEnum rightDocs = null;
while ((term = leftTermsEnum.next()) != null) {
assertEquals(term, rightTermsEnum.next());
assertTermStats(leftTermsEnum, rightTermsEnum);
if (deep) {
// with payloads + off
- assertDocsAndPositionsEnum(leftPositions = leftTermsEnum.docsAndPositions(null, leftPositions),
- rightPositions = rightTermsEnum.docsAndPositions(null, rightPositions));
- assertDocsAndPositionsEnum(leftPositions = leftTermsEnum.docsAndPositions(randomBits, leftPositions),
- rightPositions = rightTermsEnum.docsAndPositions(randomBits, rightPositions));
+ assertDocsAndPositionsEnum(leftPositions = leftTermsEnum.postings(null, leftPositions, PostingsEnum.FLAG_ALL),
+ rightPositions = rightTermsEnum.postings(null, rightPositions, PostingsEnum.FLAG_ALL));
+ assertDocsAndPositionsEnum(leftPositions = leftTermsEnum.postings(randomBits, leftPositions, PostingsEnum.FLAG_ALL),
+ rightPositions = rightTermsEnum.postings(randomBits, rightPositions, PostingsEnum.FLAG_ALL));
assertPositionsSkipping(leftTermsEnum.docFreq(),
- leftPositions = leftTermsEnum.docsAndPositions(null, leftPositions),
- rightPositions = rightTermsEnum.docsAndPositions(null, rightPositions));
+ leftPositions = leftTermsEnum.postings(null, leftPositions, PostingsEnum.FLAG_ALL),
+ rightPositions = rightTermsEnum.postings(null, rightPositions, PostingsEnum.FLAG_ALL));
assertPositionsSkipping(leftTermsEnum.docFreq(),
- leftPositions = leftTermsEnum.docsAndPositions(randomBits, leftPositions),
- rightPositions = rightTermsEnum.docsAndPositions(randomBits, rightPositions));
+ leftPositions = leftTermsEnum.postings(randomBits, leftPositions, PostingsEnum.FLAG_ALL),
+ rightPositions = rightTermsEnum.postings(randomBits, rightPositions, PostingsEnum.FLAG_ALL));
// with payloads only
- assertDocsAndPositionsEnum(leftPositions = leftTermsEnum.docsAndPositions(null, leftPositions, DocsAndPositionsEnum.FLAG_PAYLOADS),
- rightPositions = rightTermsEnum.docsAndPositions(null, rightPositions, DocsAndPositionsEnum.FLAG_PAYLOADS));
- assertDocsAndPositionsEnum(leftPositions = leftTermsEnum.docsAndPositions(randomBits, leftPositions, DocsAndPositionsEnum.FLAG_PAYLOADS),
- rightPositions = rightTermsEnum.docsAndPositions(randomBits, rightPositions, DocsAndPositionsEnum.FLAG_PAYLOADS));
+ assertDocsAndPositionsEnum(leftPositions = leftTermsEnum.postings(null, leftPositions, PostingsEnum.FLAG_PAYLOADS),
+ rightPositions = rightTermsEnum.postings(null, rightPositions, PostingsEnum.FLAG_PAYLOADS));
+ assertDocsAndPositionsEnum(leftPositions = leftTermsEnum.postings(randomBits, leftPositions, PostingsEnum.FLAG_PAYLOADS),
+ rightPositions = rightTermsEnum.postings(randomBits, rightPositions, PostingsEnum.FLAG_PAYLOADS));
assertPositionsSkipping(leftTermsEnum.docFreq(),
- leftPositions = leftTermsEnum.docsAndPositions(null, leftPositions, DocsAndPositionsEnum.FLAG_PAYLOADS),
- rightPositions = rightTermsEnum.docsAndPositions(null, rightPositions, DocsAndPositionsEnum.FLAG_PAYLOADS));
+ leftPositions = leftTermsEnum.postings(null, leftPositions, PostingsEnum.FLAG_PAYLOADS),
+ rightPositions = rightTermsEnum.postings(null, rightPositions, PostingsEnum.FLAG_PAYLOADS));
assertPositionsSkipping(leftTermsEnum.docFreq(),
- leftPositions = leftTermsEnum.docsAndPositions(randomBits, leftPositions, DocsAndPositionsEnum.FLAG_PAYLOADS),
- rightPositions = rightTermsEnum.docsAndPositions(randomBits, rightPositions, DocsAndPositionsEnum.FLAG_PAYLOADS));
+ leftPositions = leftTermsEnum.postings(randomBits, leftPositions, PostingsEnum.FLAG_PAYLOADS),
+ rightPositions = rightTermsEnum.postings(randomBits, rightPositions, PostingsEnum.FLAG_PAYLOADS));
// with offsets only
- assertDocsAndPositionsEnum(leftPositions = leftTermsEnum.docsAndPositions(null, leftPositions, DocsAndPositionsEnum.FLAG_OFFSETS),
- rightPositions = rightTermsEnum.docsAndPositions(null, rightPositions, DocsAndPositionsEnum.FLAG_OFFSETS));
- assertDocsAndPositionsEnum(leftPositions = leftTermsEnum.docsAndPositions(randomBits, leftPositions, DocsAndPositionsEnum.FLAG_OFFSETS),
- rightPositions = rightTermsEnum.docsAndPositions(randomBits, rightPositions, DocsAndPositionsEnum.FLAG_OFFSETS));
+ assertDocsAndPositionsEnum(leftPositions = leftTermsEnum.postings(null, leftPositions, PostingsEnum.FLAG_OFFSETS),
+ rightPositions = rightTermsEnum.postings(null, rightPositions, PostingsEnum.FLAG_OFFSETS));
+ assertDocsAndPositionsEnum(leftPositions = leftTermsEnum.postings(randomBits, leftPositions, PostingsEnum.FLAG_OFFSETS),
+ rightPositions = rightTermsEnum.postings(randomBits, rightPositions, PostingsEnum.FLAG_OFFSETS));
assertPositionsSkipping(leftTermsEnum.docFreq(),
- leftPositions = leftTermsEnum.docsAndPositions(null, leftPositions, DocsAndPositionsEnum.FLAG_OFFSETS),
- rightPositions = rightTermsEnum.docsAndPositions(null, rightPositions, DocsAndPositionsEnum.FLAG_OFFSETS));
+ leftPositions = leftTermsEnum.postings(null, leftPositions, PostingsEnum.FLAG_OFFSETS),
+ rightPositions = rightTermsEnum.postings(null, rightPositions, PostingsEnum.FLAG_OFFSETS));
assertPositionsSkipping(leftTermsEnum.docFreq(),
- leftPositions = leftTermsEnum.docsAndPositions(randomBits, leftPositions, DocsAndPositionsEnum.FLAG_OFFSETS),
- rightPositions = rightTermsEnum.docsAndPositions(randomBits, rightPositions, DocsAndPositionsEnum.FLAG_OFFSETS));
+ leftPositions = leftTermsEnum.postings(randomBits, leftPositions, PostingsEnum.FLAG_OFFSETS),
+ rightPositions = rightTermsEnum.postings(randomBits, rightPositions, PostingsEnum.FLAG_OFFSETS));
// with positions only
- assertDocsAndPositionsEnum(leftPositions = leftTermsEnum.docsAndPositions(null, leftPositions, DocsEnum.FLAG_NONE),
- rightPositions = rightTermsEnum.docsAndPositions(null, rightPositions, DocsEnum.FLAG_NONE));
- assertDocsAndPositionsEnum(leftPositions = leftTermsEnum.docsAndPositions(randomBits, leftPositions, DocsEnum.FLAG_NONE),
- rightPositions = rightTermsEnum.docsAndPositions(randomBits, rightPositions, DocsEnum.FLAG_NONE));
+ assertDocsAndPositionsEnum(leftPositions = leftTermsEnum.postings(null, leftPositions, PostingsEnum.FLAG_POSITIONS),
+ rightPositions = rightTermsEnum.postings(null, rightPositions, PostingsEnum.FLAG_POSITIONS));
+ assertDocsAndPositionsEnum(leftPositions = leftTermsEnum.postings(randomBits, leftPositions, PostingsEnum.FLAG_POSITIONS),
+ rightPositions = rightTermsEnum.postings(randomBits, rightPositions, PostingsEnum.FLAG_POSITIONS));
assertPositionsSkipping(leftTermsEnum.docFreq(),
- leftPositions = leftTermsEnum.docsAndPositions(null, leftPositions, DocsEnum.FLAG_NONE),
- rightPositions = rightTermsEnum.docsAndPositions(null, rightPositions, DocsEnum.FLAG_NONE));
+ leftPositions = leftTermsEnum.postings(null, leftPositions, PostingsEnum.FLAG_POSITIONS),
+ rightPositions = rightTermsEnum.postings(null, rightPositions, PostingsEnum.FLAG_POSITIONS));
assertPositionsSkipping(leftTermsEnum.docFreq(),
- leftPositions = leftTermsEnum.docsAndPositions(randomBits, leftPositions, DocsEnum.FLAG_NONE),
- rightPositions = rightTermsEnum.docsAndPositions(randomBits, rightPositions, DocsEnum.FLAG_NONE));
+ leftPositions = leftTermsEnum.postings(randomBits, leftPositions, PostingsEnum.FLAG_POSITIONS),
+ rightPositions = rightTermsEnum.postings(randomBits, rightPositions, PostingsEnum.FLAG_POSITIONS));
// with freqs:
- assertDocsEnum(leftDocs = leftTermsEnum.docs(null, leftDocs),
- rightDocs = rightTermsEnum.docs(null, rightDocs));
- assertDocsEnum(leftDocs = leftTermsEnum.docs(randomBits, leftDocs),
- rightDocs = rightTermsEnum.docs(randomBits, rightDocs));
+ assertDocsEnum(leftDocs = leftTermsEnum.postings(null, leftDocs),
+ rightDocs = rightTermsEnum.postings(null, rightDocs));
+ assertDocsEnum(leftDocs = leftTermsEnum.postings(randomBits, leftDocs),
+ rightDocs = rightTermsEnum.postings(randomBits, rightDocs));
// w/o freqs:
- assertDocsEnum(leftDocs = leftTermsEnum.docs(null, leftDocs, DocsEnum.FLAG_NONE),
- rightDocs = rightTermsEnum.docs(null, rightDocs, DocsEnum.FLAG_NONE));
- assertDocsEnum(leftDocs = leftTermsEnum.docs(randomBits, leftDocs, DocsEnum.FLAG_NONE),
- rightDocs = rightTermsEnum.docs(randomBits, rightDocs, DocsEnum.FLAG_NONE));
+ assertDocsEnum(leftDocs = leftTermsEnum.postings(null, leftDocs, PostingsEnum.FLAG_NONE),
+ rightDocs = rightTermsEnum.postings(null, rightDocs, PostingsEnum.FLAG_NONE));
+ assertDocsEnum(leftDocs = leftTermsEnum.postings(randomBits, leftDocs, PostingsEnum.FLAG_NONE),
+ rightDocs = rightTermsEnum.postings(randomBits, rightDocs, PostingsEnum.FLAG_NONE));
// with freqs:
assertDocsSkipping(leftTermsEnum.docFreq(),
- leftDocs = leftTermsEnum.docs(null, leftDocs),
- rightDocs = rightTermsEnum.docs(null, rightDocs));
+ leftDocs = leftTermsEnum.postings(null, leftDocs),
+ rightDocs = rightTermsEnum.postings(null, rightDocs));
assertDocsSkipping(leftTermsEnum.docFreq(),
- leftDocs = leftTermsEnum.docs(randomBits, leftDocs),
- rightDocs = rightTermsEnum.docs(randomBits, rightDocs));
+ leftDocs = leftTermsEnum.postings(randomBits, leftDocs),
+ rightDocs = rightTermsEnum.postings(randomBits, rightDocs));
// w/o freqs:
assertDocsSkipping(leftTermsEnum.docFreq(),
- leftDocs = leftTermsEnum.docs(null, leftDocs, DocsEnum.FLAG_NONE),
- rightDocs = rightTermsEnum.docs(null, rightDocs, DocsEnum.FLAG_NONE));
+ leftDocs = leftTermsEnum.postings(null, leftDocs, PostingsEnum.FLAG_NONE),
+ rightDocs = rightTermsEnum.postings(null, rightDocs, PostingsEnum.FLAG_NONE));
assertDocsSkipping(leftTermsEnum.docFreq(),
- leftDocs = leftTermsEnum.docs(randomBits, leftDocs, DocsEnum.FLAG_NONE),
- rightDocs = rightTermsEnum.docs(randomBits, rightDocs, DocsEnum.FLAG_NONE));
+ leftDocs = leftTermsEnum.postings(randomBits, leftDocs, PostingsEnum.FLAG_NONE),
+ rightDocs = rightTermsEnum.postings(randomBits, rightDocs, PostingsEnum.FLAG_NONE));
}
}
assertNull(rightTermsEnum.next());
@@ -389,7 +388,7 @@ public class TestBlockPostingsFormat3 extends LuceneTestCase {
/**
* checks docs + freqs + positions + payloads, sequentially
*/
- public void assertDocsAndPositionsEnum(DocsAndPositionsEnum leftDocs, DocsAndPositionsEnum rightDocs) throws Exception {
+ public void assertDocsAndPositionsEnum(PostingsEnum leftDocs, PostingsEnum rightDocs) throws Exception {
if (leftDocs == null || rightDocs == null) {
assertNull(leftDocs);
assertNull(rightDocs);
@@ -413,7 +412,7 @@ public class TestBlockPostingsFormat3 extends LuceneTestCase {
/**
* checks docs + freqs, sequentially
*/
- public void assertDocsEnum(DocsEnum leftDocs, DocsEnum rightDocs) throws Exception {
+ public void assertDocsEnum(PostingsEnum leftDocs, PostingsEnum rightDocs) throws Exception {
if (leftDocs == null) {
assertNull(rightDocs);
return;
@@ -431,7 +430,7 @@ public class TestBlockPostingsFormat3 extends LuceneTestCase {
/**
* checks advancing docs
*/
- public void assertDocsSkipping(int docFreq, DocsEnum leftDocs, DocsEnum rightDocs) throws Exception {
+ public void assertDocsSkipping(int docFreq, PostingsEnum leftDocs, PostingsEnum rightDocs) throws Exception {
if (leftDocs == null) {
assertNull(rightDocs);
return;
@@ -462,7 +461,7 @@ public class TestBlockPostingsFormat3 extends LuceneTestCase {
/**
* checks advancing docs + positions
*/
- public void assertPositionsSkipping(int docFreq, DocsAndPositionsEnum leftDocs, DocsAndPositionsEnum rightDocs) throws Exception {
+ public void assertPositionsSkipping(int docFreq, PostingsEnum leftDocs, PostingsEnum rightDocs) throws Exception {
if (leftDocs == null || rightDocs == null) {
assertNull(leftDocs);
assertNull(rightDocs);
diff --git a/lucene/core/src/test/org/apache/lucene/codecs/lucene50/TestLucene50DocValuesFormat.java b/lucene/core/src/test/org/apache/lucene/codecs/lucene50/TestLucene50DocValuesFormat.java
index 60fbdecdc4d..7959703d0f7 100644
--- a/lucene/core/src/test/org/apache/lucene/codecs/lucene50/TestLucene50DocValuesFormat.java
+++ b/lucene/core/src/test/org/apache/lucene/codecs/lucene50/TestLucene50DocValuesFormat.java
@@ -30,11 +30,11 @@ import org.apache.lucene.document.Document;
import org.apache.lucene.document.Field;
import org.apache.lucene.document.SortedSetDocValuesField;
import org.apache.lucene.document.StringField;
-import org.apache.lucene.index.LeafReader;
-import org.apache.lucene.index.LeafReaderContext;
import org.apache.lucene.index.BaseCompressingDocValuesFormatTestCase;
import org.apache.lucene.index.DirectoryReader;
import org.apache.lucene.index.IndexWriterConfig;
+import org.apache.lucene.index.LeafReader;
+import org.apache.lucene.index.LeafReaderContext;
import org.apache.lucene.index.RandomIndexWriter;
import org.apache.lucene.index.SerialMergeScheduler;
import org.apache.lucene.index.SortedSetDocValues;
diff --git a/lucene/core/src/test/org/apache/lucene/codecs/perfield/TestPerFieldPostingsFormat.java b/lucene/core/src/test/org/apache/lucene/codecs/perfield/TestPerFieldPostingsFormat.java
index 39ebd6ace56..cd2ec13ab6c 100644
--- a/lucene/core/src/test/org/apache/lucene/codecs/perfield/TestPerFieldPostingsFormat.java
+++ b/lucene/core/src/test/org/apache/lucene/codecs/perfield/TestPerFieldPostingsFormat.java
@@ -39,4 +39,8 @@ public class TestPerFieldPostingsFormat extends BasePostingsFormatTestCase {
assumeTrue("The MockRandom PF randomizes content on the fly, so we can't check it", false);
}
+ @Override
+ public void testPostingsEnumReuse() throws Exception {
+ assumeTrue("The MockRandom PF randomizes content on the fly, so we can't check it", false);
+ }
}
diff --git a/lucene/core/src/test/org/apache/lucene/codecs/perfield/TestPerFieldPostingsFormat2.java b/lucene/core/src/test/org/apache/lucene/codecs/perfield/TestPerFieldPostingsFormat2.java
index 3b19087755a..5be8c78f3b5 100644
--- a/lucene/core/src/test/org/apache/lucene/codecs/perfield/TestPerFieldPostingsFormat2.java
+++ b/lucene/core/src/test/org/apache/lucene/codecs/perfield/TestPerFieldPostingsFormat2.java
@@ -16,6 +16,7 @@ package org.apache.lucene.codecs.perfield;
* See the License for the specific language governing permissions and
* limitations under the License.
*/
+
import java.io.IOException;
import org.apache.lucene.analysis.MockAnalyzer;
@@ -265,7 +266,7 @@ public class TestPerFieldPostingsFormat2 extends LuceneTestCase {
}
dir.close();
}
-
+
public void testSameCodecDifferentInstance() throws Exception {
Codec codec = new AssertingCodec() {
@Override
diff --git a/lucene/core/src/test/org/apache/lucene/index/TestAddIndexes.java b/lucene/core/src/test/org/apache/lucene/index/TestAddIndexes.java
index 43caaae8ff1..9d4313c97bb 100644
--- a/lucene/core/src/test/org/apache/lucene/index/TestAddIndexes.java
+++ b/lucene/core/src/test/org/apache/lucene/index/TestAddIndexes.java
@@ -538,9 +538,9 @@ public class TestAddIndexes extends LuceneTestCase {
private void verifyTermDocs(Directory dir, Term term, int numDocs)
throws IOException {
IndexReader reader = DirectoryReader.open(dir);
- DocsEnum docsEnum = TestUtil.docs(random(), reader, term.field, term.bytes, null, null, DocsEnum.FLAG_NONE);
+ PostingsEnum postingsEnum = TestUtil.docs(random(), reader, term.field, term.bytes, null, null, PostingsEnum.FLAG_NONE);
int count = 0;
- while (docsEnum.nextDoc() != DocIdSetIterator.NO_MORE_DOCS)
+ while (postingsEnum.nextDoc() != DocIdSetIterator.NO_MORE_DOCS)
count++;
assertEquals(numDocs, count);
reader.close();
diff --git a/lucene/core/src/test/org/apache/lucene/index/TestBagOfPositions.java b/lucene/core/src/test/org/apache/lucene/index/TestBagOfPositions.java
index e6877d5e366..04ce2a3e64c 100644
--- a/lucene/core/src/test/org/apache/lucene/index/TestBagOfPositions.java
+++ b/lucene/core/src/test/org/apache/lucene/index/TestBagOfPositions.java
@@ -144,7 +144,7 @@ public class TestBagOfPositions extends LuceneTestCase {
assertEquals(value, termsEnum.totalTermFreq());
// don't really need to check more than this, as CheckIndex
// will verify that totalTermFreq == total number of positions seen
- // from a docsAndPositionsEnum.
+ // from a postingsEnum.
}
ir.close();
iw.close();
diff --git a/lucene/core/src/test/org/apache/lucene/index/TestBagOfPostings.java b/lucene/core/src/test/org/apache/lucene/index/TestBagOfPostings.java
index 831e6347705..ee367aa6116 100644
--- a/lucene/core/src/test/org/apache/lucene/index/TestBagOfPostings.java
+++ b/lucene/core/src/test/org/apache/lucene/index/TestBagOfPostings.java
@@ -136,7 +136,7 @@ public class TestBagOfPostings extends LuceneTestCase {
assertEquals(value, termsEnum.docFreq());
// don't really need to check more than this, as CheckIndex
// will verify that docFreq == actual number of documents seen
- // from a docsAndPositionsEnum.
+ // from a postingsEnum.
}
ir.close();
iw.close();
diff --git a/lucene/core/src/test/org/apache/lucene/index/TestCodecs.java b/lucene/core/src/test/org/apache/lucene/index/TestCodecs.java
index 5ae4d89b92a..8c494feadae 100644
--- a/lucene/core/src/test/org/apache/lucene/index/TestCodecs.java
+++ b/lucene/core/src/test/org/apache/lucene/index/TestCodecs.java
@@ -234,7 +234,7 @@ public class TestCodecs extends LuceneTestCase {
final TermsEnum termsEnum = terms2.iterator(null);
- DocsEnum docsEnum = null;
+ PostingsEnum postingsEnum = null;
for(int i=0;i= maxDoc) {
assertEquals(DocIdSetIterator.NO_MORE_DOCS, advancedTo);
} else {
assertTrue("advanced to: " +advancedTo + " but should be <= " + next, next >= advancedTo);
}
} else {
- docsEnum.nextDoc();
+ postingsEnum.nextDoc();
}
}
}
- assertEquals("docBase: " + context.docBase + " maxDoc: " + maxDoc + " " + docsEnum.getClass(), DocIdSetIterator.NO_MORE_DOCS, docsEnum.docID());
+ assertEquals("docBase: " + context.docBase + " maxDoc: " + maxDoc + " " + postingsEnum.getClass(), DocIdSetIterator.NO_MORE_DOCS, postingsEnum.docID());
}
}
@@ -303,7 +303,7 @@ public class TestDocsAndPositions extends LuceneTestCase {
IndexReaderContext topReaderContext = reader.getContext();
for (LeafReaderContext leafReaderContext : topReaderContext.leaves()) {
- DocsAndPositionsEnum docsAndPosEnum = getDocsAndPositions(
+ PostingsEnum docsAndPosEnum = getDocsAndPositions(
leafReaderContext.reader(), bytes, null);
assertNotNull(docsAndPosEnum);
@@ -336,7 +336,7 @@ public class TestDocsAndPositions extends LuceneTestCase {
writer.addDocument(doc);
DirectoryReader reader = writer.getReader();
LeafReader r = getOnlySegmentReader(reader);
- DocsEnum disi = TestUtil.docs(random(), r, "foo", new BytesRef("bar"), null, null, DocsEnum.FLAG_NONE);
+ PostingsEnum disi = TestUtil.docs(random(), r, "foo", new BytesRef("bar"), null, null, PostingsEnum.FLAG_NONE);
int docid = disi.docID();
assertEquals(-1, docid);
assertTrue(disi.nextDoc() != DocIdSetIterator.NO_MORE_DOCS);
@@ -344,7 +344,7 @@ public class TestDocsAndPositions extends LuceneTestCase {
// now reuse and check again
TermsEnum te = r.terms("foo").iterator(null);
assertTrue(te.seekExact(new BytesRef("bar")));
- disi = TestUtil.docs(random(), te, null, disi, DocsEnum.FLAG_NONE);
+ disi = TestUtil.docs(random(), te, null, disi, PostingsEnum.FLAG_NONE);
docid = disi.docID();
assertEquals(-1, docid);
assertTrue(disi.nextDoc() != DocIdSetIterator.NO_MORE_DOCS);
@@ -361,7 +361,7 @@ public class TestDocsAndPositions extends LuceneTestCase {
writer.addDocument(doc);
DirectoryReader reader = writer.getReader();
LeafReader r = getOnlySegmentReader(reader);
- DocsAndPositionsEnum disi = r.termPositionsEnum(new Term("foo", "bar"));
+ PostingsEnum disi = r.termDocsEnum(new Term("foo", "bar"), PostingsEnum.FLAG_ALL);
int docid = disi.docID();
assertEquals(-1, docid);
assertTrue(disi.nextDoc() != DocIdSetIterator.NO_MORE_DOCS);
@@ -369,7 +369,7 @@ public class TestDocsAndPositions extends LuceneTestCase {
// now reuse and check again
TermsEnum te = r.terms("foo").iterator(null);
assertTrue(te.seekExact(new BytesRef("bar")));
- disi = te.docsAndPositions(null, disi);
+ disi = te.postings(null, disi, PostingsEnum.FLAG_ALL);
docid = disi.docID();
assertEquals(-1, docid);
assertTrue(disi.nextDoc() != DocIdSetIterator.NO_MORE_DOCS);
diff --git a/lucene/core/src/test/org/apache/lucene/index/TestDocumentWriter.java b/lucene/core/src/test/org/apache/lucene/index/TestDocumentWriter.java
index 37dc7983a3b..8f557759243 100644
--- a/lucene/core/src/test/org/apache/lucene/index/TestDocumentWriter.java
+++ b/lucene/core/src/test/org/apache/lucene/index/TestDocumentWriter.java
@@ -125,7 +125,7 @@ public class TestDocumentWriter extends LuceneTestCase {
writer.close();
SegmentReader reader = new SegmentReader(info, newIOContext(random()));
- DocsAndPositionsEnum termPositions = MultiFields.getTermPositionsEnum(reader, MultiFields.getLiveDocs(reader),
+ PostingsEnum termPositions = MultiFields.getTermPositionsEnum(reader, MultiFields.getLiveDocs(reader),
"repeated", new BytesRef("repeated"));
assertTrue(termPositions.nextDoc() != DocIdSetIterator.NO_MORE_DOCS);
int freq = termPositions.freq();
@@ -197,7 +197,7 @@ public class TestDocumentWriter extends LuceneTestCase {
writer.close();
SegmentReader reader = new SegmentReader(info, newIOContext(random()));
- DocsAndPositionsEnum termPositions = MultiFields.getTermPositionsEnum(reader, reader.getLiveDocs(), "f1", new BytesRef("a"));
+ PostingsEnum termPositions = MultiFields.getTermPositionsEnum(reader, reader.getLiveDocs(), "f1", new BytesRef("a"));
assertTrue(termPositions.nextDoc() != DocIdSetIterator.NO_MORE_DOCS);
int freq = termPositions.freq();
assertEquals(3, freq);
@@ -239,18 +239,18 @@ public class TestDocumentWriter extends LuceneTestCase {
writer.close();
SegmentReader reader = new SegmentReader(info, newIOContext(random()));
- DocsAndPositionsEnum termPositions = reader.termPositionsEnum(new Term("preanalyzed", "term1"));
+ PostingsEnum termPositions = reader.termDocsEnum(new Term("preanalyzed", "term1"), PostingsEnum.FLAG_ALL);
assertTrue(termPositions.nextDoc() != DocIdSetIterator.NO_MORE_DOCS);
assertEquals(1, termPositions.freq());
assertEquals(0, termPositions.nextPosition());
- termPositions = reader.termPositionsEnum(new Term("preanalyzed", "term2"));
+ termPositions = reader.termDocsEnum(new Term("preanalyzed", "term2"), PostingsEnum.FLAG_ALL);
assertTrue(termPositions.nextDoc() != DocIdSetIterator.NO_MORE_DOCS);
assertEquals(2, termPositions.freq());
assertEquals(1, termPositions.nextPosition());
assertEquals(3, termPositions.nextPosition());
- termPositions = reader.termPositionsEnum(new Term("preanalyzed", "term3"));
+ termPositions = reader.termDocsEnum(new Term("preanalyzed", "term3"), PostingsEnum.FLAG_ALL);
assertTrue(termPositions.nextDoc() != DocIdSetIterator.NO_MORE_DOCS);
assertEquals(1, termPositions.freq());
assertEquals(2, termPositions.nextPosition());
diff --git a/lucene/core/src/test/org/apache/lucene/index/TestDuelingCodecs.java b/lucene/core/src/test/org/apache/lucene/index/TestDuelingCodecs.java
index 521dd902d45..5535b11df0a 100644
--- a/lucene/core/src/test/org/apache/lucene/index/TestDuelingCodecs.java
+++ b/lucene/core/src/test/org/apache/lucene/index/TestDuelingCodecs.java
@@ -17,9 +17,6 @@ package org.apache.lucene.index;
* limitations under the License.
*/
-import java.io.IOException;
-import java.util.Random;
-
import org.apache.lucene.analysis.MockAnalyzer;
import org.apache.lucene.codecs.Codec;
import org.apache.lucene.document.Document;
@@ -33,6 +30,9 @@ import org.apache.lucene.util.LineFileDocs;
import org.apache.lucene.util.LuceneTestCase;
import org.apache.lucene.util.TestUtil;
+import java.io.IOException;
+import java.util.Random;
+
/**
* Compares one codec against another
*/
diff --git a/lucene/core/src/test/org/apache/lucene/index/TestFilterLeafReader.java b/lucene/core/src/test/org/apache/lucene/index/TestFilterLeafReader.java
index 1dc07e52712..d137d9b35e9 100644
--- a/lucene/core/src/test/org/apache/lucene/index/TestFilterLeafReader.java
+++ b/lucene/core/src/test/org/apache/lucene/index/TestFilterLeafReader.java
@@ -76,14 +76,14 @@ public class TestFilterLeafReader extends LuceneTestCase {
}
@Override
- public DocsAndPositionsEnum docsAndPositions(Bits liveDocs, DocsAndPositionsEnum reuse, int flags) throws IOException {
- return new TestPositions(super.docsAndPositions(liveDocs, reuse == null ? null : ((FilterDocsAndPositionsEnum) reuse).in, flags));
+ public PostingsEnum postings(Bits liveDocs, PostingsEnum reuse, int flags) throws IOException {
+ return new TestPositions(super.postings(liveDocs, reuse == null ? null : ((FilterDocsEnum) reuse).in, flags));
}
}
/** Filter that only returns odd numbered documents. */
- private static class TestPositions extends FilterDocsAndPositionsEnum {
- public TestPositions(DocsAndPositionsEnum in) {
+ private static class TestPositions extends FilterDocsEnum {
+ public TestPositions(PostingsEnum in) {
super(in);
}
@@ -151,7 +151,7 @@ public class TestFilterLeafReader extends LuceneTestCase {
assertEquals(TermsEnum.SeekStatus.FOUND, terms.seekCeil(new BytesRef("one")));
- DocsAndPositionsEnum positions = terms.docsAndPositions(MultiFields.getLiveDocs(reader), null);
+ PostingsEnum positions = terms.postings(MultiFields.getLiveDocs(reader), null, PostingsEnum.FLAG_ALL);
while (positions.nextDoc() != DocIdSetIterator.NO_MORE_DOCS) {
assertTrue((positions.docID() % 2) == 1);
}
@@ -189,7 +189,6 @@ public class TestFilterLeafReader extends LuceneTestCase {
checkOverrideMethods(FilterLeafReader.FilterTerms.class);
checkOverrideMethods(FilterLeafReader.FilterTermsEnum.class);
checkOverrideMethods(FilterLeafReader.FilterDocsEnum.class);
- checkOverrideMethods(FilterLeafReader.FilterDocsAndPositionsEnum.class);
}
public void testUnwrap() throws IOException {
diff --git a/lucene/core/src/test/org/apache/lucene/index/TestIndexWriter.java b/lucene/core/src/test/org/apache/lucene/index/TestIndexWriter.java
index 1ba635583bf..7a222c43ffa 100644
--- a/lucene/core/src/test/org/apache/lucene/index/TestIndexWriter.java
+++ b/lucene/core/src/test/org/apache/lucene/index/TestIndexWriter.java
@@ -502,12 +502,12 @@ public class TestIndexWriter extends LuceneTestCase {
assertEquals(1, reader.numDocs());
Term t = new Term("field", "a");
assertEquals(1, reader.docFreq(t));
- DocsEnum td = TestUtil.docs(random(), reader,
+ PostingsEnum td = TestUtil.docs(random(), reader,
"field",
new BytesRef("a"),
MultiFields.getLiveDocs(reader),
null,
- DocsEnum.FLAG_FREQS);
+ PostingsEnum.FLAG_FREQS);
td.nextDoc();
assertEquals(128*1024, td.freq());
reader.close();
@@ -833,14 +833,14 @@ public class TestIndexWriter extends LuceneTestCase {
Terms tpv = r.getTermVectors(0).terms("field");
TermsEnum termsEnum = tpv.iterator(null);
assertNotNull(termsEnum.next());
- DocsAndPositionsEnum dpEnum = termsEnum.docsAndPositions(null, null);
+ PostingsEnum dpEnum = termsEnum.postings(null, null, PostingsEnum.FLAG_ALL);
assertNotNull(dpEnum);
assertTrue(dpEnum.nextDoc() != DocIdSetIterator.NO_MORE_DOCS);
assertEquals(1, dpEnum.freq());
assertEquals(100, dpEnum.nextPosition());
assertNotNull(termsEnum.next());
- dpEnum = termsEnum.docsAndPositions(null, dpEnum);
+ dpEnum = termsEnum.postings(null, dpEnum, PostingsEnum.FLAG_ALL);
assertNotNull(dpEnum);
assertTrue(dpEnum.nextDoc() != DocIdSetIterator.NO_MORE_DOCS);
assertEquals(1, dpEnum.freq());
@@ -1239,12 +1239,12 @@ public class TestIndexWriter extends LuceneTestCase {
// test that the terms were indexed.
- assertTrue(TestUtil.docs(random(), ir, "binary", new BytesRef("doc1field1"), null, null, DocsEnum.FLAG_NONE).nextDoc() != DocIdSetIterator.NO_MORE_DOCS);
- assertTrue(TestUtil.docs(random(), ir, "binary", new BytesRef("doc2field1"), null, null, DocsEnum.FLAG_NONE).nextDoc() != DocIdSetIterator.NO_MORE_DOCS);
- assertTrue(TestUtil.docs(random(), ir, "binary", new BytesRef("doc3field1"), null, null, DocsEnum.FLAG_NONE).nextDoc() != DocIdSetIterator.NO_MORE_DOCS);
- assertTrue(TestUtil.docs(random(), ir, "string", new BytesRef("doc1field2"), null, null, DocsEnum.FLAG_NONE).nextDoc() != DocIdSetIterator.NO_MORE_DOCS);
- assertTrue(TestUtil.docs(random(), ir, "string", new BytesRef("doc2field2"), null, null, DocsEnum.FLAG_NONE).nextDoc() != DocIdSetIterator.NO_MORE_DOCS);
- assertTrue(TestUtil.docs(random(), ir, "string", new BytesRef("doc3field2"), null, null, DocsEnum.FLAG_NONE).nextDoc() != DocIdSetIterator.NO_MORE_DOCS);
+ assertTrue(TestUtil.docs(random(), ir, "binary", new BytesRef("doc1field1"), null, null, PostingsEnum.FLAG_NONE).nextDoc() != DocIdSetIterator.NO_MORE_DOCS);
+ assertTrue(TestUtil.docs(random(), ir, "binary", new BytesRef("doc2field1"), null, null, PostingsEnum.FLAG_NONE).nextDoc() != DocIdSetIterator.NO_MORE_DOCS);
+ assertTrue(TestUtil.docs(random(), ir, "binary", new BytesRef("doc3field1"), null, null, PostingsEnum.FLAG_NONE).nextDoc() != DocIdSetIterator.NO_MORE_DOCS);
+ assertTrue(TestUtil.docs(random(), ir, "string", new BytesRef("doc1field2"), null, null, PostingsEnum.FLAG_NONE).nextDoc() != DocIdSetIterator.NO_MORE_DOCS);
+ assertTrue(TestUtil.docs(random(), ir, "string", new BytesRef("doc2field2"), null, null, PostingsEnum.FLAG_NONE).nextDoc() != DocIdSetIterator.NO_MORE_DOCS);
+ assertTrue(TestUtil.docs(random(), ir, "string", new BytesRef("doc3field2"), null, null, PostingsEnum.FLAG_NONE).nextDoc() != DocIdSetIterator.NO_MORE_DOCS);
ir.close();
dir.close();
diff --git a/lucene/core/src/test/org/apache/lucene/index/TestIndexWriterExceptions.java b/lucene/core/src/test/org/apache/lucene/index/TestIndexWriterExceptions.java
index b5a76c07f52..da25ce2230d 100644
--- a/lucene/core/src/test/org/apache/lucene/index/TestIndexWriterExceptions.java
+++ b/lucene/core/src/test/org/apache/lucene/index/TestIndexWriterExceptions.java
@@ -530,7 +530,7 @@ public class TestIndexWriterExceptions extends LuceneTestCase {
// Make sure the doc that hit the exception was marked
// as deleted:
- DocsEnum tdocs = TestUtil.docs(random(), reader,
+ PostingsEnum tdocs = TestUtil.docs(random(), reader,
t.field(),
new BytesRef(t.text()),
MultiFields.getLiveDocs(reader),
diff --git a/lucene/core/src/test/org/apache/lucene/index/TestIndexWriterReader.java b/lucene/core/src/test/org/apache/lucene/index/TestIndexWriterReader.java
index 42e75dc5800..3ea6a42d5b0 100644
--- a/lucene/core/src/test/org/apache/lucene/index/TestIndexWriterReader.java
+++ b/lucene/core/src/test/org/apache/lucene/index/TestIndexWriterReader.java
@@ -53,7 +53,7 @@ public class TestIndexWriterReader extends LuceneTestCase {
public static int count(Term t, IndexReader r) throws IOException {
int count = 0;
- DocsEnum td = TestUtil.docs(random(), r,
+ PostingsEnum td = TestUtil.docs(random(), r,
t.field(), new BytesRef(t.text()),
MultiFields.getLiveDocs(r),
null,
diff --git a/lucene/core/src/test/org/apache/lucene/index/TestIndexWriterWithThreads.java b/lucene/core/src/test/org/apache/lucene/index/TestIndexWriterWithThreads.java
index 8a6c6ce7a1e..09f78301855 100644
--- a/lucene/core/src/test/org/apache/lucene/index/TestIndexWriterWithThreads.java
+++ b/lucene/core/src/test/org/apache/lucene/index/TestIndexWriterWithThreads.java
@@ -244,7 +244,7 @@ public class TestIndexWriterWithThreads extends LuceneTestCase {
// Quick test to make sure index is not corrupt:
IndexReader reader = DirectoryReader.open(dir);
- DocsEnum tdocs = TestUtil.docs(random(), reader,
+ PostingsEnum tdocs = TestUtil.docs(random(), reader,
"field",
new BytesRef("aaa"),
MultiFields.getLiveDocs(reader),
diff --git a/lucene/core/src/test/org/apache/lucene/index/TestIndexableField.java b/lucene/core/src/test/org/apache/lucene/index/TestIndexableField.java
index 85c95dce29a..4a8e0a35b2b 100644
--- a/lucene/core/src/test/org/apache/lucene/index/TestIndexableField.java
+++ b/lucene/core/src/test/org/apache/lucene/index/TestIndexableField.java
@@ -332,14 +332,14 @@ public class TestIndexableField extends LuceneTestCase {
TermsEnum termsEnum = tfv.iterator(null);
assertEquals(new BytesRef(""+counter), termsEnum.next());
assertEquals(1, termsEnum.totalTermFreq());
- DocsAndPositionsEnum dpEnum = termsEnum.docsAndPositions(null, null);
+ PostingsEnum dpEnum = termsEnum.postings(null, null, PostingsEnum.FLAG_ALL);
assertTrue(dpEnum.nextDoc() != DocIdSetIterator.NO_MORE_DOCS);
assertEquals(1, dpEnum.freq());
assertEquals(1, dpEnum.nextPosition());
assertEquals(new BytesRef("text"), termsEnum.next());
assertEquals(1, termsEnum.totalTermFreq());
- dpEnum = termsEnum.docsAndPositions(null, dpEnum);
+ dpEnum = termsEnum.postings(null, dpEnum, PostingsEnum.FLAG_ALL);
assertTrue(dpEnum.nextDoc() != DocIdSetIterator.NO_MORE_DOCS);
assertEquals(1, dpEnum.freq());
assertEquals(0, dpEnum.nextPosition());
diff --git a/lucene/core/src/test/org/apache/lucene/index/TestLazyProxSkipping.java b/lucene/core/src/test/org/apache/lucene/index/TestLazyProxSkipping.java
index 9e4ae9c0289..43417764f7d 100644
--- a/lucene/core/src/test/org/apache/lucene/index/TestLazyProxSkipping.java
+++ b/lucene/core/src/test/org/apache/lucene/index/TestLazyProxSkipping.java
@@ -154,7 +154,7 @@ public class TestLazyProxSkipping extends LuceneTestCase {
writer.close();
IndexReader reader = DirectoryReader.open(directory);
- DocsAndPositionsEnum tp = MultiFields.getTermPositionsEnum(reader,
+ PostingsEnum tp = MultiFields.getTermPositionsEnum(reader,
MultiFields.getLiveDocs(reader),
this.field,
new BytesRef("b"));
diff --git a/lucene/core/src/test/org/apache/lucene/index/TestLongPostings.java b/lucene/core/src/test/org/apache/lucene/index/TestLongPostings.java
index 0212f66d41c..7314396b184 100644
--- a/lucene/core/src/test/org/apache/lucene/index/TestLongPostings.java
+++ b/lucene/core/src/test/org/apache/lucene/index/TestLongPostings.java
@@ -167,7 +167,7 @@ public class TestLongPostings extends LuceneTestCase {
System.out.println("\nTEST: iter=" + iter + " doS1=" + doS1);
}
- final DocsAndPositionsEnum postings = MultiFields.getTermPositionsEnum(r, null, "field", new BytesRef(term));
+ final PostingsEnum postings = MultiFields.getTermPositionsEnum(r, null, "field", new BytesRef(term));
int docID = -1;
while(docID < DocIdSetIterator.NO_MORE_DOCS) {
@@ -370,14 +370,14 @@ public class TestLongPostings extends LuceneTestCase {
System.out.println("\nTEST: iter=" + iter + " doS1=" + doS1 + " term=" + term);
}
- final DocsEnum docs;
- final DocsEnum postings;
+ final PostingsEnum docs;
+ final PostingsEnum postings;
if (options == IndexOptions.DOCS) {
- docs = TestUtil.docs(random(), r, "field", new BytesRef(term), null, null, DocsEnum.FLAG_NONE);
+ docs = TestUtil.docs(random(), r, "field", new BytesRef(term), null, null, PostingsEnum.FLAG_NONE);
postings = null;
} else {
- docs = postings = TestUtil.docs(random(), r, "field", new BytesRef(term), null, null, DocsEnum.FLAG_FREQS);
+ docs = postings = TestUtil.docs(random(), r, "field", new BytesRef(term), null, null, PostingsEnum.FLAG_FREQS);
assert postings != null;
}
assert docs != null;
diff --git a/lucene/core/src/test/org/apache/lucene/index/TestMultiFields.java b/lucene/core/src/test/org/apache/lucene/index/TestMultiFields.java
index a0b617082ea..72432b4aea3 100644
--- a/lucene/core/src/test/org/apache/lucene/index/TestMultiFields.java
+++ b/lucene/core/src/test/org/apache/lucene/index/TestMultiFields.java
@@ -17,12 +17,24 @@ package org.apache.lucene.index;
* limitations under the License.
*/
+import java.util.ArrayList;
+import java.util.Collections;
+import java.util.HashMap;
+import java.util.HashSet;
+import java.util.List;
+import java.util.Map;
+import java.util.Set;
+
+import org.apache.lucene.analysis.MockAnalyzer;
+import org.apache.lucene.document.Document;
+import org.apache.lucene.document.Field;
import org.apache.lucene.search.DocIdSetIterator;
-import org.apache.lucene.store.*;
-import org.apache.lucene.util.*;
-import org.apache.lucene.document.*;
-import org.apache.lucene.analysis.*;
-import java.util.*;
+import org.apache.lucene.store.Directory;
+import org.apache.lucene.util.Bits;
+import org.apache.lucene.util.BytesRef;
+import org.apache.lucene.util.LuceneTestCase;
+import org.apache.lucene.util.TestUtil;
+import org.apache.lucene.util.UnicodeUtil;
public class TestMultiFields extends LuceneTestCase {
@@ -123,15 +135,15 @@ public class TestMultiFields extends LuceneTestCase {
System.out.println("TEST: seek term="+ UnicodeUtil.toHexString(term.utf8ToString()) + " " + term);
}
- DocsEnum docsEnum = TestUtil.docs(random(), reader, "field", term, liveDocs, null, DocsEnum.FLAG_NONE);
- assertNotNull(docsEnum);
+ PostingsEnum postingsEnum = TestUtil.docs(random(), reader, "field", term, liveDocs, null, PostingsEnum.FLAG_NONE);
+ assertNotNull(postingsEnum);
for(int docID : docs.get(term)) {
if (!deleted.contains(docID)) {
- assertEquals(docID, docsEnum.nextDoc());
+ assertEquals(docID, postingsEnum.nextDoc());
}
}
- assertEquals(DocIdSetIterator.NO_MORE_DOCS, docsEnum.nextDoc());
+ assertEquals(DocIdSetIterator.NO_MORE_DOCS, postingsEnum.nextDoc());
}
reader.close();
@@ -164,8 +176,8 @@ public class TestMultiFields extends LuceneTestCase {
w.addDocument(d);
IndexReader r = w.getReader();
w.close();
- DocsEnum d1 = TestUtil.docs(random(), r, "f", new BytesRef("j"), null, null, DocsEnum.FLAG_NONE);
- DocsEnum d2 = TestUtil.docs(random(), r, "f", new BytesRef("j"), null, null, DocsEnum.FLAG_NONE);
+ PostingsEnum d1 = TestUtil.docs(random(), r, "f", new BytesRef("j"), null, null, PostingsEnum.FLAG_NONE);
+ PostingsEnum d2 = TestUtil.docs(random(), r, "f", new BytesRef("j"), null, null, PostingsEnum.FLAG_NONE);
assertEquals(0, d1.nextDoc());
assertEquals(0, d2.nextDoc());
r.close();
@@ -182,7 +194,7 @@ public class TestMultiFields extends LuceneTestCase {
w.addDocument(d);
IndexReader r = w.getReader();
w.close();
- DocsEnum de = MultiFields.getTermDocsEnum(r, null, "f", new BytesRef("j"));
+ PostingsEnum de = MultiFields.getTermDocsEnum(r, null, "f", new BytesRef("j"));
assertEquals(0, de.nextDoc());
assertEquals(1, de.nextDoc());
assertEquals(DocIdSetIterator.NO_MORE_DOCS, de.nextDoc());
diff --git a/lucene/core/src/test/org/apache/lucene/index/TestMultiLevelSkipList.java b/lucene/core/src/test/org/apache/lucene/index/TestMultiLevelSkipList.java
index d1a9cd85cc7..b94728064b3 100644
--- a/lucene/core/src/test/org/apache/lucene/index/TestMultiLevelSkipList.java
+++ b/lucene/core/src/test/org/apache/lucene/index/TestMultiLevelSkipList.java
@@ -84,7 +84,7 @@ public class TestMultiLevelSkipList extends LuceneTestCase {
for (int i = 0; i < 2; i++) {
counter = 0;
- DocsAndPositionsEnum tp = reader.termPositionsEnum(term);
+ PostingsEnum tp = reader.termDocsEnum(term, PostingsEnum.FLAG_ALL);
checkSkipTo(tp, 14, 185); // no skips
checkSkipTo(tp, 17, 190); // one skip on level 0
checkSkipTo(tp, 287, 200); // one skip on level 1, two on level 0
@@ -95,7 +95,7 @@ public class TestMultiLevelSkipList extends LuceneTestCase {
}
}
- public void checkSkipTo(DocsAndPositionsEnum tp, int target, int maxCounter) throws IOException {
+ public void checkSkipTo(PostingsEnum tp, int target, int maxCounter) throws IOException {
tp.advance(target);
if (maxCounter < counter) {
fail("Too many bytes read: " + counter + " vs " + maxCounter);
diff --git a/lucene/core/src/test/org/apache/lucene/index/TestOmitPositions.java b/lucene/core/src/test/org/apache/lucene/index/TestOmitPositions.java
index fb58903faa7..6f7d39bee4e 100644
--- a/lucene/core/src/test/org/apache/lucene/index/TestOmitPositions.java
+++ b/lucene/core/src/test/org/apache/lucene/index/TestOmitPositions.java
@@ -53,7 +53,7 @@ public class TestOmitPositions extends LuceneTestCase {
assertNull(MultiFields.getTermPositionsEnum(reader, null, "foo", new BytesRef("test")));
- DocsEnum de = TestUtil.docs(random(), reader, "foo", new BytesRef("test"), null, null, DocsEnum.FLAG_FREQS);
+ PostingsEnum de = TestUtil.docs(random(), reader, "foo", new BytesRef("test"), null, null, PostingsEnum.FLAG_FREQS);
while (de.nextDoc() != DocIdSetIterator.NO_MORE_DOCS) {
assertEquals(2, de.freq());
}
diff --git a/lucene/core/src/test/org/apache/lucene/index/TestParallelTermEnum.java b/lucene/core/src/test/org/apache/lucene/index/TestParallelTermEnum.java
index 7fd9182767c..b1cdedcebc9 100644
--- a/lucene/core/src/test/org/apache/lucene/index/TestParallelTermEnum.java
+++ b/lucene/core/src/test/org/apache/lucene/index/TestParallelTermEnum.java
@@ -80,7 +80,7 @@ public class TestParallelTermEnum extends LuceneTestCase {
BytesRef b = te.next();
assertNotNull(b);
assertEquals(t, b.utf8ToString());
- DocsEnum td = TestUtil.docs(random(), te, liveDocs, null, DocsEnum.FLAG_NONE);
+ PostingsEnum td = TestUtil.docs(random(), te, liveDocs, null, PostingsEnum.FLAG_NONE);
assertTrue(td.nextDoc() != DocIdSetIterator.NO_MORE_DOCS);
assertEquals(0, td.docID());
assertEquals(td.nextDoc(), DocIdSetIterator.NO_MORE_DOCS);
diff --git a/lucene/core/src/test/org/apache/lucene/index/TestPayloads.java b/lucene/core/src/test/org/apache/lucene/index/TestPayloads.java
index fb0d142032a..9e3774cdd60 100644
--- a/lucene/core/src/test/org/apache/lucene/index/TestPayloads.java
+++ b/lucene/core/src/test/org/apache/lucene/index/TestPayloads.java
@@ -26,17 +26,24 @@ import java.util.HashMap;
import java.util.List;
import java.util.Map;
-import org.apache.lucene.analysis.*;
-import org.apache.lucene.analysis.tokenattributes.PayloadAttribute;
+import org.apache.lucene.analysis.Analyzer;
+import org.apache.lucene.analysis.CannedTokenStream;
+import org.apache.lucene.analysis.MockAnalyzer;
+import org.apache.lucene.analysis.MockTokenizer;
+import org.apache.lucene.analysis.Token;
+import org.apache.lucene.analysis.TokenFilter;
+import org.apache.lucene.analysis.TokenStream;
+import org.apache.lucene.analysis.Tokenizer;
import org.apache.lucene.analysis.tokenattributes.CharTermAttribute;
+import org.apache.lucene.analysis.tokenattributes.PayloadAttribute;
import org.apache.lucene.document.Document;
import org.apache.lucene.document.Field;
import org.apache.lucene.document.TextField;
import org.apache.lucene.index.IndexWriterConfig.OpenMode;
import org.apache.lucene.search.DocIdSetIterator;
import org.apache.lucene.store.Directory;
-import org.apache.lucene.util.BytesRef;
import org.apache.lucene.util.Bits;
+import org.apache.lucene.util.BytesRef;
import org.apache.lucene.util.LuceneTestCase;
import org.apache.lucene.util.TestUtil;
@@ -183,14 +190,14 @@ public class TestPayloads extends LuceneTestCase {
byte[] verifyPayloadData = new byte[payloadDataLength];
offset = 0;
- DocsAndPositionsEnum[] tps = new DocsAndPositionsEnum[numTerms];
+ PostingsEnum[] tps = new PostingsEnum[numTerms];
for (int i = 0; i < numTerms; i++) {
tps[i] = MultiFields.getTermPositionsEnum(reader,
MultiFields.getLiveDocs(reader),
terms[i].field(),
new BytesRef(terms[i].text()));
}
-
+
while (tps[0].nextDoc() != DocIdSetIterator.NO_MORE_DOCS) {
for (int i = 1; i < numTerms; i++) {
tps[i].nextDoc();
@@ -208,13 +215,13 @@ public class TestPayloads extends LuceneTestCase {
}
}
}
-
+
assertByteArrayEquals(payloadData, verifyPayloadData);
/*
* test lazy skipping
*/
- DocsAndPositionsEnum tp = MultiFields.getTermPositionsEnum(reader,
+ PostingsEnum tp = MultiFields.getTermPositionsEnum(reader,
MultiFields.getLiveDocs(reader),
terms[0].field(),
new BytesRef(terms[0].text()));
@@ -237,7 +244,6 @@ public class TestPayloads extends LuceneTestCase {
payload = tp.getPayload();
assertEquals("Wrong payload length.", 1, payload.length);
assertEquals(payload.bytes[payload.offset], payloadData[5 * numTerms]);
-
/*
* Test different lengths at skip points
@@ -448,7 +454,7 @@ public class TestPayloads extends LuceneTestCase {
final int numThreads = 5;
final int numDocs = atLeast(50);
final ByteArrayPool pool = new ByteArrayPool(numThreads, 5);
-
+
Directory dir = newDirectory();
final IndexWriter writer = new IndexWriter(dir, newIndexWriterConfig(new MockAnalyzer(random())));
final String field = "test";
@@ -480,10 +486,10 @@ public class TestPayloads extends LuceneTestCase {
IndexReader reader = DirectoryReader.open(dir);
TermsEnum terms = MultiFields.getFields(reader).terms(field).iterator(null);
Bits liveDocs = MultiFields.getLiveDocs(reader);
- DocsAndPositionsEnum tp = null;
+ PostingsEnum tp = null;
while (terms.next() != null) {
String termText = terms.term().utf8ToString();
- tp = terms.docsAndPositions(liveDocs, tp);
+ tp = terms.postings(liveDocs, tp, PostingsEnum.FLAG_PAYLOADS);
while(tp.nextDoc() != DocIdSetIterator.NO_MORE_DOCS) {
int freq = tp.freq();
for (int i = 0; i < freq; i++) {
@@ -603,7 +609,7 @@ public class TestPayloads extends LuceneTestCase {
writer.addDocument(doc);
DirectoryReader reader = writer.getReader();
LeafReader sr = SlowCompositeReaderWrapper.wrap(reader);
- DocsAndPositionsEnum de = sr.termPositionsEnum(new Term("field", "withPayload"));
+ PostingsEnum de = sr.termDocsEnum(new Term("field", "withPayload"), PostingsEnum.FLAG_POSITIONS);
de.nextDoc();
de.nextPosition();
assertEquals(new BytesRef("test"), de.getPayload());
@@ -637,7 +643,7 @@ public class TestPayloads extends LuceneTestCase {
writer.addDocument(doc);
DirectoryReader reader = writer.getReader();
SegmentReader sr = getOnlySegmentReader(reader);
- DocsAndPositionsEnum de = sr.termPositionsEnum(new Term("field", "withPayload"));
+ PostingsEnum de = sr.termDocsEnum(new Term("field", "withPayload"), PostingsEnum.FLAG_POSITIONS);
de.nextDoc();
de.nextPosition();
assertEquals(new BytesRef("test"), de.getPayload());
diff --git a/lucene/core/src/test/org/apache/lucene/index/TestPayloadsOnVectors.java b/lucene/core/src/test/org/apache/lucene/index/TestPayloadsOnVectors.java
index 3fa9a81be60..08a2d4199fd 100644
--- a/lucene/core/src/test/org/apache/lucene/index/TestPayloadsOnVectors.java
+++ b/lucene/core/src/test/org/apache/lucene/index/TestPayloadsOnVectors.java
@@ -72,7 +72,7 @@ public class TestPayloadsOnVectors extends LuceneTestCase {
assert terms != null;
TermsEnum termsEnum = terms.iterator(null);
assertTrue(termsEnum.seekExact(new BytesRef("withPayload")));
- DocsAndPositionsEnum de = termsEnum.docsAndPositions(null, null);
+ PostingsEnum de = termsEnum.postings(null, null, PostingsEnum.FLAG_ALL);
assertEquals(0, de.nextDoc());
assertEquals(0, de.nextPosition());
assertEquals(new BytesRef("test"), de.getPayload());
@@ -114,7 +114,7 @@ public class TestPayloadsOnVectors extends LuceneTestCase {
assert terms != null;
TermsEnum termsEnum = terms.iterator(null);
assertTrue(termsEnum.seekExact(new BytesRef("withPayload")));
- DocsAndPositionsEnum de = termsEnum.docsAndPositions(null, null);
+ PostingsEnum de = termsEnum.postings(null, null, PostingsEnum.FLAG_ALL);
assertEquals(0, de.nextDoc());
assertEquals(3, de.nextPosition());
assertEquals(new BytesRef("test"), de.getPayload());
diff --git a/lucene/core/src/test/org/apache/lucene/index/TestPerSegmentDeletes.java b/lucene/core/src/test/org/apache/lucene/index/TestPerSegmentDeletes.java
index 6c5c2d373bc..135ec6c7914 100644
--- a/lucene/core/src/test/org/apache/lucene/index/TestPerSegmentDeletes.java
+++ b/lucene/core/src/test/org/apache/lucene/index/TestPerSegmentDeletes.java
@@ -226,16 +226,16 @@ public class TestPerSegmentDeletes extends LuceneTestCase {
Terms cterms = fields.terms(term.field);
TermsEnum ctermsEnum = cterms.iterator(null);
if (ctermsEnum.seekExact(new BytesRef(term.text()))) {
- DocsEnum docsEnum = TestUtil.docs(random(), ctermsEnum, bits, null, DocsEnum.FLAG_NONE);
- return toArray(docsEnum);
+ PostingsEnum postingsEnum = TestUtil.docs(random(), ctermsEnum, bits, null, PostingsEnum.FLAG_NONE);
+ return toArray(postingsEnum);
}
return null;
}
- public static int[] toArray(DocsEnum docsEnum) throws IOException {
+ public static int[] toArray(PostingsEnum postingsEnum) throws IOException {
List docs = new ArrayList<>();
- while (docsEnum.nextDoc() != DocIdSetIterator.NO_MORE_DOCS) {
- int docID = docsEnum.docID();
+ while (postingsEnum.nextDoc() != DocIdSetIterator.NO_MORE_DOCS) {
+ int docID = postingsEnum.docID();
docs.add(docID);
}
return ArrayUtil.toIntArray(docs);
diff --git a/lucene/core/src/test/org/apache/lucene/index/TestPostingsOffsets.java b/lucene/core/src/test/org/apache/lucene/index/TestPostingsOffsets.java
index f7eac407584..1728b1a5396 100644
--- a/lucene/core/src/test/org/apache/lucene/index/TestPostingsOffsets.java
+++ b/lucene/core/src/test/org/apache/lucene/index/TestPostingsOffsets.java
@@ -82,7 +82,7 @@ public class TestPostingsOffsets extends LuceneTestCase {
IndexReader r = w.getReader();
w.close();
- DocsAndPositionsEnum dp = MultiFields.getTermPositionsEnum(r, null, "content", new BytesRef("a"));
+ PostingsEnum dp = MultiFields.getTermPositionsEnum(r, null, "content", new BytesRef("a"));
assertNotNull(dp);
assertEquals(0, dp.nextDoc());
assertEquals(2, dp.freq());
@@ -154,7 +154,7 @@ public class TestPostingsOffsets extends LuceneTestCase {
String terms[] = { "one", "two", "three", "four", "five", "six", "seven", "eight", "nine", "ten", "hundred" };
for (String term : terms) {
- DocsAndPositionsEnum dp = MultiFields.getTermPositionsEnum(reader, null, "numbers", new BytesRef(term));
+ PostingsEnum dp = MultiFields.getTermPositionsEnum(reader, null, "numbers", new BytesRef(term));
int doc;
while((doc = dp.nextDoc()) != DocIdSetIterator.NO_MORE_DOCS) {
String storedNumbers = reader.document(doc).get("numbers");
@@ -182,7 +182,7 @@ public class TestPostingsOffsets extends LuceneTestCase {
for (int j = 0; j < numSkippingTests; j++) {
int num = TestUtil.nextInt(random(), 100, Math.min(numDocs - 1, 999));
- DocsAndPositionsEnum dp = MultiFields.getTermPositionsEnum(reader, null, "numbers", new BytesRef("hundred"));
+ PostingsEnum dp = MultiFields.getTermPositionsEnum(reader, null, "numbers", new BytesRef("hundred"));
int doc = dp.advance(num);
assertEquals(num, doc);
int freq = dp.freq();
@@ -207,7 +207,7 @@ public class TestPostingsOffsets extends LuceneTestCase {
// check that other fields (without offsets) work correctly
for (int i = 0; i < numDocs; i++) {
- DocsEnum dp = MultiFields.getTermDocsEnum(reader, null, "id", new BytesRef("" + i), 0);
+ PostingsEnum dp = MultiFields.getTermDocsEnum(reader, null, "id", new BytesRef("" + i), 0);
assertEquals(i, dp.nextDoc());
assertEquals(DocIdSetIterator.NO_MORE_DOCS, dp.nextDoc());
}
@@ -294,14 +294,14 @@ public class TestPostingsOffsets extends LuceneTestCase {
LeafReader sub = ctx.reader();
//System.out.println("\nsub=" + sub);
final TermsEnum termsEnum = sub.fields().terms("content").iterator(null);
- DocsEnum docs = null;
- DocsAndPositionsEnum docsAndPositions = null;
- DocsAndPositionsEnum docsAndPositionsAndOffsets = null;
+ PostingsEnum docs = null;
+ PostingsEnum docsAndPositions = null;
+ PostingsEnum docsAndPositionsAndOffsets = null;
final NumericDocValues docIDToID = DocValues.getNumeric(sub, "id");
for(String term : terms) {
//System.out.println(" term=" + term);
if (termsEnum.seekExact(new BytesRef(term))) {
- docs = termsEnum.docs(null, docs);
+ docs = termsEnum.postings(null, docs);
assertNotNull(docs);
int doc;
//System.out.println(" doc/freq");
@@ -313,7 +313,7 @@ public class TestPostingsOffsets extends LuceneTestCase {
}
// explicitly exclude offsets here
- docsAndPositions = termsEnum.docsAndPositions(null, docsAndPositions, DocsAndPositionsEnum.FLAG_PAYLOADS);
+ docsAndPositions = termsEnum.postings(null, docsAndPositions, PostingsEnum.FLAG_ALL);
assertNotNull(docsAndPositions);
//System.out.println(" doc/freq/pos");
while((doc = docsAndPositions.nextDoc()) != DocIdSetIterator.NO_MORE_DOCS) {
@@ -328,7 +328,7 @@ public class TestPostingsOffsets extends LuceneTestCase {
}
}
- docsAndPositionsAndOffsets = termsEnum.docsAndPositions(null, docsAndPositions);
+ docsAndPositionsAndOffsets = termsEnum.postings(null, docsAndPositions, PostingsEnum.FLAG_ALL);
assertNotNull(docsAndPositionsAndOffsets);
//System.out.println(" doc/freq/pos/offs");
while((doc = docsAndPositionsAndOffsets.nextDoc()) != DocIdSetIterator.NO_MORE_DOCS) {
diff --git a/lucene/core/src/test/org/apache/lucene/index/TestSegmentMerger.java b/lucene/core/src/test/org/apache/lucene/index/TestSegmentMerger.java
index d7b577fcf90..0108502e670 100644
--- a/lucene/core/src/test/org/apache/lucene/index/TestSegmentMerger.java
+++ b/lucene/core/src/test/org/apache/lucene/index/TestSegmentMerger.java
@@ -106,7 +106,7 @@ public class TestSegmentMerger extends LuceneTestCase {
assertTrue(newDoc2 != null);
assertTrue(DocHelper.numFields(newDoc2) == DocHelper.numFields(doc2) - DocHelper.unstored.size());
- DocsEnum termDocs = TestUtil.docs(random(), mergedReader,
+ PostingsEnum termDocs = TestUtil.docs(random(), mergedReader,
DocHelper.TEXT_FIELD_2_KEY,
new BytesRef("field"),
MultiFields.getLiveDocs(mergedReader),
diff --git a/lucene/core/src/test/org/apache/lucene/index/TestSegmentReader.java b/lucene/core/src/test/org/apache/lucene/index/TestSegmentReader.java
index 918b915b272..5abf2c4469c 100644
--- a/lucene/core/src/test/org/apache/lucene/index/TestSegmentReader.java
+++ b/lucene/core/src/test/org/apache/lucene/index/TestSegmentReader.java
@@ -127,7 +127,7 @@ public class TestSegmentReader extends LuceneTestCase {
}
}
- DocsEnum termDocs = TestUtil.docs(random(), reader,
+ PostingsEnum termDocs = TestUtil.docs(random(), reader,
DocHelper.TEXT_FIELD_1_KEY,
new BytesRef("field"),
MultiFields.getLiveDocs(reader),
@@ -145,7 +145,7 @@ public class TestSegmentReader extends LuceneTestCase {
assertTrue(termDocs.nextDoc() != DocIdSetIterator.NO_MORE_DOCS);
- DocsAndPositionsEnum positions = MultiFields.getTermPositionsEnum(reader,
+ PostingsEnum positions = MultiFields.getTermPositionsEnum(reader,
MultiFields.getLiveDocs(reader),
DocHelper.TEXT_FIELD_1_KEY,
new BytesRef("field"));
diff --git a/lucene/core/src/test/org/apache/lucene/index/TestSegmentTermDocs.java b/lucene/core/src/test/org/apache/lucene/index/TestSegmentTermDocs.java
index 3f5686e428d..b67f111bff3 100644
--- a/lucene/core/src/test/org/apache/lucene/index/TestSegmentTermDocs.java
+++ b/lucene/core/src/test/org/apache/lucene/index/TestSegmentTermDocs.java
@@ -58,7 +58,7 @@ public class TestSegmentTermDocs extends LuceneTestCase {
TermsEnum terms = reader.fields().terms(DocHelper.TEXT_FIELD_2_KEY).iterator(null);
terms.seekCeil(new BytesRef("field"));
- DocsEnum termDocs = TestUtil.docs(random(), terms, reader.getLiveDocs(), null, DocsEnum.FLAG_FREQS);
+ PostingsEnum termDocs = TestUtil.docs(random(), terms, reader.getLiveDocs(), null, PostingsEnum.FLAG_FREQS);
if (termDocs.nextDoc() != DocIdSetIterator.NO_MORE_DOCS) {
int docId = termDocs.docID();
assertTrue(docId == 0);
@@ -73,7 +73,7 @@ public class TestSegmentTermDocs extends LuceneTestCase {
//After adding the document, we should be able to read it back in
SegmentReader reader = new SegmentReader(info, newIOContext(random()));
assertTrue(reader != null);
- DocsEnum termDocs = TestUtil.docs(random(), reader,
+ PostingsEnum termDocs = TestUtil.docs(random(), reader,
"textField2",
new BytesRef("bad"),
reader.getLiveDocs(),
@@ -87,7 +87,7 @@ public class TestSegmentTermDocs extends LuceneTestCase {
//After adding the document, we should be able to read it back in
SegmentReader reader = new SegmentReader(info, newIOContext(random()));
assertTrue(reader != null);
- DocsEnum termDocs = TestUtil.docs(random(), reader,
+ PostingsEnum termDocs = TestUtil.docs(random(), reader,
"junk",
new BytesRef("bad"),
reader.getLiveDocs(),
@@ -121,12 +121,12 @@ public class TestSegmentTermDocs extends LuceneTestCase {
IndexReader reader = DirectoryReader.open(dir);
- DocsEnum tdocs = TestUtil.docs(random(), reader,
+ PostingsEnum tdocs = TestUtil.docs(random(), reader,
ta.field(),
new BytesRef(ta.text()),
MultiFields.getLiveDocs(reader),
null,
- DocsEnum.FLAG_FREQS);
+ PostingsEnum.FLAG_FREQS);
// without optimization (assumption skipInterval == 16)
@@ -169,7 +169,7 @@ public class TestSegmentTermDocs extends LuceneTestCase {
new BytesRef(tb.text()),
MultiFields.getLiveDocs(reader),
null,
- DocsEnum.FLAG_FREQS);
+ PostingsEnum.FLAG_FREQS);
assertTrue(tdocs.nextDoc() != DocIdSetIterator.NO_MORE_DOCS);
assertEquals(10, tdocs.docID());
@@ -193,7 +193,7 @@ public class TestSegmentTermDocs extends LuceneTestCase {
new BytesRef(tb.text()),
MultiFields.getLiveDocs(reader),
null,
- DocsEnum.FLAG_FREQS);
+ PostingsEnum.FLAG_FREQS);
assertTrue(tdocs.advance(5) != DocIdSetIterator.NO_MORE_DOCS);
assertEquals(10, tdocs.docID());
@@ -213,7 +213,7 @@ public class TestSegmentTermDocs extends LuceneTestCase {
new BytesRef(tc.text()),
MultiFields.getLiveDocs(reader),
null,
- DocsEnum.FLAG_FREQS);
+ PostingsEnum.FLAG_FREQS);
assertTrue(tdocs.nextDoc() != DocIdSetIterator.NO_MORE_DOCS);
assertEquals(26, tdocs.docID());
diff --git a/lucene/core/src/test/org/apache/lucene/index/TestStressAdvance.java b/lucene/core/src/test/org/apache/lucene/index/TestStressAdvance.java
index bd35b503a28..a28e3d08eaa 100644
--- a/lucene/core/src/test/org/apache/lucene/index/TestStressAdvance.java
+++ b/lucene/core/src/test/org/apache/lucene/index/TestStressAdvance.java
@@ -76,17 +76,17 @@ public class TestStressAdvance extends LuceneTestCase {
}
final TermsEnum te = getOnlySegmentReader(r).fields().terms("field").iterator(null);
- DocsEnum de = null;
+ PostingsEnum de = null;
for(int iter2=0;iter2<10;iter2++) {
if (VERBOSE) {
System.out.println("\nTEST: iter=" + iter + " iter2=" + iter2);
}
assertEquals(TermsEnum.SeekStatus.FOUND, te.seekCeil(new BytesRef("a")));
- de = TestUtil.docs(random(), te, null, de, DocsEnum.FLAG_NONE);
+ de = TestUtil.docs(random(), te, null, de, PostingsEnum.FLAG_NONE);
testOne(de, aDocIDs);
assertEquals(TermsEnum.SeekStatus.FOUND, te.seekCeil(new BytesRef("b")));
- de = TestUtil.docs(random(), te, null, de, DocsEnum.FLAG_NONE);
+ de = TestUtil.docs(random(), te, null, de, PostingsEnum.FLAG_NONE);
testOne(de, bDocIDs);
}
@@ -96,7 +96,7 @@ public class TestStressAdvance extends LuceneTestCase {
}
}
- private void testOne(DocsEnum docs, List expected) throws Exception {
+ private void testOne(PostingsEnum docs, List expected) throws Exception {
if (VERBOSE) {
System.out.println("test");
}
diff --git a/lucene/core/src/test/org/apache/lucene/index/TestStressIndexing2.java b/lucene/core/src/test/org/apache/lucene/index/TestStressIndexing2.java
index 926d8dbea46..13a4577fcd8 100644
--- a/lucene/core/src/test/org/apache/lucene/index/TestStressIndexing2.java
+++ b/lucene/core/src/test/org/apache/lucene/index/TestStressIndexing2.java
@@ -325,9 +325,9 @@ public class TestStressIndexing2 extends LuceneTestCase {
// make sure r1 is in fact empty (eg has only all
// deleted docs):
Bits liveDocs = MultiFields.getLiveDocs(r1);
- DocsEnum docs = null;
+ PostingsEnum docs = null;
while(termsEnum.next() != null) {
- docs = TestUtil.docs(random(), termsEnum, liveDocs, docs, DocsEnum.FLAG_NONE);
+ docs = TestUtil.docs(random(), termsEnum, liveDocs, docs, PostingsEnum.FLAG_NONE);
while(docs.nextDoc() != DocIdSetIterator.NO_MORE_DOCS) {
fail("r1 is not empty but r2 is");
}
@@ -336,8 +336,8 @@ public class TestStressIndexing2 extends LuceneTestCase {
}
TermsEnum termsEnum2 = terms2.iterator(null);
- DocsEnum termDocs1 = null;
- DocsEnum termDocs2 = null;
+ PostingsEnum termDocs1 = null;
+ PostingsEnum termDocs2 = null;
while(true) {
BytesRef term = termsEnum.next();
@@ -346,9 +346,9 @@ public class TestStressIndexing2 extends LuceneTestCase {
break;
}
- termDocs1 = TestUtil.docs(random(), termsEnum, liveDocs1, termDocs1, DocsEnum.FLAG_NONE);
+ termDocs1 = TestUtil.docs(random(), termsEnum, liveDocs1, termDocs1, PostingsEnum.FLAG_NONE);
if (termsEnum2.seekExact(term)) {
- termDocs2 = TestUtil.docs(random(), termsEnum2, liveDocs2, termDocs2, DocsEnum.FLAG_NONE);
+ termDocs2 = TestUtil.docs(random(), termsEnum2, liveDocs2, termDocs2, PostingsEnum.FLAG_NONE);
} else {
termDocs2 = null;
}
@@ -386,8 +386,8 @@ public class TestStressIndexing2 extends LuceneTestCase {
Fields tv1 = r1.getTermVectors(id1);
System.out.println(" d1=" + tv1);
if (tv1 != null) {
- DocsAndPositionsEnum dpEnum = null;
- DocsEnum dEnum = null;
+ PostingsEnum dpEnum = null;
+ PostingsEnum dEnum = null;
for (String field : tv1) {
System.out.println(" " + field + ":");
Terms terms3 = tv1.terms(field);
@@ -396,7 +396,7 @@ public class TestStressIndexing2 extends LuceneTestCase {
BytesRef term2;
while((term2 = termsEnum3.next()) != null) {
System.out.println(" " + term2.utf8ToString() + ": freq=" + termsEnum3.totalTermFreq());
- dpEnum = termsEnum3.docsAndPositions(null, dpEnum);
+ dpEnum = termsEnum3.postings(null, dpEnum, PostingsEnum.FLAG_ALL);
if (dpEnum != null) {
assertTrue(dpEnum.nextDoc() != DocIdSetIterator.NO_MORE_DOCS);
final int freq = dpEnum.freq();
@@ -405,7 +405,7 @@ public class TestStressIndexing2 extends LuceneTestCase {
System.out.println(" pos=" + dpEnum.nextPosition());
}
} else {
- dEnum = TestUtil.docs(random(), termsEnum3, null, dEnum, DocsEnum.FLAG_FREQS);
+ dEnum = TestUtil.docs(random(), termsEnum3, null, dEnum, PostingsEnum.FLAG_FREQS);
assertNotNull(dEnum);
assertTrue(dEnum.nextDoc() != DocIdSetIterator.NO_MORE_DOCS);
final int freq = dEnum.freq();
@@ -418,8 +418,8 @@ public class TestStressIndexing2 extends LuceneTestCase {
Fields tv2 = r2.getTermVectors(id2);
System.out.println(" d2=" + tv2);
if (tv2 != null) {
- DocsAndPositionsEnum dpEnum = null;
- DocsEnum dEnum = null;
+ PostingsEnum dpEnum = null;
+ PostingsEnum dEnum = null;
for (String field : tv2) {
System.out.println(" " + field + ":");
Terms terms3 = tv2.terms(field);
@@ -428,7 +428,7 @@ public class TestStressIndexing2 extends LuceneTestCase {
BytesRef term2;
while((term2 = termsEnum3.next()) != null) {
System.out.println(" " + term2.utf8ToString() + ": freq=" + termsEnum3.totalTermFreq());
- dpEnum = termsEnum3.docsAndPositions(null, dpEnum);
+ dpEnum = termsEnum3.postings(null, dpEnum, PostingsEnum.FLAG_ALL);
if (dpEnum != null) {
assertTrue(dpEnum.nextDoc() != DocIdSetIterator.NO_MORE_DOCS);
final int freq = dpEnum.freq();
@@ -437,7 +437,7 @@ public class TestStressIndexing2 extends LuceneTestCase {
System.out.println(" pos=" + dpEnum.nextPosition());
}
} else {
- dEnum = TestUtil.docs(random(), termsEnum3, null, dEnum, DocsEnum.FLAG_FREQS);
+ dEnum = TestUtil.docs(random(), termsEnum3, null, dEnum, PostingsEnum.FLAG_FREQS);
assertNotNull(dEnum);
assertTrue(dEnum.nextDoc() != DocIdSetIterator.NO_MORE_DOCS);
final int freq = dEnum.freq();
@@ -464,7 +464,7 @@ public class TestStressIndexing2 extends LuceneTestCase {
String field1=null, field2=null;
TermsEnum termsEnum1 = null;
termsEnum2 = null;
- DocsEnum docs1=null, docs2=null;
+ PostingsEnum docs1=null, docs2=null;
// pack both doc and freq into single element for easy sorting
long[] info1 = new long[r1.numDocs()];
@@ -496,7 +496,7 @@ public class TestStressIndexing2 extends LuceneTestCase {
}
//System.out.println("TEST: term1=" + term1);
- docs1 = TestUtil.docs(random(), termsEnum1, liveDocs1, docs1, DocsEnum.FLAG_FREQS);
+ docs1 = TestUtil.docs(random(), termsEnum1, liveDocs1, docs1, PostingsEnum.FLAG_FREQS);
while (docs1.nextDoc() != DocIdSetIterator.NO_MORE_DOCS) {
int d = docs1.docID();
int f = docs1.freq();
@@ -529,7 +529,7 @@ public class TestStressIndexing2 extends LuceneTestCase {
}
//System.out.println("TEST: term1=" + term1);
- docs2 = TestUtil.docs(random(), termsEnum2, liveDocs2, docs2, DocsEnum.FLAG_FREQS);
+ docs2 = TestUtil.docs(random(), termsEnum2, liveDocs2, docs2, PostingsEnum.FLAG_FREQS);
while (docs2.nextDoc() != DocIdSetIterator.NO_MORE_DOCS) {
int d = r2r1[docs2.docID()];
int f = docs2.freq();
@@ -606,10 +606,10 @@ public class TestStressIndexing2 extends LuceneTestCase {
assertNotNull(terms2);
TermsEnum termsEnum2 = terms2.iterator(null);
- DocsAndPositionsEnum dpEnum1 = null;
- DocsAndPositionsEnum dpEnum2 = null;
- DocsEnum dEnum1 = null;
- DocsEnum dEnum2 = null;
+ PostingsEnum dpEnum1 = null;
+ PostingsEnum dpEnum2 = null;
+ PostingsEnum dEnum1 = null;
+ PostingsEnum dEnum2 = null;
BytesRef term1;
while ((term1 = termsEnum1.next()) != null) {
@@ -618,8 +618,8 @@ public class TestStressIndexing2 extends LuceneTestCase {
assertEquals(termsEnum1.totalTermFreq(),
termsEnum2.totalTermFreq());
- dpEnum1 = termsEnum1.docsAndPositions(null, dpEnum1);
- dpEnum2 = termsEnum2.docsAndPositions(null, dpEnum2);
+ dpEnum1 = termsEnum1.postings(null, dpEnum1, PostingsEnum.FLAG_ALL);
+ dpEnum2 = termsEnum2.postings(null, dpEnum2, PostingsEnum.FLAG_ALL);
if (dpEnum1 != null) {
assertNotNull(dpEnum2);
int docID1 = dpEnum1.nextDoc();
@@ -655,8 +655,8 @@ public class TestStressIndexing2 extends LuceneTestCase {
assertEquals(DocIdSetIterator.NO_MORE_DOCS, dpEnum1.nextDoc());
assertEquals(DocIdSetIterator.NO_MORE_DOCS, dpEnum2.nextDoc());
} else {
- dEnum1 = TestUtil.docs(random(), termsEnum1, null, dEnum1, DocsEnum.FLAG_FREQS);
- dEnum2 = TestUtil.docs(random(), termsEnum2, null, dEnum2, DocsEnum.FLAG_FREQS);
+ dEnum1 = TestUtil.docs(random(), termsEnum1, null, dEnum1, PostingsEnum.FLAG_FREQS);
+ dEnum2 = TestUtil.docs(random(), termsEnum2, null, dEnum2, PostingsEnum.FLAG_FREQS);
assertNotNull(dEnum1);
assertNotNull(dEnum2);
int docID1 = dEnum1.nextDoc();
diff --git a/lucene/core/src/test/org/apache/lucene/index/TestTermVectors.java b/lucene/core/src/test/org/apache/lucene/index/TestTermVectors.java
index 356353ea57e..d4bbf445d56 100644
--- a/lucene/core/src/test/org/apache/lucene/index/TestTermVectors.java
+++ b/lucene/core/src/test/org/apache/lucene/index/TestTermVectors.java
@@ -17,8 +17,6 @@ package org.apache.lucene.index;
* limitations under the License.
*/
-import java.io.IOException;
-
import org.apache.lucene.analysis.MockAnalyzer;
import org.apache.lucene.analysis.MockTokenizer;
import org.apache.lucene.document.Document;
@@ -37,6 +35,8 @@ import org.apache.lucene.util.TestUtil;
import org.junit.AfterClass;
import org.junit.BeforeClass;
+import java.io.IOException;
+
public class TestTermVectors extends LuceneTestCase {
private static IndexReader reader;
private static Directory directory;
diff --git a/lucene/core/src/test/org/apache/lucene/index/TestTermVectorsReader.java b/lucene/core/src/test/org/apache/lucene/index/TestTermVectorsReader.java
index a8adfb7161f..0d43cab34d4 100644
--- a/lucene/core/src/test/org/apache/lucene/index/TestTermVectorsReader.java
+++ b/lucene/core/src/test/org/apache/lucene/index/TestTermVectorsReader.java
@@ -220,7 +220,7 @@ public class TestTermVectorsReader extends LuceneTestCase {
assertNotNull(vector);
assertEquals(testTerms.length, vector.size());
TermsEnum termsEnum = vector.iterator(null);
- DocsEnum docsEnum = null;
+ PostingsEnum postingsEnum = null;
for (int i = 0; i < testTerms.length; i++) {
final BytesRef text = termsEnum.next();
assertNotNull(text);
@@ -228,12 +228,12 @@ public class TestTermVectorsReader extends LuceneTestCase {
//System.out.println("Term: " + term);
assertEquals(testTerms[i], term);
- docsEnum = TestUtil.docs(random(), termsEnum, null, docsEnum, DocsEnum.FLAG_NONE);
- assertNotNull(docsEnum);
- int doc = docsEnum.docID();
+ postingsEnum = TestUtil.docs(random(), termsEnum, null, postingsEnum, PostingsEnum.FLAG_NONE);
+ assertNotNull(postingsEnum);
+ int doc = postingsEnum.docID();
assertEquals(-1, doc);
- assertTrue(docsEnum.nextDoc() != DocIdSetIterator.NO_MORE_DOCS);
- assertEquals(DocIdSetIterator.NO_MORE_DOCS, docsEnum.nextDoc());
+ assertTrue(postingsEnum.nextDoc() != DocIdSetIterator.NO_MORE_DOCS);
+ assertEquals(DocIdSetIterator.NO_MORE_DOCS, postingsEnum.nextDoc());
}
assertNull(termsEnum.next());
}
@@ -247,7 +247,7 @@ public class TestTermVectorsReader extends LuceneTestCase {
assertNotNull(vector);
assertEquals(testTerms.length, vector.size());
TermsEnum termsEnum = vector.iterator(null);
- DocsAndPositionsEnum dpEnum = null;
+ PostingsEnum dpEnum = null;
for (int i = 0; i < testTerms.length; i++) {
final BytesRef text = termsEnum.next();
assertNotNull(text);
@@ -255,7 +255,7 @@ public class TestTermVectorsReader extends LuceneTestCase {
//System.out.println("Term: " + term);
assertEquals(testTerms[i], term);
- dpEnum = termsEnum.docsAndPositions(null, dpEnum);
+ dpEnum = termsEnum.postings(null, dpEnum, PostingsEnum.FLAG_ALL);
assertNotNull(dpEnum);
int doc = dpEnum.docID();
assertEquals(-1, doc);
@@ -266,7 +266,7 @@ public class TestTermVectorsReader extends LuceneTestCase {
}
assertEquals(DocIdSetIterator.NO_MORE_DOCS, dpEnum.nextDoc());
- dpEnum = termsEnum.docsAndPositions(null, dpEnum);
+ dpEnum = termsEnum.postings(null, dpEnum, PostingsEnum.FLAG_ALL);
doc = dpEnum.docID();
assertEquals(-1, doc);
assertTrue(dpEnum.nextDoc() != DocIdSetIterator.NO_MORE_DOCS);
@@ -291,8 +291,8 @@ public class TestTermVectorsReader extends LuceneTestCase {
String term = text.utf8ToString();
//System.out.println("Term: " + term);
assertEquals(testTerms[i], term);
- assertNotNull(termsEnum.docs(null, null));
- assertNull(termsEnum.docsAndPositions(null, null)); // no pos
+ assertNotNull(termsEnum.postings(null, null));
+ assertNull(termsEnum.postings(null, null, PostingsEnum.FLAG_ALL)); // no pos
}
reader.close();
}
@@ -304,14 +304,14 @@ public class TestTermVectorsReader extends LuceneTestCase {
TermsEnum termsEnum = vector.iterator(null);
assertNotNull(termsEnum);
assertEquals(testTerms.length, vector.size());
- DocsAndPositionsEnum dpEnum = null;
+ PostingsEnum dpEnum = null;
for (int i = 0; i < testTerms.length; i++) {
final BytesRef text = termsEnum.next();
assertNotNull(text);
String term = text.utf8ToString();
assertEquals(testTerms[i], term);
- dpEnum = termsEnum.docsAndPositions(null, dpEnum);
+ dpEnum = termsEnum.postings(null, dpEnum, PostingsEnum.FLAG_ALL);
assertNotNull(dpEnum);
assertTrue(dpEnum.nextDoc() != DocIdSetIterator.NO_MORE_DOCS);
assertEquals(dpEnum.freq(), positions[i].length);
@@ -320,7 +320,7 @@ public class TestTermVectorsReader extends LuceneTestCase {
}
assertEquals(DocIdSetIterator.NO_MORE_DOCS, dpEnum.nextDoc());
- dpEnum = termsEnum.docsAndPositions(null, dpEnum);
+ dpEnum = termsEnum.postings(null, dpEnum, PostingsEnum.FLAG_ALL);
assertTrue(dpEnum.nextDoc() != DocIdSetIterator.NO_MORE_DOCS);
assertNotNull(dpEnum);
assertEquals(dpEnum.freq(), positions[i].length);
diff --git a/lucene/core/src/test/org/apache/lucene/index/TestTermVectorsWriter.java b/lucene/core/src/test/org/apache/lucene/index/TestTermVectorsWriter.java
index edd1c9ee23c..1a9ca1f6c94 100644
--- a/lucene/core/src/test/org/apache/lucene/index/TestTermVectorsWriter.java
+++ b/lucene/core/src/test/org/apache/lucene/index/TestTermVectorsWriter.java
@@ -68,7 +68,7 @@ public class TestTermVectorsWriter extends LuceneTestCase {
// Token "" occurred once
assertEquals(1, termsEnum.totalTermFreq());
- DocsAndPositionsEnum dpEnum = termsEnum.docsAndPositions(null, null);
+ PostingsEnum dpEnum = termsEnum.postings(null, null, PostingsEnum.FLAG_ALL);
assertTrue(dpEnum.nextDoc() != DocIdSetIterator.NO_MORE_DOCS);
dpEnum.nextPosition();
assertEquals(8, dpEnum.startOffset());
@@ -77,7 +77,7 @@ public class TestTermVectorsWriter extends LuceneTestCase {
// Token "abcd" occurred three times
assertEquals(new BytesRef("abcd"), termsEnum.next());
- dpEnum = termsEnum.docsAndPositions(null, dpEnum);
+ dpEnum = termsEnum.postings(null, dpEnum, PostingsEnum.FLAG_ALL);
assertEquals(3, termsEnum.totalTermFreq());
assertTrue(dpEnum.nextDoc() != DocIdSetIterator.NO_MORE_DOCS);
@@ -117,7 +117,7 @@ public class TestTermVectorsWriter extends LuceneTestCase {
IndexReader r = DirectoryReader.open(dir);
TermsEnum termsEnum = r.getTermVectors(0).terms("field").iterator(null);
assertNotNull(termsEnum.next());
- DocsAndPositionsEnum dpEnum = termsEnum.docsAndPositions(null, null);
+ PostingsEnum dpEnum = termsEnum.postings(null, null, PostingsEnum.FLAG_ALL);
assertEquals(2, termsEnum.totalTermFreq());
assertTrue(dpEnum.nextDoc() != DocIdSetIterator.NO_MORE_DOCS);
@@ -152,7 +152,7 @@ public class TestTermVectorsWriter extends LuceneTestCase {
IndexReader r = DirectoryReader.open(dir);
TermsEnum termsEnum = r.getTermVectors(0).terms("field").iterator(null);
assertNotNull(termsEnum.next());
- DocsAndPositionsEnum dpEnum = termsEnum.docsAndPositions(null, null);
+ PostingsEnum dpEnum = termsEnum.postings(null, null, PostingsEnum.FLAG_ALL);
assertEquals(2, termsEnum.totalTermFreq());
assertTrue(dpEnum.nextDoc() != DocIdSetIterator.NO_MORE_DOCS);
@@ -190,7 +190,7 @@ public class TestTermVectorsWriter extends LuceneTestCase {
IndexReader r = DirectoryReader.open(dir);
TermsEnum termsEnum = r.getTermVectors(0).terms("field").iterator(null);
assertNotNull(termsEnum.next());
- DocsAndPositionsEnum dpEnum = termsEnum.docsAndPositions(null, null);
+ PostingsEnum dpEnum = termsEnum.postings(null, null, PostingsEnum.FLAG_ALL);
assertEquals(2, termsEnum.totalTermFreq());
assertTrue(dpEnum.nextDoc() != DocIdSetIterator.NO_MORE_DOCS);
@@ -225,7 +225,7 @@ public class TestTermVectorsWriter extends LuceneTestCase {
IndexReader r = DirectoryReader.open(dir);
TermsEnum termsEnum = r.getTermVectors(0).terms("field").iterator(null);
assertNotNull(termsEnum.next());
- DocsAndPositionsEnum dpEnum = termsEnum.docsAndPositions(null, null);
+ PostingsEnum dpEnum = termsEnum.postings(null, null, PostingsEnum.FLAG_ALL);
assertEquals(2, termsEnum.totalTermFreq());
assertTrue(dpEnum.nextDoc() != DocIdSetIterator.NO_MORE_DOCS);
@@ -261,7 +261,7 @@ public class TestTermVectorsWriter extends LuceneTestCase {
IndexReader r = DirectoryReader.open(dir);
TermsEnum termsEnum = r.getTermVectors(0).terms("field").iterator(null);
assertNotNull(termsEnum.next());
- DocsAndPositionsEnum dpEnum = termsEnum.docsAndPositions(null, null);
+ PostingsEnum dpEnum = termsEnum.postings(null, null, PostingsEnum.FLAG_ALL);
assertTrue(dpEnum.nextDoc() != DocIdSetIterator.NO_MORE_DOCS);
dpEnum.nextPosition();
@@ -269,14 +269,14 @@ public class TestTermVectorsWriter extends LuceneTestCase {
assertEquals(4, dpEnum.endOffset());
assertNotNull(termsEnum.next());
- dpEnum = termsEnum.docsAndPositions(null, dpEnum);
+ dpEnum = termsEnum.postings(null, dpEnum, PostingsEnum.FLAG_ALL);
assertTrue(dpEnum.nextDoc() != DocIdSetIterator.NO_MORE_DOCS);
dpEnum.nextPosition();
assertEquals(11, dpEnum.startOffset());
assertEquals(17, dpEnum.endOffset());
assertNotNull(termsEnum.next());
- dpEnum = termsEnum.docsAndPositions(null, dpEnum);
+ dpEnum = termsEnum.postings(null, dpEnum, PostingsEnum.FLAG_ALL);
assertTrue(dpEnum.nextDoc() != DocIdSetIterator.NO_MORE_DOCS);
dpEnum.nextPosition();
assertEquals(18, dpEnum.startOffset());
@@ -305,7 +305,7 @@ public class TestTermVectorsWriter extends LuceneTestCase {
IndexReader r = DirectoryReader.open(dir);
TermsEnum termsEnum = r.getTermVectors(0).terms("field").iterator(null);
assertNotNull(termsEnum.next());
- DocsAndPositionsEnum dpEnum = termsEnum.docsAndPositions(null, null);
+ PostingsEnum dpEnum = termsEnum.postings(null, null, PostingsEnum.FLAG_ALL);
assertEquals(1, (int) termsEnum.totalTermFreq());
assertTrue(dpEnum.nextDoc() != DocIdSetIterator.NO_MORE_DOCS);
@@ -314,7 +314,7 @@ public class TestTermVectorsWriter extends LuceneTestCase {
assertEquals(7, dpEnum.endOffset());
assertNotNull(termsEnum.next());
- dpEnum = termsEnum.docsAndPositions(null, dpEnum);
+ dpEnum = termsEnum.postings(null, dpEnum, PostingsEnum.FLAG_ALL);
assertTrue(dpEnum.nextDoc() != DocIdSetIterator.NO_MORE_DOCS);
dpEnum.nextPosition();
assertEquals(8, dpEnum.startOffset());
@@ -347,7 +347,7 @@ public class TestTermVectorsWriter extends LuceneTestCase {
IndexReader r = DirectoryReader.open(dir);
TermsEnum termsEnum = r.getTermVectors(0).terms("field").iterator(null);
assertNotNull(termsEnum.next());
- DocsAndPositionsEnum dpEnum = termsEnum.docsAndPositions(null, null);
+ PostingsEnum dpEnum = termsEnum.postings(null, null, PostingsEnum.FLAG_ALL);
assertEquals(1, (int) termsEnum.totalTermFreq());
assertTrue(dpEnum.nextDoc() != DocIdSetIterator.NO_MORE_DOCS);
@@ -356,7 +356,7 @@ public class TestTermVectorsWriter extends LuceneTestCase {
assertEquals(4, dpEnum.endOffset());
assertNotNull(termsEnum.next());
- dpEnum = termsEnum.docsAndPositions(null, dpEnum);
+ dpEnum = termsEnum.postings(null, dpEnum, PostingsEnum.FLAG_ALL);
assertTrue(dpEnum.nextDoc() != DocIdSetIterator.NO_MORE_DOCS);
dpEnum.nextPosition();
assertEquals(6, dpEnum.startOffset());
diff --git a/lucene/core/src/test/org/apache/lucene/index/TestTermdocPerf.java b/lucene/core/src/test/org/apache/lucene/index/TestTermdocPerf.java
index a1d3a773993..f2e33bdde8a 100644
--- a/lucene/core/src/test/org/apache/lucene/index/TestTermdocPerf.java
+++ b/lucene/core/src/test/org/apache/lucene/index/TestTermdocPerf.java
@@ -119,11 +119,11 @@ public class TestTermdocPerf extends LuceneTestCase {
start = System.currentTimeMillis();
int ret=0;
- DocsEnum tdocs = null;
+ PostingsEnum tdocs = null;
final Random random = new Random(random().nextLong());
for (int i=0; i getChildren() {
- throw new UnsupportedOperationException();
- }
- }
-
@Override
public Explanation explain(IndexSearcher searcher, Explanation firstPassExplanation, int docID) throws IOException {
Explanation result = super.explain(searcher, firstPassExplanation, docID);
diff --git a/lucene/expressions/src/java/org/apache/lucene/expressions/FakeScorer.java b/lucene/expressions/src/java/org/apache/lucene/expressions/FakeScorer.java
new file mode 100644
index 00000000000..133e0439079
--- /dev/null
+++ b/lucene/expressions/src/java/org/apache/lucene/expressions/FakeScorer.java
@@ -0,0 +1,84 @@
+package org.apache.lucene.expressions;
+
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import java.io.IOException;
+
+import org.apache.lucene.search.Scorer;
+import org.apache.lucene.util.BytesRef;
+
+class FakeScorer extends Scorer {
+
+ float score;
+ int doc = -1;
+ int freq = 1;
+
+ FakeScorer() {
+ super(null);
+ }
+
+ @Override
+ public int docID() {
+ return doc;
+ }
+
+ @Override
+ public int nextDoc() throws IOException {
+ throw new UnsupportedOperationException();
+ }
+
+ @Override
+ public int advance(int target) throws IOException {
+ throw new UnsupportedOperationException();
+ }
+
+ @Override
+ public long cost() {
+ throw new UnsupportedOperationException();
+ }
+
+ @Override
+ public int freq() throws IOException {
+ return freq;
+ }
+
+ @Override
+ public int nextPosition() throws IOException {
+ throw new UnsupportedOperationException();
+ }
+
+ @Override
+ public int startOffset() throws IOException {
+ throw new UnsupportedOperationException();
+ }
+
+ @Override
+ public int endOffset() throws IOException {
+ throw new UnsupportedOperationException();
+ }
+
+ @Override
+ public BytesRef getPayload() throws IOException {
+ throw new UnsupportedOperationException();
+ }
+
+ @Override
+ public float score() throws IOException {
+ return score;
+ }
+}
diff --git a/lucene/facet/src/java/org/apache/lucene/facet/DrillSidewaysQuery.java b/lucene/facet/src/java/org/apache/lucene/facet/DrillSidewaysQuery.java
index 799c027f80a..ff4efae83fd 100644
--- a/lucene/facet/src/java/org/apache/lucene/facet/DrillSidewaysQuery.java
+++ b/lucene/facet/src/java/org/apache/lucene/facet/DrillSidewaysQuery.java
@@ -19,8 +19,9 @@ package org.apache.lucene.facet;
import java.io.IOException;
import java.util.Arrays;
-import org.apache.lucene.index.LeafReaderContext;
import org.apache.lucene.index.IndexReader;
+import org.apache.lucene.index.LeafReaderContext;
+import org.apache.lucene.search.BulkScorer;
import org.apache.lucene.search.Collector;
import org.apache.lucene.search.DocIdSet;
import org.apache.lucene.search.DocIdSetIterator;
@@ -29,10 +30,8 @@ import org.apache.lucene.search.Filter;
import org.apache.lucene.search.IndexSearcher;
import org.apache.lucene.search.Query;
import org.apache.lucene.search.Scorer;
-import org.apache.lucene.search.BulkScorer;
import org.apache.lucene.search.Weight;
import org.apache.lucene.util.Bits;
-
/** Only purpose is to punch through and return a
* DrillSidewaysScorer */
diff --git a/lucene/facet/src/java/org/apache/lucene/facet/DrillSidewaysScorer.java b/lucene/facet/src/java/org/apache/lucene/facet/DrillSidewaysScorer.java
index 762c263ba11..a618231edde 100644
--- a/lucene/facet/src/java/org/apache/lucene/facet/DrillSidewaysScorer.java
+++ b/lucene/facet/src/java/org/apache/lucene/facet/DrillSidewaysScorer.java
@@ -21,15 +21,16 @@ import java.io.IOException;
import java.util.Collection;
import java.util.Collections;
+import org.apache.lucene.index.PostingsEnum;
import org.apache.lucene.index.LeafReaderContext;
-import org.apache.lucene.index.DocsEnum;
-import org.apache.lucene.search.LeafCollector;
+import org.apache.lucene.search.BulkScorer;
import org.apache.lucene.search.Collector;
import org.apache.lucene.search.DocIdSetIterator;
+import org.apache.lucene.search.LeafCollector;
import org.apache.lucene.search.Scorer;
-import org.apache.lucene.search.BulkScorer;
import org.apache.lucene.search.Weight;
import org.apache.lucene.util.Bits;
+import org.apache.lucene.util.BytesRef;
import org.apache.lucene.util.FixedBitSet;
class DrillSidewaysScorer extends BulkScorer {
@@ -173,7 +174,7 @@ class DrillSidewaysScorer extends BulkScorer {
//}
int docID = baseScorer.docID();
- nextDoc: while (docID != DocsEnum.NO_MORE_DOCS) {
+ nextDoc: while (docID != PostingsEnum.NO_MORE_DOCS) {
LeafCollector failedCollector = null;
for (int i=0;i getChildren() {
- throw new UnsupportedOperationException();
- }
- }
-
private final void sumValues(List matchingDocs, boolean keepScores, ValueSource valueSource) throws IOException {
final FakeScorer scorer = new FakeScorer();
Map context = new HashMap<>();
@@ -104,7 +80,7 @@ public class TaxonomyFacetSumValueSource extends FloatTaxonomyFacets {
while ((doc = docs.nextDoc()) != DocIdSetIterator.NO_MORE_DOCS) {
ords.get(doc, scratch);
if (keepScores) {
- scorer.docID = doc;
+ scorer.doc = doc;
scorer.score = scores[scoresIdx++];
}
float value = (float) functionValues.doubleVal(doc);
diff --git a/lucene/facet/src/java/org/apache/lucene/facet/taxonomy/directory/DirectoryTaxonomyReader.java b/lucene/facet/src/java/org/apache/lucene/facet/taxonomy/directory/DirectoryTaxonomyReader.java
index a216bb987c1..35d22103c42 100644
--- a/lucene/facet/src/java/org/apache/lucene/facet/taxonomy/directory/DirectoryTaxonomyReader.java
+++ b/lucene/facet/src/java/org/apache/lucene/facet/taxonomy/directory/DirectoryTaxonomyReader.java
@@ -12,7 +12,7 @@ import org.apache.lucene.facet.taxonomy.ParallelTaxonomyArrays;
import org.apache.lucene.facet.taxonomy.TaxonomyReader;
import org.apache.lucene.index.CorruptIndexException; // javadocs
import org.apache.lucene.index.DirectoryReader;
-import org.apache.lucene.index.DocsEnum;
+import org.apache.lucene.index.PostingsEnum;
import org.apache.lucene.index.IndexWriter;
import org.apache.lucene.index.MultiFields;
import org.apache.lucene.index.StoredDocument;
@@ -273,7 +273,7 @@ public class DirectoryTaxonomyReader extends TaxonomyReader {
// If we're still here, we have a cache miss. We need to fetch the
// value from disk, and then also put it in the cache:
int ret = TaxonomyReader.INVALID_ORDINAL;
- DocsEnum docs = MultiFields.getTermDocsEnum(indexReader, null, Consts.FULL, new BytesRef(FacetsConfig.pathToString(cp.components, cp.length)), 0);
+ PostingsEnum docs = MultiFields.getTermDocsEnum(indexReader, null, Consts.FULL, new BytesRef(FacetsConfig.pathToString(cp.components, cp.length)), 0);
if (docs != null && docs.nextDoc() != DocIdSetIterator.NO_MORE_DOCS) {
ret = docs.docID();
diff --git a/lucene/facet/src/java/org/apache/lucene/facet/taxonomy/directory/DirectoryTaxonomyWriter.java b/lucene/facet/src/java/org/apache/lucene/facet/taxonomy/directory/DirectoryTaxonomyWriter.java
index 03cc88f332f..7472b3a583f 100644
--- a/lucene/facet/src/java/org/apache/lucene/facet/taxonomy/directory/DirectoryTaxonomyWriter.java
+++ b/lucene/facet/src/java/org/apache/lucene/facet/taxonomy/directory/DirectoryTaxonomyWriter.java
@@ -28,7 +28,7 @@ import org.apache.lucene.facet.taxonomy.writercache.LruTaxonomyWriterCache;
import org.apache.lucene.facet.taxonomy.writercache.TaxonomyWriterCache;
import org.apache.lucene.index.CorruptIndexException; // javadocs
import org.apache.lucene.index.DirectoryReader;
-import org.apache.lucene.index.DocsEnum;
+import org.apache.lucene.index.PostingsEnum;
import org.apache.lucene.index.IndexReader;
import org.apache.lucene.index.IndexWriter;
import org.apache.lucene.index.IndexWriterConfig;
@@ -382,14 +382,14 @@ public class DirectoryTaxonomyWriter implements TaxonomyWriter {
try {
final BytesRef catTerm = new BytesRef(FacetsConfig.pathToString(categoryPath.components, categoryPath.length));
TermsEnum termsEnum = null; // reuse
- DocsEnum docs = null; // reuse
+ PostingsEnum docs = null; // reuse
for (LeafReaderContext ctx : reader.leaves()) {
Terms terms = ctx.reader().terms(Consts.FULL);
if (terms != null) {
termsEnum = terms.iterator(termsEnum);
if (termsEnum.seekExact(catTerm)) {
// liveDocs=null because the taxonomy has no deletes
- docs = termsEnum.docs(null, docs, 0 /* freqs not required */);
+ docs = termsEnum.postings(null, docs, 0 /* freqs not required */);
// if the term was found, we know it has exactly one document.
doc = docs.nextDoc() + ctx.docBase;
break;
@@ -675,7 +675,7 @@ public class DirectoryTaxonomyWriter implements TaxonomyWriter {
DirectoryReader reader = readerManager.acquire();
try {
TermsEnum termsEnum = null;
- DocsEnum docsEnum = null;
+ PostingsEnum postingsEnum = null;
for (LeafReaderContext ctx : reader.leaves()) {
Terms terms = ctx.reader().terms(Consts.FULL);
if (terms != null) { // cannot really happen, but be on the safe side
@@ -689,8 +689,8 @@ public class DirectoryTaxonomyWriter implements TaxonomyWriter {
// is sufficient to call next(), and then doc(), exactly once with no
// 'validation' checks.
FacetLabel cp = new FacetLabel(FacetsConfig.stringToPath(t.utf8ToString()));
- docsEnum = termsEnum.docs(null, docsEnum, DocsEnum.FLAG_NONE);
- boolean res = cache.put(cp, docsEnum.nextDoc() + ctx.docBase);
+ postingsEnum = termsEnum.postings(null, postingsEnum, PostingsEnum.FLAG_NONE);
+ boolean res = cache.put(cp, postingsEnum.nextDoc() + ctx.docBase);
assert !res : "entries should not have been evicted from the cache";
} else {
// the cache is full and the next put() will evict entries from it, therefore abort the iteration.
@@ -771,7 +771,7 @@ public class DirectoryTaxonomyWriter implements TaxonomyWriter {
ordinalMap.setSize(size);
int base = 0;
TermsEnum te = null;
- DocsEnum docs = null;
+ PostingsEnum docs = null;
for (final LeafReaderContext ctx : r.leaves()) {
final LeafReader ar = ctx.reader();
final Terms terms = ar.terms(Consts.FULL);
@@ -779,7 +779,7 @@ public class DirectoryTaxonomyWriter implements TaxonomyWriter {
while (te.next() != null) {
FacetLabel cp = new FacetLabel(FacetsConfig.stringToPath(te.term().utf8ToString()));
final int ordinal = addCategory(cp);
- docs = te.docs(null, docs, DocsEnum.FLAG_NONE);
+ docs = te.postings(null, docs, PostingsEnum.FLAG_NONE);
ordinalMap.addMapping(docs.nextDoc() + base, ordinal);
}
base += ar.maxDoc(); // no deletions, so we're ok
diff --git a/lucene/facet/src/java/org/apache/lucene/facet/taxonomy/directory/TaxonomyIndexArrays.java b/lucene/facet/src/java/org/apache/lucene/facet/taxonomy/directory/TaxonomyIndexArrays.java
index 6a59db87793..b4dbe688123 100644
--- a/lucene/facet/src/java/org/apache/lucene/facet/taxonomy/directory/TaxonomyIndexArrays.java
+++ b/lucene/facet/src/java/org/apache/lucene/facet/taxonomy/directory/TaxonomyIndexArrays.java
@@ -1,16 +1,16 @@
package org.apache.lucene.facet.taxonomy.directory;
-import java.io.IOException;
-
import org.apache.lucene.facet.taxonomy.ParallelTaxonomyArrays;
import org.apache.lucene.facet.taxonomy.TaxonomyReader;
import org.apache.lucene.index.CorruptIndexException;
-import org.apache.lucene.index.DocsAndPositionsEnum;
+import org.apache.lucene.index.PostingsEnum;
import org.apache.lucene.index.IndexReader;
import org.apache.lucene.index.MultiFields;
import org.apache.lucene.search.DocIdSetIterator;
import org.apache.lucene.util.ArrayUtil;
+import java.io.IOException;
+
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
@@ -129,9 +129,9 @@ class TaxonomyIndexArrays extends ParallelTaxonomyArrays {
// it's ok to use MultiFields because we only iterate on one posting list.
// breaking it to loop over the leaves() only complicates code for no
// apparent gain.
- DocsAndPositionsEnum positions = MultiFields.getTermPositionsEnum(reader, null,
+ PostingsEnum positions = MultiFields.getTermPositionsEnum(reader, null,
Consts.FIELD_PAYLOADS, Consts.PAYLOAD_PARENT_BYTES_REF,
- DocsAndPositionsEnum.FLAG_PAYLOADS);
+ PostingsEnum.FLAG_PAYLOADS);
// shouldn't really happen, if it does, something's wrong
if (positions == null || positions.advance(first) == DocIdSetIterator.NO_MORE_DOCS) {
diff --git a/lucene/grouping/src/java/org/apache/lucene/search/grouping/BlockGroupingCollector.java b/lucene/grouping/src/java/org/apache/lucene/search/grouping/BlockGroupingCollector.java
index 5e3a1f66165..8f2a345a17f 100644
--- a/lucene/grouping/src/java/org/apache/lucene/search/grouping/BlockGroupingCollector.java
+++ b/lucene/grouping/src/java/org/apache/lucene/search/grouping/BlockGroupingCollector.java
@@ -17,13 +17,23 @@ package org.apache.lucene.search.grouping;
* limitations under the License.
*/
-
import java.io.IOException;
-import java.util.Collection;
-import org.apache.lucene.index.LeafReaderContext;
import org.apache.lucene.index.IndexWriter;
-import org.apache.lucene.search.*;
+import org.apache.lucene.index.LeafReaderContext;
+import org.apache.lucene.search.DocIdSetIterator;
+import org.apache.lucene.search.FieldComparator;
+import org.apache.lucene.search.Filter;
+import org.apache.lucene.search.LeafCollector;
+import org.apache.lucene.search.LeafFieldComparator;
+import org.apache.lucene.search.Scorer;
+import org.apache.lucene.search.SimpleCollector;
+import org.apache.lucene.search.Sort;
+import org.apache.lucene.search.SortField;
+import org.apache.lucene.search.TopDocs;
+import org.apache.lucene.search.TopDocsCollector;
+import org.apache.lucene.search.TopFieldCollector;
+import org.apache.lucene.search.TopScoreDocCollector;
import org.apache.lucene.util.ArrayUtil;
import org.apache.lucene.util.PriorityQueue;
@@ -86,56 +96,6 @@ public class BlockGroupingCollector extends SimpleCollector {
private final GroupQueue groupQueue;
private boolean groupCompetes;
- private final static class FakeScorer extends Scorer {
-
- float score;
- int doc;
-
- public FakeScorer() {
- super(null);
- }
-
- @Override
- public float score() {
- return score;
- }
-
- @Override
- public int freq() {
- throw new UnsupportedOperationException(); // TODO: wtf does this class do?
- }
-
- @Override
- public int docID() {
- return doc;
- }
-
- @Override
- public int advance(int target) {
- throw new UnsupportedOperationException();
- }
-
- @Override
- public int nextDoc() {
- throw new UnsupportedOperationException();
- }
-
- @Override
- public long cost() {
- return 1;
- }
-
- @Override
- public Weight getWeight() {
- throw new UnsupportedOperationException();
- }
-
- @Override
- public Collection getChildren() {
- throw new UnsupportedOperationException();
- }
- }
-
private static final class OneGroup {
LeafReaderContext readerContext;
//int groupOrd;
diff --git a/lucene/grouping/src/java/org/apache/lucene/search/grouping/FakeScorer.java b/lucene/grouping/src/java/org/apache/lucene/search/grouping/FakeScorer.java
new file mode 100644
index 00000000000..b5afcdcd3ff
--- /dev/null
+++ b/lucene/grouping/src/java/org/apache/lucene/search/grouping/FakeScorer.java
@@ -0,0 +1,84 @@
+package org.apache.lucene.search.grouping;
+
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import java.io.IOException;
+
+import org.apache.lucene.search.Scorer;
+import org.apache.lucene.util.BytesRef;
+
+class FakeScorer extends Scorer {
+
+ float score;
+ int doc = -1;
+ int freq = 1;
+
+ FakeScorer() {
+ super(null);
+ }
+
+ @Override
+ public int docID() {
+ return doc;
+ }
+
+ @Override
+ public int nextDoc() throws IOException {
+ throw new UnsupportedOperationException();
+ }
+
+ @Override
+ public int advance(int target) throws IOException {
+ throw new UnsupportedOperationException();
+ }
+
+ @Override
+ public long cost() {
+ throw new UnsupportedOperationException();
+ }
+
+ @Override
+ public int freq() throws IOException {
+ return freq;
+ }
+
+ @Override
+ public int nextPosition() throws IOException {
+ throw new UnsupportedOperationException();
+ }
+
+ @Override
+ public int startOffset() throws IOException {
+ throw new UnsupportedOperationException();
+ }
+
+ @Override
+ public int endOffset() throws IOException {
+ throw new UnsupportedOperationException();
+ }
+
+ @Override
+ public BytesRef getPayload() throws IOException {
+ throw new UnsupportedOperationException();
+ }
+
+ @Override
+ public float score() throws IOException {
+ return score;
+ }
+}
diff --git a/lucene/grouping/src/test/org/apache/lucene/search/grouping/TestGrouping.java b/lucene/grouping/src/test/org/apache/lucene/search/grouping/TestGrouping.java
index ec3e8295638..cc9e75adb16 100644
--- a/lucene/grouping/src/test/org/apache/lucene/search/grouping/TestGrouping.java
+++ b/lucene/grouping/src/test/org/apache/lucene/search/grouping/TestGrouping.java
@@ -18,10 +18,26 @@
package org.apache.lucene.search.grouping;
import java.io.IOException;
-import java.util.*;
+import java.util.ArrayList;
+import java.util.Arrays;
+import java.util.Collection;
+import java.util.Collections;
+import java.util.Comparator;
+import java.util.HashMap;
+import java.util.HashSet;
+import java.util.List;
+import java.util.Map;
+import java.util.Set;
import org.apache.lucene.analysis.MockAnalyzer;
-import org.apache.lucene.document.*;
+import org.apache.lucene.document.Document;
+import org.apache.lucene.document.Field;
+import org.apache.lucene.document.FieldType;
+import org.apache.lucene.document.IntField;
+import org.apache.lucene.document.NumericDocValuesField;
+import org.apache.lucene.document.SortedDocValuesField;
+import org.apache.lucene.document.StringField;
+import org.apache.lucene.document.TextField;
import org.apache.lucene.index.DirectoryReader;
import org.apache.lucene.index.IndexOptions;
import org.apache.lucene.index.IndexReaderContext;
@@ -34,7 +50,20 @@ import org.apache.lucene.index.SlowCompositeReaderWrapper;
import org.apache.lucene.index.Term;
import org.apache.lucene.queries.function.ValueSource;
import org.apache.lucene.queries.function.valuesource.BytesRefFieldSource;
-import org.apache.lucene.search.*;
+import org.apache.lucene.search.CachingCollector;
+import org.apache.lucene.search.CachingWrapperFilter;
+import org.apache.lucene.search.Collector;
+import org.apache.lucene.search.FieldDoc;
+import org.apache.lucene.search.Filter;
+import org.apache.lucene.search.IndexSearcher;
+import org.apache.lucene.search.MultiCollector;
+import org.apache.lucene.search.Query;
+import org.apache.lucene.search.QueryWrapperFilter;
+import org.apache.lucene.search.ScoreDoc;
+import org.apache.lucene.search.Sort;
+import org.apache.lucene.search.SortField;
+import org.apache.lucene.search.TermQuery;
+import org.apache.lucene.search.Weight;
import org.apache.lucene.search.grouping.function.FunctionAllGroupsCollector;
import org.apache.lucene.search.grouping.function.FunctionFirstPassGroupingCollector;
import org.apache.lucene.search.grouping.function.FunctionSecondPassGroupingCollector;
diff --git a/lucene/highlighter/src/java/org/apache/lucene/search/highlight/TokenStreamFromTermVector.java b/lucene/highlighter/src/java/org/apache/lucene/search/highlight/TokenStreamFromTermVector.java
index 936ac2b59eb..c381fad57b6 100644
--- a/lucene/highlighter/src/java/org/apache/lucene/search/highlight/TokenStreamFromTermVector.java
+++ b/lucene/highlighter/src/java/org/apache/lucene/search/highlight/TokenStreamFromTermVector.java
@@ -16,6 +16,7 @@ package org.apache.lucene.search.highlight;
* See the License for the specific language governing permissions and
* limitations under the License.
*/
+
import java.io.IOException;
import org.apache.lucene.analysis.TokenStream;
@@ -24,7 +25,7 @@ import org.apache.lucene.analysis.tokenattributes.OffsetAttribute;
import org.apache.lucene.analysis.tokenattributes.PackedTokenAttributeImpl;
import org.apache.lucene.analysis.tokenattributes.PayloadAttribute;
import org.apache.lucene.analysis.tokenattributes.PositionIncrementAttribute;
-import org.apache.lucene.index.DocsAndPositionsEnum;
+import org.apache.lucene.index.PostingsEnum;
import org.apache.lucene.index.Terms;
import org.apache.lucene.index.TermsEnum;
import org.apache.lucene.util.AttributeFactory;
@@ -122,7 +123,7 @@ public final class TokenStreamFromTermVector extends TokenStream {
final TermsEnum termsEnum = vector.iterator(null);
BytesRef termBytesRef;
- DocsAndPositionsEnum dpEnum = null;
+ PostingsEnum dpEnum = null;
//int sumFreq = 0;
while ((termBytesRef = termsEnum.next()) != null) {
//Grab the term (in same way as BytesRef.utf8ToString() but we don't want a String obj)
@@ -130,7 +131,7 @@ public final class TokenStreamFromTermVector extends TokenStream {
final char[] termChars = new char[termBytesRef.length];
final int termCharsLen = UnicodeUtil.UTF8toUTF16(termBytesRef, termChars);
- dpEnum = termsEnum.docsAndPositions(null, dpEnum);
+ dpEnum = termsEnum.postings(null, dpEnum, PostingsEnum.FLAG_POSITIONS);
assert dpEnum != null; // presumably checked by TokenSources.hasPositions earlier
dpEnum.nextDoc();
final int freq = dpEnum.freq();
diff --git a/lucene/highlighter/src/java/org/apache/lucene/search/postingshighlight/MultiTermHighlighting.java b/lucene/highlighter/src/java/org/apache/lucene/search/postingshighlight/MultiTermHighlighting.java
index 67cdf91ba60..2eb39abb605 100644
--- a/lucene/highlighter/src/java/org/apache/lucene/search/postingshighlight/MultiTermHighlighting.java
+++ b/lucene/highlighter/src/java/org/apache/lucene/search/postingshighlight/MultiTermHighlighting.java
@@ -26,7 +26,7 @@ import java.util.List;
import org.apache.lucene.analysis.TokenStream;
import org.apache.lucene.analysis.tokenattributes.CharTermAttribute;
import org.apache.lucene.analysis.tokenattributes.OffsetAttribute;
-import org.apache.lucene.index.DocsAndPositionsEnum;
+import org.apache.lucene.index.PostingsEnum;
import org.apache.lucene.index.Term;
import org.apache.lucene.search.AutomatonQuery;
import org.apache.lucene.search.BooleanClause;
@@ -47,10 +47,10 @@ import org.apache.lucene.util.BytesRef;
import org.apache.lucene.util.CharsRef;
import org.apache.lucene.util.UnicodeUtil;
import org.apache.lucene.util.automaton.Automata;
-import org.apache.lucene.util.automaton.Operations;
+import org.apache.lucene.util.automaton.Automaton;
import org.apache.lucene.util.automaton.CharacterRunAutomaton;
import org.apache.lucene.util.automaton.LevenshteinAutomata;
-import org.apache.lucene.util.automaton.Automaton;
+import org.apache.lucene.util.automaton.Operations;
/**
* Support for highlighting multiterm queries in PostingsHighlighter.
@@ -197,7 +197,7 @@ class MultiTermHighlighting {
*
* This is solely used internally by PostingsHighlighter: DO NOT USE THIS METHOD!
*/
- static DocsAndPositionsEnum getDocsEnum(final TokenStream ts, final CharacterRunAutomaton[] matchers) throws IOException {
+ static PostingsEnum getDocsEnum(final TokenStream ts, final CharacterRunAutomaton[] matchers) throws IOException {
final CharTermAttribute charTermAtt = ts.addAttribute(CharTermAttribute.class);
final OffsetAttribute offsetAtt = ts.addAttribute(OffsetAttribute.class);
ts.reset();
@@ -207,7 +207,7 @@ class MultiTermHighlighting {
// would only serve to make this method less bogus.
// instead, we always return freq() = Integer.MAX_VALUE and let PH terminate based on offset...
- return new DocsAndPositionsEnum() {
+ return new PostingsEnum() {
int currentDoc = -1;
int currentMatch = -1;
int currentStartOffset = -1;
@@ -237,7 +237,7 @@ class MultiTermHighlighting {
currentStartOffset = currentEndOffset = Integer.MAX_VALUE;
return Integer.MAX_VALUE;
}
-
+
@Override
public int freq() throws IOException {
return Integer.MAX_VALUE; // lie
diff --git a/lucene/highlighter/src/java/org/apache/lucene/search/postingshighlight/PostingsHighlighter.java b/lucene/highlighter/src/java/org/apache/lucene/search/postingshighlight/PostingsHighlighter.java
index 14f364bde6e..203c62f6b9a 100644
--- a/lucene/highlighter/src/java/org/apache/lucene/search/postingshighlight/PostingsHighlighter.java
+++ b/lucene/highlighter/src/java/org/apache/lucene/search/postingshighlight/PostingsHighlighter.java
@@ -31,7 +31,7 @@ import java.util.SortedSet;
import java.util.TreeSet;
import org.apache.lucene.analysis.Analyzer;
-import org.apache.lucene.index.DocsAndPositionsEnum;
+import org.apache.lucene.index.PostingsEnum;
import org.apache.lucene.index.FieldInfo;
import org.apache.lucene.index.IndexOptions;
import org.apache.lucene.index.IndexReader;
@@ -455,7 +455,7 @@ public class PostingsHighlighter {
private Map highlightField(String field, String contents[], BreakIterator bi, BytesRef terms[], int[] docids, List leaves, int maxPassages, Query query) throws IOException {
Map highlights = new HashMap<>();
-
+
PassageFormatter fieldFormatter = getFormatter(field);
if (fieldFormatter == null) {
throw new NullPointerException("PassageFormatter cannot be null");
@@ -477,7 +477,7 @@ public class PostingsHighlighter {
// we are processing in increasing docid order, so we only need to reinitialize stuff on segment changes
// otherwise, we will just advance() existing enums to the new document in the same segment.
- DocsAndPositionsEnum postings[] = null;
+ PostingsEnum postings[] = null;
TermsEnum termsEnum = null;
int lastLeaf = -1;
@@ -499,7 +499,7 @@ public class PostingsHighlighter {
Terms t = r.terms(field);
if (t != null) {
termsEnum = t.iterator(null);
- postings = new DocsAndPositionsEnum[terms.length];
+ postings = new PostingsEnum[terms.length];
}
}
if (termsEnum == null) {
@@ -508,7 +508,7 @@ public class PostingsHighlighter {
// if there are multi-term matches, we have to initialize the "fake" enum for each document
if (automata.length > 0) {
- DocsAndPositionsEnum dp = MultiTermHighlighting.getDocsEnum(analyzer.tokenStream(field, content), automata);
+ PostingsEnum dp = MultiTermHighlighting.getDocsEnum(analyzer.tokenStream(field, content), automata);
dp.advance(doc - subContext.docBase);
postings[terms.length-1] = dp; // last term is the multiterm matcher
}
@@ -534,7 +534,7 @@ public class PostingsHighlighter {
// we can intersect these with the postings lists via BreakIterator.preceding(offset),s
// score each sentence as norm(sentenceStartOffset) * sum(weight * tf(freq))
private Passage[] highlightDoc(String field, BytesRef terms[], int contentLength, BreakIterator bi, int doc,
- TermsEnum termsEnum, DocsAndPositionsEnum[] postings, int n) throws IOException {
+ TermsEnum termsEnum, PostingsEnum[] postings, int n) throws IOException {
PassageScorer scorer = getScorer(field);
if (scorer == null) {
throw new NullPointerException("PassageScorer cannot be null");
@@ -543,7 +543,7 @@ public class PostingsHighlighter {
float weights[] = new float[terms.length];
// initialize postings
for (int i = 0; i < terms.length; i++) {
- DocsAndPositionsEnum de = postings[i];
+ PostingsEnum de = postings[i];
int pDoc;
if (de == EMPTY) {
continue;
@@ -552,7 +552,7 @@ public class PostingsHighlighter {
if (!termsEnum.seekExact(terms[i])) {
continue; // term not found
}
- de = postings[i] = termsEnum.docsAndPositions(null, null, DocsAndPositionsEnum.FLAG_OFFSETS);
+ de = postings[i] = termsEnum.postings(null, null, PostingsEnum.FLAG_OFFSETS);
if (de == null) {
// no positions available
throw new IllegalArgumentException("field '" + field + "' was indexed without offsets, cannot highlight");
@@ -590,7 +590,7 @@ public class PostingsHighlighter {
OffsetsEnum off;
while ((off = pq.poll()) != null) {
- final DocsAndPositionsEnum dp = off.dp;
+ final PostingsEnum dp = off.dp;
int start = dp.startOffset();
if (start == -1) {
throw new IllegalArgumentException("field '" + field + "' was indexed without offsets, cannot highlight");
@@ -698,11 +698,11 @@ public class PostingsHighlighter {
}
private static class OffsetsEnum implements Comparable {
- DocsAndPositionsEnum dp;
+ PostingsEnum dp;
int pos;
int id;
- OffsetsEnum(DocsAndPositionsEnum dp, int id) throws IOException {
+ OffsetsEnum(PostingsEnum dp, int id) throws IOException {
this.dp = dp;
this.id = id;
this.pos = 1;
@@ -724,10 +724,10 @@ public class PostingsHighlighter {
}
}
- private static final DocsAndPositionsEnum EMPTY = new DocsAndPositionsEnum() {
+ private static final PostingsEnum EMPTY = new PostingsEnum() {
@Override
- public int nextPosition() throws IOException { return 0; }
+ public int nextPosition() throws IOException { return -1; }
@Override
public int startOffset() throws IOException { return Integer.MAX_VALUE; }
diff --git a/lucene/highlighter/src/java/org/apache/lucene/search/vectorhighlight/FieldTermStack.java b/lucene/highlighter/src/java/org/apache/lucene/search/vectorhighlight/FieldTermStack.java
index 29c307a6ae9..80de2d8293d 100644
--- a/lucene/highlighter/src/java/org/apache/lucene/search/vectorhighlight/FieldTermStack.java
+++ b/lucene/highlighter/src/java/org/apache/lucene/search/vectorhighlight/FieldTermStack.java
@@ -22,7 +22,7 @@ import java.util.Iterator;
import java.util.LinkedList;
import java.util.Set;
-import org.apache.lucene.index.DocsAndPositionsEnum;
+import org.apache.lucene.index.PostingsEnum;
import org.apache.lucene.index.Fields;
import org.apache.lucene.index.IndexReader;
import org.apache.lucene.index.Term;
@@ -93,7 +93,7 @@ public class FieldTermStack {
final CharsRefBuilder spare = new CharsRefBuilder();
final TermsEnum termsEnum = vector.iterator(null);
- DocsAndPositionsEnum dpEnum = null;
+ PostingsEnum dpEnum = null;
BytesRef text;
int numDocs = reader.maxDoc();
@@ -104,7 +104,7 @@ public class FieldTermStack {
if (!termSet.contains(term)) {
continue;
}
- dpEnum = termsEnum.docsAndPositions(null, dpEnum);
+ dpEnum = termsEnum.postings(null, dpEnum, PostingsEnum.FLAG_POSITIONS);
if (dpEnum == null) {
// null snippet
return;
diff --git a/lucene/highlighter/src/test/org/apache/lucene/search/highlight/custom/HighlightCustomQueryTest.java b/lucene/highlighter/src/test/org/apache/lucene/search/highlight/custom/HighlightCustomQueryTest.java
index 536259ade85..a52d9b77f06 100644
--- a/lucene/highlighter/src/test/org/apache/lucene/search/highlight/custom/HighlightCustomQueryTest.java
+++ b/lucene/highlighter/src/test/org/apache/lucene/search/highlight/custom/HighlightCustomQueryTest.java
@@ -16,9 +16,6 @@ package org.apache.lucene.search.highlight.custom;
* See the License for the specific language governing permissions and
* limitations under the License.
*/
-import java.io.IOException;
-import java.util.Map;
-
import org.apache.lucene.analysis.MockAnalyzer;
import org.apache.lucene.analysis.MockTokenFilter;
import org.apache.lucene.analysis.MockTokenizer;
@@ -36,6 +33,9 @@ import org.apache.lucene.search.highlight.WeightedSpanTerm;
import org.apache.lucene.search.highlight.WeightedSpanTermExtractor;
import org.apache.lucene.util.LuceneTestCase;
+import java.io.IOException;
+import java.util.Map;
+
/**
* Tests the extensibility of {@link WeightedSpanTermExtractor} and
* {@link QueryScorer} in a user defined package
diff --git a/lucene/join/src/java/org/apache/lucene/search/join/FakeScorer.java b/lucene/join/src/java/org/apache/lucene/search/join/FakeScorer.java
index cbd1ff8612a..eed50ddcba7 100644
--- a/lucene/join/src/java/org/apache/lucene/search/join/FakeScorer.java
+++ b/lucene/join/src/java/org/apache/lucene/search/join/FakeScorer.java
@@ -17,26 +17,20 @@ package org.apache.lucene.search.join;
* limitations under the License.
*/
-import java.util.Collection;
+import java.io.IOException;
-import org.apache.lucene.search.LeafCollector;
import org.apache.lucene.search.Scorer;
-import org.apache.lucene.search.Weight;
+import org.apache.lucene.util.BytesRef;
+
+class FakeScorer extends Scorer {
-/** Passed to {@link LeafCollector#setScorer} during join collection. */
-final class FakeScorer extends Scorer {
float score;
int doc = -1;
int freq = 1;
- public FakeScorer() {
+ FakeScorer() {
super(null);
}
-
- @Override
- public int advance(int target) {
- throw new UnsupportedOperationException("FakeScorer doesn't support advance(int)");
- }
@Override
public int docID() {
@@ -44,32 +38,47 @@ final class FakeScorer extends Scorer {
}
@Override
- public int freq() {
- throw new UnsupportedOperationException("FakeScorer doesn't support freq()");
+ public int nextDoc() throws IOException {
+ throw new UnsupportedOperationException();
}
@Override
- public int nextDoc() {
- throw new UnsupportedOperationException("FakeScorer doesn't support nextDoc()");
- }
-
- @Override
- public float score() {
- return score;
+ public int advance(int target) throws IOException {
+ throw new UnsupportedOperationException();
}
@Override
public long cost() {
- return 1;
- }
-
- @Override
- public Weight getWeight() {
throw new UnsupportedOperationException();
}
@Override
- public Collection getChildren() {
+ public int freq() throws IOException {
+ return freq;
+ }
+
+ @Override
+ public int nextPosition() throws IOException {
throw new UnsupportedOperationException();
}
+
+ @Override
+ public int startOffset() throws IOException {
+ throw new UnsupportedOperationException();
+ }
+
+ @Override
+ public int endOffset() throws IOException {
+ throw new UnsupportedOperationException();
+ }
+
+ @Override
+ public BytesRef getPayload() throws IOException {
+ throw new UnsupportedOperationException();
+ }
+
+ @Override
+ public float score() throws IOException {
+ return score;
+ }
}
diff --git a/lucene/join/src/java/org/apache/lucene/search/join/TermsIncludingScoreQuery.java b/lucene/join/src/java/org/apache/lucene/search/join/TermsIncludingScoreQuery.java
index 8c687451849..e3620bd59df 100644
--- a/lucene/join/src/java/org/apache/lucene/search/join/TermsIncludingScoreQuery.java
+++ b/lucene/join/src/java/org/apache/lucene/search/join/TermsIncludingScoreQuery.java
@@ -21,18 +21,16 @@ import java.io.IOException;
import java.util.Locale;
import java.util.Set;
-import org.apache.lucene.index.DocsEnum;
+import org.apache.lucene.index.PostingsEnum;
import org.apache.lucene.index.IndexReader;
import org.apache.lucene.index.LeafReaderContext;
import org.apache.lucene.index.Term;
import org.apache.lucene.index.Terms;
import org.apache.lucene.index.TermsEnum;
-import org.apache.lucene.search.BulkScorer;
import org.apache.lucene.search.ComplexExplanation;
import org.apache.lucene.search.DocIdSetIterator;
import org.apache.lucene.search.Explanation;
import org.apache.lucene.search.IndexSearcher;
-import org.apache.lucene.search.LeafCollector;
import org.apache.lucene.search.Query;
import org.apache.lucene.search.Scorer;
import org.apache.lucene.search.Weight;
@@ -137,11 +135,11 @@ class TermsIncludingScoreQuery extends Query {
if (terms != null) {
segmentTermsEnum = terms.iterator(segmentTermsEnum);
BytesRef spare = new BytesRef();
- DocsEnum docsEnum = null;
+ PostingsEnum postingsEnum = null;
for (int i = 0; i < TermsIncludingScoreQuery.this.terms.size(); i++) {
if (segmentTermsEnum.seekExact(TermsIncludingScoreQuery.this.terms.get(ords[i], spare))) {
- docsEnum = segmentTermsEnum.docs(null, docsEnum, DocsEnum.FLAG_NONE);
- if (docsEnum.advance(doc) == doc) {
+ postingsEnum = segmentTermsEnum.postings(null, postingsEnum, PostingsEnum.FLAG_NONE);
+ if (postingsEnum.advance(doc) == doc) {
final float score = TermsIncludingScoreQuery.this.scores[ords[i]];
return new ComplexExplanation(true, score, "Score based on join value " + segmentTermsEnum.term().utf8ToString());
}
@@ -183,9 +181,10 @@ class TermsIncludingScoreQuery extends Query {
return new SVInOrderScorer(this, acceptDocs, segmentTermsEnum, context.reader().maxDoc(), cost);
}
}
+
};
}
-
+
class SVInOrderScorer extends Scorer {
final DocIdSetIterator matchingDocsIterator;
@@ -205,12 +204,12 @@ class TermsIncludingScoreQuery extends Query {
protected void fillDocsAndScores(FixedBitSet matchingDocs, Bits acceptDocs, TermsEnum termsEnum) throws IOException {
BytesRef spare = new BytesRef();
- DocsEnum docsEnum = null;
+ PostingsEnum postingsEnum = null;
for (int i = 0; i < terms.size(); i++) {
if (termsEnum.seekExact(terms.get(ords[i], spare))) {
- docsEnum = termsEnum.docs(acceptDocs, docsEnum, DocsEnum.FLAG_NONE);
+ postingsEnum = termsEnum.postings(acceptDocs, postingsEnum, PostingsEnum.FLAG_NONE);
float score = TermsIncludingScoreQuery.this.scores[ords[i]];
- for (int doc = docsEnum.nextDoc(); doc != DocIdSetIterator.NO_MORE_DOCS; doc = docsEnum.nextDoc()) {
+ for (int doc = postingsEnum.nextDoc(); doc != DocIdSetIterator.NO_MORE_DOCS; doc = postingsEnum.nextDoc()) {
matchingDocs.set(doc);
// In the case the same doc is also related to a another doc, a score might be overwritten. I think this
// can only happen in a many-to-many relation
@@ -230,6 +229,26 @@ class TermsIncludingScoreQuery extends Query {
return 1;
}
+ @Override
+ public int nextPosition() throws IOException {
+ return -1;
+ }
+
+ @Override
+ public int startOffset() throws IOException {
+ return -1;
+ }
+
+ @Override
+ public int endOffset() throws IOException {
+ return -1;
+ }
+
+ @Override
+ public BytesRef getPayload() throws IOException {
+ return null;
+ }
+
@Override
public int docID() {
return currentDoc;
@@ -261,12 +280,12 @@ class TermsIncludingScoreQuery extends Query {
@Override
protected void fillDocsAndScores(FixedBitSet matchingDocs, Bits acceptDocs, TermsEnum termsEnum) throws IOException {
BytesRef spare = new BytesRef();
- DocsEnum docsEnum = null;
+ PostingsEnum postingsEnum = null;
for (int i = 0; i < terms.size(); i++) {
if (termsEnum.seekExact(terms.get(ords[i], spare))) {
- docsEnum = termsEnum.docs(acceptDocs, docsEnum, DocsEnum.FLAG_NONE);
+ postingsEnum = termsEnum.postings(acceptDocs, postingsEnum, PostingsEnum.FLAG_NONE);
float score = TermsIncludingScoreQuery.this.scores[ords[i]];
- for (int doc = docsEnum.nextDoc(); doc != DocIdSetIterator.NO_MORE_DOCS; doc = docsEnum.nextDoc()) {
+ for (int doc = postingsEnum.nextDoc(); doc != DocIdSetIterator.NO_MORE_DOCS; doc = postingsEnum.nextDoc()) {
// I prefer this:
/*if (scores[doc] < score) {
scores[doc] = score;
diff --git a/lucene/join/src/java/org/apache/lucene/search/join/ToChildBlockJoinQuery.java b/lucene/join/src/java/org/apache/lucene/search/join/ToChildBlockJoinQuery.java
index 45047b23477..24adabb7193 100644
--- a/lucene/join/src/java/org/apache/lucene/search/join/ToChildBlockJoinQuery.java
+++ b/lucene/join/src/java/org/apache/lucene/search/join/ToChildBlockJoinQuery.java
@@ -25,7 +25,6 @@ import java.util.Set;
import org.apache.lucene.index.IndexReader;
import org.apache.lucene.index.LeafReaderContext;
import org.apache.lucene.index.Term;
-import org.apache.lucene.search.DocIdSetIterator;
import org.apache.lucene.search.Explanation;
import org.apache.lucene.search.IndexSearcher;
import org.apache.lucene.search.Query;
@@ -34,6 +33,7 @@ import org.apache.lucene.search.Weight;
import org.apache.lucene.util.BitDocIdSet;
import org.apache.lucene.util.BitSet;
import org.apache.lucene.util.Bits;
+import org.apache.lucene.util.BytesRef;
/**
* Just like {@link ToParentBlockJoinQuery}, except this
@@ -273,6 +273,26 @@ public class ToChildBlockJoinQuery extends Query {
return parentFreq;
}
+ @Override
+ public int nextPosition() throws IOException {
+ return -1;
+ }
+
+ @Override
+ public int startOffset() throws IOException {
+ return -1;
+ }
+
+ @Override
+ public int endOffset() throws IOException {
+ return -1;
+ }
+
+ @Override
+ public BytesRef getPayload() throws IOException {
+ return null;
+ }
+
@Override
public int advance(int childTarget) throws IOException {
diff --git a/lucene/join/src/java/org/apache/lucene/search/join/ToParentBlockJoinCollector.java b/lucene/join/src/java/org/apache/lucene/search/join/ToParentBlockJoinCollector.java
index bd47f9461d3..de1b4333bca 100644
--- a/lucene/join/src/java/org/apache/lucene/search/join/ToParentBlockJoinCollector.java
+++ b/lucene/join/src/java/org/apache/lucene/search/join/ToParentBlockJoinCollector.java
@@ -17,17 +17,33 @@ package org.apache.lucene.search.join;
* limitations under the License.
*/
+import java.io.IOException;
+import java.util.Arrays;
+import java.util.HashMap;
+import java.util.LinkedList;
+import java.util.Map;
+import java.util.Queue;
+
+import org.apache.lucene.index.IndexWriter;
import org.apache.lucene.index.LeafReaderContext;
-import org.apache.lucene.index.IndexWriter; // javadocs
-import org.apache.lucene.search.*;
+import org.apache.lucene.search.Collector;
+import org.apache.lucene.search.FieldComparator;
+import org.apache.lucene.search.FieldValueHitQueue;
+import org.apache.lucene.search.LeafCollector;
+import org.apache.lucene.search.LeafFieldComparator;
+import org.apache.lucene.search.Query;
+import org.apache.lucene.search.ScoreCachingWrappingScorer;
+import org.apache.lucene.search.Scorer;
import org.apache.lucene.search.Scorer.ChildScorer;
+import org.apache.lucene.search.Sort;
+import org.apache.lucene.search.TopDocs;
+import org.apache.lucene.search.TopDocsCollector;
+import org.apache.lucene.search.TopFieldCollector;
+import org.apache.lucene.search.TopScoreDocCollector;
import org.apache.lucene.search.grouping.GroupDocs;
import org.apache.lucene.search.grouping.TopGroups;
import org.apache.lucene.util.ArrayUtil;
-import java.io.IOException;
-import java.util.*;
-
/** Collects parent document hits for a Query containing one more more
* BlockJoinQuery clauses, sorted by the
diff --git a/lucene/join/src/java/org/apache/lucene/search/join/ToParentBlockJoinIndexSearcher.java b/lucene/join/src/java/org/apache/lucene/search/join/ToParentBlockJoinIndexSearcher.java
index e81f2fd0fbe..ca2ef13a961 100644
--- a/lucene/join/src/java/org/apache/lucene/search/join/ToParentBlockJoinIndexSearcher.java
+++ b/lucene/join/src/java/org/apache/lucene/search/join/ToParentBlockJoinIndexSearcher.java
@@ -21,6 +21,7 @@ import java.io.IOException;
import java.util.List;
import java.util.concurrent.ExecutorService;
+import org.apache.lucene.index.PostingsEnum;
import org.apache.lucene.index.IndexReader;
import org.apache.lucene.index.LeafReaderContext;
import org.apache.lucene.search.Collector;
diff --git a/lucene/join/src/java/org/apache/lucene/search/join/ToParentBlockJoinQuery.java b/lucene/join/src/java/org/apache/lucene/search/join/ToParentBlockJoinQuery.java
index cb49b87db34..4a157dba92d 100644
--- a/lucene/join/src/java/org/apache/lucene/search/join/ToParentBlockJoinQuery.java
+++ b/lucene/join/src/java/org/apache/lucene/search/join/ToParentBlockJoinQuery.java
@@ -39,6 +39,7 @@ import org.apache.lucene.util.ArrayUtil;
import org.apache.lucene.util.BitDocIdSet;
import org.apache.lucene.util.BitSet;
import org.apache.lucene.util.Bits;
+import org.apache.lucene.util.BytesRef;
/**
* This query requires that you index
@@ -369,6 +370,26 @@ public class ToParentBlockJoinQuery extends Query {
return parentFreq;
}
+ @Override
+ public int nextPosition() throws IOException {
+ return -1;
+ }
+
+ @Override
+ public int startOffset() throws IOException {
+ return -1;
+ }
+
+ @Override
+ public int endOffset() throws IOException {
+ return -1;
+ }
+
+ @Override
+ public BytesRef getPayload() throws IOException {
+ return null;
+ }
+
@Override
public int advance(int parentTarget) throws IOException {
diff --git a/lucene/join/src/test/org/apache/lucene/search/join/TestBlockJoin.java b/lucene/join/src/test/org/apache/lucene/search/join/TestBlockJoin.java
index 8a57593a50f..e05628b0a79 100644
--- a/lucene/join/src/test/org/apache/lucene/search/join/TestBlockJoin.java
+++ b/lucene/join/src/test/org/apache/lucene/search/join/TestBlockJoin.java
@@ -25,14 +25,55 @@ import java.util.List;
import java.util.Locale;
import org.apache.lucene.analysis.MockAnalyzer;
-import org.apache.lucene.document.*;
-import org.apache.lucene.index.*;
-import org.apache.lucene.search.*;
+import org.apache.lucene.document.Document;
+import org.apache.lucene.document.Field;
+import org.apache.lucene.document.IntField;
+import org.apache.lucene.document.NumericDocValuesField;
+import org.apache.lucene.document.SortedDocValuesField;
+import org.apache.lucene.document.StoredField;
+import org.apache.lucene.index.DirectoryReader;
+import org.apache.lucene.index.PostingsEnum;
+import org.apache.lucene.index.IndexReader;
+import org.apache.lucene.index.IndexWriter;
+import org.apache.lucene.index.IndexWriterConfig;
+import org.apache.lucene.index.LeafReaderContext;
+import org.apache.lucene.index.LogDocMergePolicy;
+import org.apache.lucene.index.MultiFields;
+import org.apache.lucene.index.NoMergePolicy;
+import org.apache.lucene.index.RandomIndexWriter;
+import org.apache.lucene.index.ReaderUtil;
+import org.apache.lucene.index.StoredDocument;
+import org.apache.lucene.index.Term;
+import org.apache.lucene.search.BooleanClause;
import org.apache.lucene.search.BooleanClause.Occur;
+import org.apache.lucene.search.BooleanQuery;
+import org.apache.lucene.search.DocIdSetIterator;
+import org.apache.lucene.search.Explanation;
+import org.apache.lucene.search.FieldDoc;
+import org.apache.lucene.search.Filter;
+import org.apache.lucene.search.IndexSearcher;
+import org.apache.lucene.search.MatchAllDocsQuery;
+import org.apache.lucene.search.MultiTermQuery;
+import org.apache.lucene.search.NumericRangeQuery;
+import org.apache.lucene.search.Query;
+import org.apache.lucene.search.QueryUtils;
+import org.apache.lucene.search.QueryWrapperFilter;
+import org.apache.lucene.search.ScoreDoc;
+import org.apache.lucene.search.Sort;
+import org.apache.lucene.search.SortField;
+import org.apache.lucene.search.TermQuery;
+import org.apache.lucene.search.TopDocs;
+import org.apache.lucene.search.Weight;
import org.apache.lucene.search.grouping.GroupDocs;
import org.apache.lucene.search.grouping.TopGroups;
import org.apache.lucene.store.Directory;
-import org.apache.lucene.util.*;
+import org.apache.lucene.util.BitSet;
+import org.apache.lucene.util.Bits;
+import org.apache.lucene.util.BytesRef;
+import org.apache.lucene.util.BytesRefBuilder;
+import org.apache.lucene.util.LuceneTestCase;
+import org.apache.lucene.util.NumericUtils;
+import org.apache.lucene.util.TestUtil;
public class TestBlockJoin extends LuceneTestCase {
@@ -624,9 +665,9 @@ public class TestBlockJoin extends LuceneTestCase {
for(int docIDX=0;docIDX joinValues = new TreeSet<>(BytesRef.getUTF8SortedAsUnicodeComparator());
joinValues.addAll(joinValueToJoinScores.keySet());
for (BytesRef joinValue : joinValues) {
termsEnum = terms.iterator(termsEnum);
if (termsEnum.seekExact(joinValue)) {
- docsEnum = termsEnum.docs(slowCompositeReader.getLiveDocs(), docsEnum, DocsEnum.FLAG_NONE);
+ postingsEnum = termsEnum.postings(slowCompositeReader.getLiveDocs(), postingsEnum, PostingsEnum.FLAG_NONE);
JoinScore joinScore = joinValueToJoinScores.get(joinValue);
- for (int doc = docsEnum.nextDoc(); doc != DocIdSetIterator.NO_MORE_DOCS; doc = docsEnum.nextDoc()) {
+ for (int doc = postingsEnum.nextDoc(); doc != DocIdSetIterator.NO_MORE_DOCS; doc = postingsEnum.nextDoc()) {
// First encountered join value determines the score.
// Something to keep in mind for many-to-many relations.
if (!docToJoinScore.containsKey(doc)) {
@@ -853,9 +853,9 @@ public class TestJoinUtil extends LuceneTestCase {
}
for (RandomDoc otherSideDoc : otherMatchingDocs) {
- DocsEnum docsEnum = MultiFields.getTermDocsEnum(topLevelReader, MultiFields.getLiveDocs(topLevelReader), "id", new BytesRef(otherSideDoc.id), 0);
- assert docsEnum != null;
- int doc = docsEnum.nextDoc();
+ PostingsEnum postingsEnum = MultiFields.getTermDocsEnum(topLevelReader, MultiFields.getLiveDocs(topLevelReader), "id", new BytesRef(otherSideDoc.id), 0);
+ assert postingsEnum != null;
+ int doc = postingsEnum.nextDoc();
expectedResult.set(doc);
}
}
diff --git a/lucene/memory/src/java/org/apache/lucene/index/memory/MemoryIndex.java b/lucene/memory/src/java/org/apache/lucene/index/memory/MemoryIndex.java
index d963d54289a..c7758d2d013 100644
--- a/lucene/memory/src/java/org/apache/lucene/index/memory/MemoryIndex.java
+++ b/lucene/memory/src/java/org/apache/lucene/index/memory/MemoryIndex.java
@@ -34,8 +34,7 @@ import org.apache.lucene.analysis.tokenattributes.PositionIncrementAttribute;
import org.apache.lucene.analysis.tokenattributes.TermToBytesRefAttribute;
import org.apache.lucene.index.BinaryDocValues;
import org.apache.lucene.index.DocValuesType;
-import org.apache.lucene.index.DocsAndPositionsEnum;
-import org.apache.lucene.index.DocsEnum;
+import org.apache.lucene.index.PostingsEnum;
import org.apache.lucene.index.FieldInfo;
import org.apache.lucene.index.FieldInfos;
import org.apache.lucene.index.FieldInvertState;
@@ -983,20 +982,12 @@ public class MemoryIndex {
}
@Override
- public DocsEnum docs(Bits liveDocs, DocsEnum reuse, int flags) {
- if (reuse == null || !(reuse instanceof MemoryDocsEnum)) {
- reuse = new MemoryDocsEnum();
- }
- return ((MemoryDocsEnum) reuse).reset(liveDocs, info.sliceArray.freq[info.sortedTerms[termUpto]]);
- }
-
- @Override
- public DocsAndPositionsEnum docsAndPositions(Bits liveDocs, DocsAndPositionsEnum reuse, int flags) {
- if (reuse == null || !(reuse instanceof MemoryDocsAndPositionsEnum)) {
- reuse = new MemoryDocsAndPositionsEnum();
+ public PostingsEnum postings(Bits liveDocs, PostingsEnum reuse, int flags) {
+ if (reuse == null || !(reuse instanceof MemoryPostingsEnum)) {
+ reuse = new MemoryPostingsEnum();
}
final int ord = info.sortedTerms[termUpto];
- return ((MemoryDocsAndPositionsEnum) reuse).reset(liveDocs, info.sliceArray.start[ord], info.sliceArray.end[ord], info.sliceArray.freq[ord]);
+ return ((MemoryPostingsEnum) reuse).reset(liveDocs, info.sliceArray.start[ord], info.sliceArray.end[ord], info.sliceArray.freq[ord]);
}
@Override
@@ -1013,69 +1004,26 @@ public class MemoryIndex {
}
}
- private class MemoryDocsEnum extends DocsEnum {
- private boolean hasNext;
- private Bits liveDocs;
- private int doc = -1;
- private int freq;
+ private class MemoryPostingsEnum extends PostingsEnum {
- public DocsEnum reset(Bits liveDocs, int freq) {
- this.liveDocs = liveDocs;
- hasNext = true;
- doc = -1;
- this.freq = freq;
- return this;
- }
-
- @Override
- public int docID() {
- return doc;
- }
-
- @Override
- public int nextDoc() {
- if (hasNext && (liveDocs == null || liveDocs.get(0))) {
- hasNext = false;
- return doc = 0;
- } else {
- return doc = NO_MORE_DOCS;
- }
- }
-
- @Override
- public int advance(int target) throws IOException {
- return slowAdvance(target);
- }
-
- @Override
- public int freq() throws IOException {
- return freq;
- }
-
- @Override
- public long cost() {
- return 1;
- }
- }
-
- private class MemoryDocsAndPositionsEnum extends DocsAndPositionsEnum {
private final SliceReader sliceReader;
private int posUpto; // for assert
private boolean hasNext;
private Bits liveDocs;
private int doc = -1;
private int freq;
+ private int pos;
private int startOffset;
private int endOffset;
private int payloadIndex;
private final BytesRefBuilder payloadBuilder;//only non-null when storePayloads
- public MemoryDocsAndPositionsEnum() {
+ public MemoryPostingsEnum() {
this.sliceReader = new SliceReader(intBlockPool);
this.payloadBuilder = storePayloads ? new BytesRefBuilder() : null;
}
- public DocsAndPositionsEnum reset(Bits liveDocs, int start, int end, int freq) {
+ public PostingsEnum reset(Bits liveDocs, int start, int end, int freq) {
this.liveDocs = liveDocs;
this.sliceReader.reset(start, end);
posUpto = 0; // for assert
@@ -1093,6 +1041,7 @@ public class MemoryIndex {
@Override
public int nextDoc() {
+ pos = -1;
if (hasNext && (liveDocs == null || liveDocs.get(0))) {
hasNext = false;
return doc = 0;
@@ -1113,10 +1062,12 @@ public class MemoryIndex {
@Override
public int nextPosition() {
- assert posUpto++ < freq;
+ posUpto++;
+ assert posUpto <= freq;
assert !sliceReader.endOfSlice() : " stores offsets : " + startOffset;
int pos = sliceReader.readInt();
if (storeOffsets) {
+      // when offsets are stored, they follow the position in the slice
startOffset = sliceReader.readInt();
endOffset = sliceReader.readInt();
}
diff --git a/lucene/memory/src/test/org/apache/lucene/index/memory/TestMemoryIndexAgainstRAMDir.java b/lucene/memory/src/test/org/apache/lucene/index/memory/TestMemoryIndexAgainstRAMDir.java
index a507552ce5d..2b14858780a 100644
--- a/lucene/memory/src/test/org/apache/lucene/index/memory/TestMemoryIndexAgainstRAMDir.java
+++ b/lucene/memory/src/test/org/apache/lucene/index/memory/TestMemoryIndexAgainstRAMDir.java
@@ -42,8 +42,7 @@ import org.apache.lucene.document.FieldType;
import org.apache.lucene.document.TextField;
import org.apache.lucene.index.CompositeReader;
import org.apache.lucene.index.DirectoryReader;
-import org.apache.lucene.index.DocsAndPositionsEnum;
-import org.apache.lucene.index.DocsEnum;
+import org.apache.lucene.index.PostingsEnum;
import org.apache.lucene.index.Fields;
import org.apache.lucene.index.IndexOptions;
import org.apache.lucene.index.IndexReader;
@@ -199,9 +198,9 @@ public class TestMemoryIndexAgainstRAMDir extends BaseTokenStreamTestCase {
while(iwTermsIter.next() != null) {
assertNotNull(memTermsIter.next());
assertEquals(iwTermsIter.term(), memTermsIter.term());
- DocsAndPositionsEnum iwDocsAndPos = iwTermsIter.docsAndPositions(null, null);
- DocsAndPositionsEnum memDocsAndPos = memTermsIter.docsAndPositions(null, null);
- while(iwDocsAndPos.nextDoc() != DocsAndPositionsEnum.NO_MORE_DOCS) {
+ PostingsEnum iwDocsAndPos = iwTermsIter.postings(null, null, PostingsEnum.FLAG_ALL);
+ PostingsEnum memDocsAndPos = memTermsIter.postings(null, null, PostingsEnum.FLAG_ALL);
+ while(iwDocsAndPos.nextDoc() != PostingsEnum.NO_MORE_DOCS) {
assertEquals(iwDocsAndPos.docID(), memDocsAndPos.nextDoc());
assertEquals(iwDocsAndPos.freq(), memDocsAndPos.freq());
for (int i = 0; i < iwDocsAndPos.freq(); i++) {
@@ -222,9 +221,9 @@ public class TestMemoryIndexAgainstRAMDir extends BaseTokenStreamTestCase {
} else {
while(iwTermsIter.next() != null) {
assertEquals(iwTermsIter.term(), memTermsIter.term());
- DocsEnum iwDocsAndPos = iwTermsIter.docs(null, null);
- DocsEnum memDocsAndPos = memTermsIter.docs(null, null);
- while(iwDocsAndPos.nextDoc() != DocsAndPositionsEnum.NO_MORE_DOCS) {
+ PostingsEnum iwDocsAndPos = iwTermsIter.postings(null, null);
+ PostingsEnum memDocsAndPos = memTermsIter.postings(null, null);
+ while(iwDocsAndPos.nextDoc() != PostingsEnum.NO_MORE_DOCS) {
assertEquals(iwDocsAndPos.docID(), memDocsAndPos.nextDoc());
assertEquals(iwDocsAndPos.freq(), memDocsAndPos.freq());
}
@@ -319,7 +318,7 @@ public class TestMemoryIndexAgainstRAMDir extends BaseTokenStreamTestCase {
MemoryIndex memory = new MemoryIndex(random().nextBoolean(), false, random().nextInt(50) * 1024 * 1024);
memory.addField("foo", "bar", analyzer);
LeafReader reader = (LeafReader) memory.createSearcher().getIndexReader();
- DocsEnum disi = TestUtil.docs(random(), reader, "foo", new BytesRef("bar"), null, null, DocsEnum.FLAG_NONE);
+ PostingsEnum disi = TestUtil.docs(random(), reader, "foo", new BytesRef("bar"), null, null, PostingsEnum.FLAG_NONE);
int docid = disi.docID();
assertEquals(-1, docid);
assertTrue(disi.nextDoc() != DocIdSetIterator.NO_MORE_DOCS);
@@ -327,7 +326,7 @@ public class TestMemoryIndexAgainstRAMDir extends BaseTokenStreamTestCase {
// now reuse and check again
TermsEnum te = reader.terms("foo").iterator(null);
assertTrue(te.seekExact(new BytesRef("bar")));
- disi = te.docs(null, disi, DocsEnum.FLAG_NONE);
+ disi = te.postings(null, disi, PostingsEnum.FLAG_NONE);
docid = disi.docID();
assertEquals(-1, docid);
assertTrue(disi.nextDoc() != DocIdSetIterator.NO_MORE_DOCS);
@@ -354,7 +353,7 @@ public class TestMemoryIndexAgainstRAMDir extends BaseTokenStreamTestCase {
memory.addField("foo", "bar", analyzer);
LeafReader reader = (LeafReader) memory.createSearcher().getIndexReader();
assertEquals(1, reader.terms("foo").getSumTotalTermFreq());
- DocsAndPositionsEnum disi = reader.termPositionsEnum(new Term("foo", "bar"));
+ PostingsEnum disi = reader.termDocsEnum(new Term("foo", "bar"), PostingsEnum.FLAG_ALL);
int docid = disi.docID();
assertEquals(-1, docid);
assertTrue(disi.nextDoc() != DocIdSetIterator.NO_MORE_DOCS);
@@ -365,7 +364,7 @@ public class TestMemoryIndexAgainstRAMDir extends BaseTokenStreamTestCase {
// now reuse and check again
TermsEnum te = reader.terms("foo").iterator(null);
assertTrue(te.seekExact(new BytesRef("bar")));
- disi = te.docsAndPositions(null, disi);
+ disi = te.postings(null, disi);
docid = disi.docID();
assertEquals(-1, docid);
assertTrue(disi.nextDoc() != DocIdSetIterator.NO_MORE_DOCS);
@@ -426,7 +425,7 @@ public class TestMemoryIndexAgainstRAMDir extends BaseTokenStreamTestCase {
assertNull(reader.getNumericDocValues("not-in-index"));
assertNull(reader.getNormValues("not-in-index"));
assertNull(reader.termDocsEnum(new Term("not-in-index", "foo")));
- assertNull(reader.termPositionsEnum(new Term("not-in-index", "foo")));
+ assertNull(reader.termDocsEnum(new Term("not-in-index", "foo"), PostingsEnum.FLAG_ALL));
assertNull(reader.terms("not-in-index"));
}
@@ -526,8 +525,8 @@ public class TestMemoryIndexAgainstRAMDir extends BaseTokenStreamTestCase {
assertNotNull(memTermEnum.next());
assertThat(termEnum.totalTermFreq(), equalTo(memTermEnum.totalTermFreq()));
- DocsAndPositionsEnum docsPosEnum = termEnum.docsAndPositions(null, null, 0);
- DocsAndPositionsEnum memDocsPosEnum = memTermEnum.docsAndPositions(null, null, 0);
+ PostingsEnum docsPosEnum = termEnum.postings(null, null, PostingsEnum.FLAG_POSITIONS);
+ PostingsEnum memDocsPosEnum = memTermEnum.postings(null, null, PostingsEnum.FLAG_POSITIONS);
String currentTerm = termEnum.term().utf8ToString();
assertThat("Token mismatch for field: " + field_name, currentTerm, equalTo(memTermEnum.term().utf8ToString()));
diff --git a/lucene/misc/src/java/org/apache/lucene/index/Sorter.java b/lucene/misc/src/java/org/apache/lucene/index/Sorter.java
index 22912bc67c9..1892e658cac 100644
--- a/lucene/misc/src/java/org/apache/lucene/index/Sorter.java
+++ b/lucene/misc/src/java/org/apache/lucene/index/Sorter.java
@@ -24,6 +24,7 @@ import org.apache.lucene.search.LeafFieldComparator;
import org.apache.lucene.search.Scorer;
import org.apache.lucene.search.Sort;
import org.apache.lucene.search.SortField;
+import org.apache.lucene.util.BytesRef;
import org.apache.lucene.util.TimSorter;
import org.apache.lucene.util.packed.PackedInts;
import org.apache.lucene.util.packed.PackedLongValues;
@@ -259,24 +260,60 @@ final class Sorter {
}
static final Scorer FAKESCORER = new Scorer(null) {
-
- @Override
- public float score() throws IOException { throw new UnsupportedOperationException(); }
-
- @Override
- public int freq() throws IOException { throw new UnsupportedOperationException(); }
+
+ float score;
+ int doc = -1;
+ int freq = 1;
@Override
- public int docID() { throw new UnsupportedOperationException(); }
+ public int docID() {
+ return doc;
+ }
@Override
- public int nextDoc() throws IOException { throw new UnsupportedOperationException(); }
+ public int nextDoc() throws IOException {
+ throw new UnsupportedOperationException();
+ }
@Override
- public int advance(int target) throws IOException { throw new UnsupportedOperationException(); }
+ public int advance(int target) throws IOException {
+ throw new UnsupportedOperationException();
+ }
@Override
- public long cost() { throw new UnsupportedOperationException(); }
+ public long cost() {
+ throw new UnsupportedOperationException();
+ }
+
+ @Override
+ public int freq() throws IOException {
+ return freq;
+ }
+
+ @Override
+ public int nextPosition() throws IOException {
+ throw new UnsupportedOperationException();
+ }
+
+ @Override
+ public int startOffset() throws IOException {
+ throw new UnsupportedOperationException();
+ }
+
+ @Override
+ public int endOffset() throws IOException {
+ throw new UnsupportedOperationException();
+ }
+
+ @Override
+ public BytesRef getPayload() throws IOException {
+ throw new UnsupportedOperationException();
+ }
+
+ @Override
+ public float score() throws IOException {
+ return score;
+ }
};
}
diff --git a/lucene/misc/src/java/org/apache/lucene/index/SortingLeafReader.java b/lucene/misc/src/java/org/apache/lucene/index/SortingLeafReader.java
index ee6140b47cd..2b23e2a529b 100644
--- a/lucene/misc/src/java/org/apache/lucene/index/SortingLeafReader.java
+++ b/lucene/misc/src/java/org/apache/lucene/index/SortingLeafReader.java
@@ -20,21 +20,6 @@ package org.apache.lucene.index;
import java.io.IOException;
import java.util.Arrays;
-import org.apache.lucene.index.FilterLeafReader;
-import org.apache.lucene.index.LeafReader;
-import org.apache.lucene.index.BinaryDocValues;
-import org.apache.lucene.index.DocsAndPositionsEnum;
-import org.apache.lucene.index.DocsEnum;
-import org.apache.lucene.index.IndexOptions;
-import org.apache.lucene.index.FieldInfos;
-import org.apache.lucene.index.Fields;
-import org.apache.lucene.index.NumericDocValues;
-import org.apache.lucene.index.SortedDocValues;
-import org.apache.lucene.index.SortedNumericDocValues;
-import org.apache.lucene.index.SortedSetDocValues;
-import org.apache.lucene.index.StoredFieldVisitor;
-import org.apache.lucene.index.Terms;
-import org.apache.lucene.index.TermsEnum;
import org.apache.lucene.index.Sorter.DocMap;
import org.apache.lucene.search.DocIdSetIterator;
import org.apache.lucene.search.Sort;
@@ -52,7 +37,7 @@ import org.apache.lucene.util.automaton.CompiledAutomaton;
/**
* An {@link org.apache.lucene.index.LeafReader} which supports sorting documents by a given
* {@link Sort}. You can use this class to sort an index as follows:
- *
+ *
*
* IndexWriter writer; // writer to which the sorted index will be added
* DirectoryReader reader; // reader on the input index
@@ -62,7 +47,7 @@ import org.apache.lucene.util.automaton.CompiledAutomaton;
* writer.close();
* reader.close();
*
- *
+ *
* @lucene.experimental
*/
public class SortingLeafReader extends FilterLeafReader {
@@ -94,7 +79,7 @@ public class SortingLeafReader extends FilterLeafReader {
private final Sorter.DocMap docMap;
private final IndexOptions indexOptions;
-
+
public SortingTerms(final Terms in, IndexOptions indexOptions, final Sorter.DocMap docMap) {
super(in);
this.docMap = docMap;
@@ -118,7 +103,7 @@ public class SortingLeafReader extends FilterLeafReader {
final Sorter.DocMap docMap; // pkg-protected to avoid synthetic accessor methods
private final IndexOptions indexOptions;
-
+
public SortingTermsEnum(final TermsEnum in, Sorter.DocMap docMap, IndexOptions indexOptions) {
super(in);
this.docMap = docMap;
@@ -145,8 +130,35 @@ public class SortingLeafReader extends FilterLeafReader {
}
@Override
- public DocsEnum docs(Bits liveDocs, DocsEnum reuse, final int flags) throws IOException {
- final DocsEnum inReuse;
+ public PostingsEnum postings(Bits liveDocs, PostingsEnum reuse, final int flags) throws IOException {
+
+ if (PostingsEnum.requiresPositions(flags)) {
+ final PostingsEnum inReuse;
+ final SortingPostingsEnum wrapReuse;
+ if (reuse != null && reuse instanceof SortingPostingsEnum) {
+ // if we're asked to reuse the given DocsEnum and it is Sorting, return
+ // the wrapped one, since some Codecs expect it.
+ wrapReuse = (SortingPostingsEnum) reuse;
+ inReuse = wrapReuse.getWrapped();
+ } else {
+ wrapReuse = null;
+ inReuse = reuse;
+ }
+
+ final PostingsEnum inDocsAndPositions = in.postings(newToOld(liveDocs), inReuse, flags);
+ if (inDocsAndPositions == null) {
+ return null;
+ }
+
+ // we ignore the fact that offsets may be stored but not asked for,
+ // since this code is expected to be used during addIndexes which will
+ // ask for everything. if that assumption changes in the future, we can
+ // factor in whether 'flags' says offsets are not required.
+ final boolean storeOffsets = indexOptions.compareTo(IndexOptions.DOCS_AND_FREQS_AND_POSITIONS_AND_OFFSETS) >= 0;
+ return new SortingPostingsEnum(docMap.size(), wrapReuse, inDocsAndPositions, docMap, storeOffsets);
+ }
+
+ final PostingsEnum inReuse;
final SortingDocsEnum wrapReuse;
if (reuse != null && reuse instanceof SortingDocsEnum) {
// if we're asked to reuse the given DocsEnum and it is Sorting, return
@@ -158,45 +170,18 @@ public class SortingLeafReader extends FilterLeafReader {
inReuse = reuse;
}
- final DocsEnum inDocs = in.docs(newToOld(liveDocs), inReuse, flags);
- final boolean withFreqs = indexOptions.compareTo(IndexOptions.DOCS_AND_FREQS) >=0 && (flags & DocsEnum.FLAG_FREQS) != 0;
+ final PostingsEnum inDocs = in.postings(newToOld(liveDocs), inReuse, flags);
+ final boolean withFreqs = indexOptions.compareTo(IndexOptions.DOCS_AND_FREQS) >=0 && (flags & PostingsEnum.FLAG_FREQS) != 0;
return new SortingDocsEnum(docMap.size(), wrapReuse, inDocs, withFreqs, docMap);
}
- @Override
- public DocsAndPositionsEnum docsAndPositions(Bits liveDocs, DocsAndPositionsEnum reuse, final int flags) throws IOException {
- final DocsAndPositionsEnum inReuse;
- final SortingDocsAndPositionsEnum wrapReuse;
- if (reuse != null && reuse instanceof SortingDocsAndPositionsEnum) {
- // if we're asked to reuse the given DocsEnum and it is Sorting, return
- // the wrapped one, since some Codecs expect it.
- wrapReuse = (SortingDocsAndPositionsEnum) reuse;
- inReuse = wrapReuse.getWrapped();
- } else {
- wrapReuse = null;
- inReuse = reuse;
- }
-
- final DocsAndPositionsEnum inDocsAndPositions = in.docsAndPositions(newToOld(liveDocs), inReuse, flags);
- if (inDocsAndPositions == null) {
- return null;
- }
-
- // we ignore the fact that offsets may be stored but not asked for,
- // since this code is expected to be used during addIndexes which will
- // ask for everything. if that assumption changes in the future, we can
- // factor in whether 'flags' says offsets are not required.
- final boolean storeOffsets = indexOptions.compareTo(IndexOptions.DOCS_AND_FREQS_AND_POSITIONS_AND_OFFSETS) >= 0;
- return new SortingDocsAndPositionsEnum(docMap.size(), wrapReuse, inDocsAndPositions, docMap, storeOffsets);
- }
-
}
private static class SortingBinaryDocValues extends BinaryDocValues {
-
+
private final BinaryDocValues in;
private final Sorter.DocMap docMap;
-
+
SortingBinaryDocValues(BinaryDocValues in, Sorter.DocMap docMap) {
this.in = in;
this.docMap = docMap;
@@ -207,7 +192,7 @@ public class SortingLeafReader extends FilterLeafReader {
return in.get(docMap.newToOld(docID));
}
}
-
+
private static class SortingNumericDocValues extends NumericDocValues {
private final NumericDocValues in;
@@ -223,33 +208,33 @@ public class SortingLeafReader extends FilterLeafReader {
return in.get(docMap.newToOld(docID));
}
}
-
+
private static class SortingSortedNumericDocValues extends SortedNumericDocValues {
-
+
private final SortedNumericDocValues in;
private final Sorter.DocMap docMap;
-
+
SortingSortedNumericDocValues(SortedNumericDocValues in, DocMap docMap) {
this.in = in;
this.docMap = docMap;
}
-
+
@Override
public int count() {
return in.count();
}
-
+
@Override
public void setDocument(int doc) {
in.setDocument(docMap.newToOld(doc));
}
-
+
@Override
public long valueAt(int index) {
return in.valueAt(index);
}
}
-
+
private static class SortingBits implements Bits {
private final Bits in;
@@ -270,12 +255,12 @@ public class SortingLeafReader extends FilterLeafReader {
return in.length();
}
}
-
+
private static class SortingSortedDocValues extends SortedDocValues {
-
+
private final SortedDocValues in;
private final Sorter.DocMap docMap;
-
+
SortingSortedDocValues(SortedDocValues in, Sorter.DocMap docMap) {
this.in = in;
this.docMap = docMap;
@@ -306,12 +291,12 @@ public class SortingLeafReader extends FilterLeafReader {
return in.lookupTerm(key);
}
}
-
+
private static class SortingSortedSetDocValues extends SortedSetDocValues {
-
+
private final SortedSetDocValues in;
private final Sorter.DocMap docMap;
-
+
SortingSortedSetDocValues(SortedSetDocValues in, Sorter.DocMap docMap) {
this.in = in;
this.docMap = docMap;
@@ -344,14 +329,14 @@ public class SortingLeafReader extends FilterLeafReader {
}
static class SortingDocsEnum extends FilterDocsEnum {
-
+
private static final class DocFreqSorter extends TimSorter {
-
+
private int[] docs;
private int[] freqs;
private final int[] tmpDocs;
private int[] tmpFreqs;
-
+
public DocFreqSorter(int maxDoc) {
super(maxDoc / 64);
this.tmpDocs = new int[maxDoc / 64];
@@ -369,13 +354,13 @@ public class SortingLeafReader extends FilterLeafReader {
protected int compare(int i, int j) {
return docs[i] - docs[j];
}
-
+
@Override
protected void swap(int i, int j) {
int tmpDoc = docs[i];
docs[i] = docs[j];
docs[j] = tmpDoc;
-
+
if (freqs != null) {
int tmpFreq = freqs[i];
freqs[i] = freqs[j];
@@ -421,7 +406,7 @@ public class SortingLeafReader extends FilterLeafReader {
private final int upto;
private final boolean withFreqs;
- SortingDocsEnum(int maxDoc, SortingDocsEnum reuse, final DocsEnum in, boolean withFreqs, final Sorter.DocMap docMap) throws IOException {
+ SortingDocsEnum(int maxDoc, SortingDocsEnum reuse, final PostingsEnum in, boolean withFreqs, final Sorter.DocMap docMap) throws IOException {
super(in);
this.maxDoc = maxDoc;
this.withFreqs = withFreqs;
@@ -470,7 +455,7 @@ public class SortingLeafReader extends FilterLeafReader {
}
// for testing
- boolean reused(DocsEnum other) {
+ boolean reused(PostingsEnum other) {
if (other == null || !(other instanceof SortingDocsEnum)) {
return false;
}
@@ -483,43 +468,43 @@ public class SortingLeafReader extends FilterLeafReader {
// don't bother to implement efficiently for now.
return slowAdvance(target);
}
-
+
@Override
public int docID() {
return docIt < 0 ? -1 : docIt >= upto ? NO_MORE_DOCS : docs[docIt];
}
-
+
@Override
public int freq() throws IOException {
return withFreqs && docIt < upto ? freqs[docIt] : 1;
}
-
+
@Override
public int nextDoc() throws IOException {
if (++docIt >= upto) return NO_MORE_DOCS;
return docs[docIt];
}
-
- /** Returns the wrapped {@link DocsEnum}. */
- DocsEnum getWrapped() {
+
+ /** Returns the wrapped {@link PostingsEnum}. */
+ PostingsEnum getWrapped() {
return in;
}
}
-
- static class SortingDocsAndPositionsEnum extends FilterDocsAndPositionsEnum {
-
+
+ static class SortingPostingsEnum extends FilterDocsEnum {
+
/**
* A {@link TimSorter} which sorts two parallel arrays of doc IDs and
* offsets in one go. Everytime a doc ID is 'swapped', its corresponding offset
* is swapped too.
*/
private static final class DocOffsetSorter extends TimSorter {
-
+
private int[] docs;
private long[] offsets;
private final int[] tmpDocs;
private final long[] tmpOffsets;
-
+
public DocOffsetSorter(int maxDoc) {
super(maxDoc / 64);
this.tmpDocs = new int[maxDoc / 64];
@@ -535,13 +520,13 @@ public class SortingLeafReader extends FilterLeafReader {
protected int compare(int i, int j) {
return docs[i] - docs[j];
}
-
+
@Override
protected void swap(int i, int j) {
int tmpDoc = docs[i];
docs[i] = docs[j];
docs[j] = tmpDoc;
-
+
long tmpOffset = offsets[i];
offsets[i] = offsets[j];
offsets[j] = tmpOffset;
@@ -570,16 +555,16 @@ public class SortingLeafReader extends FilterLeafReader {
return tmpDocs[i] - docs[j];
}
}
-
+
private final int maxDoc;
private final DocOffsetSorter sorter;
private int[] docs;
private long[] offsets;
private final int upto;
-
+
private final IndexInput postingInput;
private final boolean storeOffsets;
-
+
private int docIt = -1;
private int pos;
private int startOffset = -1;
@@ -589,7 +574,7 @@ public class SortingLeafReader extends FilterLeafReader {
private final RAMFile file;
- SortingDocsAndPositionsEnum(int maxDoc, SortingDocsAndPositionsEnum reuse, final DocsAndPositionsEnum in, Sorter.DocMap docMap, boolean storeOffsets) throws IOException {
+ SortingPostingsEnum(int maxDoc, SortingPostingsEnum reuse, final PostingsEnum in, Sorter.DocMap docMap, boolean storeOffsets) throws IOException {
super(in);
this.maxDoc = maxDoc;
this.storeOffsets = storeOffsets;
@@ -632,14 +617,14 @@ public class SortingLeafReader extends FilterLeafReader {
}
// for testing
- boolean reused(DocsAndPositionsEnum other) {
- if (other == null || !(other instanceof SortingDocsAndPositionsEnum)) {
+ boolean reused(PostingsEnum other) {
+ if (other == null || !(other instanceof SortingPostingsEnum)) {
return false;
}
- return docs == ((SortingDocsAndPositionsEnum) other).docs;
+ return docs == ((SortingPostingsEnum) other).docs;
}
- private void addPositions(final DocsAndPositionsEnum in, final IndexOutput out) throws IOException {
+ private void addPositions(final PostingsEnum in, final IndexOutput out) throws IOException {
int freq = in.freq();
out.writeVInt(freq);
int previousPosition = 0;
@@ -648,7 +633,7 @@ public class SortingLeafReader extends FilterLeafReader {
final int pos = in.nextPosition();
final BytesRef payload = in.getPayload();
// The low-order bit of token is set only if there is a payload, the
- // previous bits are the delta-encoded position.
+ // previous bits are the delta-encoded position.
final int token = (pos - previousPosition) << 1 | (payload == null ? 0 : 1);
out.writeVInt(token);
previousPosition = pos;
@@ -665,34 +650,34 @@ public class SortingLeafReader extends FilterLeafReader {
}
}
}
-
+
@Override
public int advance(final int target) throws IOException {
// need to support it for checkIndex, but in practice it won't be called, so
// don't bother to implement efficiently for now.
return slowAdvance(target);
}
-
+
@Override
public int docID() {
return docIt < 0 ? -1 : docIt >= upto ? NO_MORE_DOCS : docs[docIt];
}
-
+
@Override
public int endOffset() throws IOException {
return endOffset;
}
-
+
@Override
public int freq() throws IOException {
return currFreq;
}
-
+
@Override
public BytesRef getPayload() throws IOException {
return payload.length == 0 ? null : payload;
}
-
+
@Override
public int nextDoc() throws IOException {
if (++docIt >= upto) return DocIdSetIterator.NO_MORE_DOCS;
@@ -703,7 +688,7 @@ public class SortingLeafReader extends FilterLeafReader {
endOffset = 0;
return docs[docIt];
}
-
+
@Override
public int nextPosition() throws IOException {
final int token = postingInput.readVInt();
@@ -724,14 +709,14 @@ public class SortingLeafReader extends FilterLeafReader {
}
return pos;
}
-
+
@Override
public int startOffset() throws IOException {
return startOffset;
}
- /** Returns the wrapped {@link DocsAndPositionsEnum}. */
- DocsAndPositionsEnum getWrapped() {
+ /** Returns the wrapped {@link PostingsEnum}. */
+ PostingsEnum getWrapped() {
return in;
}
}
@@ -767,12 +752,12 @@ public class SortingLeafReader extends FilterLeafReader {
public void document(final int docID, final StoredFieldVisitor visitor) throws IOException {
in.document(docMap.newToOld(docID), visitor);
}
-
+
@Override
public Fields fields() throws IOException {
return new SortingFields(in.fields(), in.getFieldInfos(), docMap);
}
-
+
@Override
public BinaryDocValues getBinaryDocValues(String field) throws IOException {
BinaryDocValues oldDocValues = in.getBinaryDocValues(field);
@@ -782,7 +767,7 @@ public class SortingLeafReader extends FilterLeafReader {
return new SortingBinaryDocValues(oldDocValues, docMap);
}
}
-
+
@Override
public Bits getLiveDocs() {
final Bits inLiveDocs = in.getLiveDocs();
@@ -792,7 +777,7 @@ public class SortingLeafReader extends FilterLeafReader {
return new SortingBits(inLiveDocs, docMap);
}
}
-
+
@Override
public NumericDocValues getNormValues(String field) throws IOException {
final NumericDocValues norm = in.getNormValues(field);
@@ -809,7 +794,7 @@ public class SortingLeafReader extends FilterLeafReader {
if (oldDocValues == null) return null;
return new SortingNumericDocValues(oldDocValues, docMap);
}
-
+
@Override
public SortedNumericDocValues getSortedNumericDocValues(String field)
throws IOException {
@@ -830,7 +815,7 @@ public class SortingLeafReader extends FilterLeafReader {
return new SortingSortedDocValues(sortedDV, docMap);
}
}
-
+
@Override
public SortedSetDocValues getSortedSetDocValues(String field) throws IOException {
SortedSetDocValues sortedSetDV = in.getSortedSetDocValues(field);
@@ -838,7 +823,7 @@ public class SortingLeafReader extends FilterLeafReader {
return null;
} else {
return new SortingSortedSetDocValues(sortedSetDV, docMap);
- }
+ }
}
@Override
diff --git a/lucene/misc/src/java/org/apache/lucene/uninverting/DocTermOrds.java b/lucene/misc/src/java/org/apache/lucene/uninverting/DocTermOrds.java
index 6b6eb8f437b..aebb33e1c02 100644
--- a/lucene/misc/src/java/org/apache/lucene/uninverting/DocTermOrds.java
+++ b/lucene/misc/src/java/org/apache/lucene/uninverting/DocTermOrds.java
@@ -25,8 +25,7 @@ import java.util.List;
import org.apache.lucene.codecs.PostingsFormat; // javadocs
import org.apache.lucene.index.DocValues;
import org.apache.lucene.index.DocValuesType;
-import org.apache.lucene.index.DocsAndPositionsEnum;
-import org.apache.lucene.index.DocsEnum;
+import org.apache.lucene.index.PostingsEnum;
import org.apache.lucene.index.FieldInfo;
import org.apache.lucene.index.LeafReader;
import org.apache.lucene.index.SortedSetDocValues;
@@ -165,7 +164,7 @@ public class DocTermOrds implements Accountable {
protected int ordBase;
/** Used while uninverting. */
- protected DocsEnum docsEnum;
+ protected PostingsEnum postingsEnum;
/** Returns total bytes used. */
public long ramBytesUsed() {
@@ -326,7 +325,7 @@ public class DocTermOrds implements Accountable {
// frequent terms ahead of time.
int termNum = 0;
- docsEnum = null;
+ postingsEnum = null;
// Loop begins with te positioned to first term (we call
// seek above):
@@ -366,13 +365,13 @@ public class DocTermOrds implements Accountable {
final int df = te.docFreq();
if (df <= maxTermDocFreq) {
- docsEnum = te.docs(liveDocs, docsEnum, DocsEnum.FLAG_NONE);
+ postingsEnum = te.postings(liveDocs, postingsEnum, PostingsEnum.FLAG_NONE);
// dF, but takes deletions into account
int actualDF = 0;
for (;;) {
- int doc = docsEnum.nextDoc();
+ int doc = postingsEnum.nextDoc();
if (doc == DocIdSetIterator.NO_MORE_DOCS) {
break;
}
@@ -613,13 +612,8 @@ public class DocTermOrds implements Accountable {
}
@Override
- public DocsEnum docs(Bits liveDocs, DocsEnum reuse, int flags) throws IOException {
- return termsEnum.docs(liveDocs, reuse, flags);
- }
-
- @Override
- public DocsAndPositionsEnum docsAndPositions(Bits liveDocs, DocsAndPositionsEnum reuse, int flags) throws IOException {
- return termsEnum.docsAndPositions(liveDocs, reuse, flags);
+ public PostingsEnum postings(Bits liveDocs, PostingsEnum reuse, int flags) throws IOException {
+ return termsEnum.postings(liveDocs, reuse, flags);
}
@Override
diff --git a/lucene/misc/src/java/org/apache/lucene/uninverting/FieldCacheImpl.java b/lucene/misc/src/java/org/apache/lucene/uninverting/FieldCacheImpl.java
index 6b18fbf348d..b7f98f1a25e 100644
--- a/lucene/misc/src/java/org/apache/lucene/uninverting/FieldCacheImpl.java
+++ b/lucene/misc/src/java/org/apache/lucene/uninverting/FieldCacheImpl.java
@@ -30,7 +30,7 @@ import java.util.WeakHashMap;
import org.apache.lucene.index.BinaryDocValues;
import org.apache.lucene.index.DocValues;
import org.apache.lucene.index.DocValuesType;
-import org.apache.lucene.index.DocsEnum;
+import org.apache.lucene.index.PostingsEnum;
import org.apache.lucene.index.FieldInfo;
import org.apache.lucene.index.IndexOptions;
import org.apache.lucene.index.LeafReader;
@@ -280,7 +280,7 @@ class FieldCacheImpl implements FieldCache {
final TermsEnum termsEnum = termsEnum(terms);
- DocsEnum docs = null;
+ PostingsEnum docs = null;
FixedBitSet docsWithField = null;
while(true) {
final BytesRef term = termsEnum.next();
@@ -288,7 +288,7 @@ class FieldCacheImpl implements FieldCache {
break;
}
visitTerm(term);
- docs = termsEnum.docs(null, docs, DocsEnum.FLAG_NONE);
+ docs = termsEnum.postings(null, docs, PostingsEnum.FLAG_NONE);
while (true) {
final int docID = docs.nextDoc();
if (docID == DocIdSetIterator.NO_MORE_DOCS) {
@@ -408,7 +408,7 @@ class FieldCacheImpl implements FieldCache {
return new BitsEntry(new Bits.MatchAllBits(maxDoc));
}
final TermsEnum termsEnum = terms.iterator(null);
- DocsEnum docs = null;
+ PostingsEnum docs = null;
while(true) {
final BytesRef term = termsEnum.next();
if (term == null) {
@@ -419,7 +419,7 @@ class FieldCacheImpl implements FieldCache {
res = new FixedBitSet(maxDoc);
}
- docs = termsEnum.docs(null, docs, DocsEnum.FLAG_NONE);
+ docs = termsEnum.postings(null, docs, PostingsEnum.FLAG_NONE);
// TODO: use bulk API
while (true) {
final int docID = docs.nextDoc();
@@ -686,7 +686,7 @@ class FieldCacheImpl implements FieldCache {
if (terms != null) {
final TermsEnum termsEnum = terms.iterator(null);
- DocsEnum docs = null;
+ PostingsEnum docs = null;
while(true) {
final BytesRef term = termsEnum.next();
@@ -698,7 +698,7 @@ class FieldCacheImpl implements FieldCache {
}
termOrdToBytesOffset.add(bytes.copyUsingLengthPrefix(term));
- docs = termsEnum.docs(null, docs, DocsEnum.FLAG_NONE);
+ docs = termsEnum.postings(null, docs, PostingsEnum.FLAG_NONE);
while (true) {
final int docID = docs.nextDoc();
if (docID == DocIdSetIterator.NO_MORE_DOCS) {
@@ -836,7 +836,7 @@ class FieldCacheImpl implements FieldCache {
if (terms != null) {
int termCount = 0;
final TermsEnum termsEnum = terms.iterator(null);
- DocsEnum docs = null;
+ PostingsEnum docs = null;
while(true) {
if (termCount++ == termCountHardLimit) {
// app is misusing the API (there is more than
@@ -850,7 +850,7 @@ class FieldCacheImpl implements FieldCache {
break;
}
final long pointer = bytes.copyUsingLengthPrefix(term);
- docs = termsEnum.docs(null, docs, DocsEnum.FLAG_NONE);
+ docs = termsEnum.postings(null, docs, PostingsEnum.FLAG_NONE);
while (true) {
final int docID = docs.nextDoc();
if (docID == DocIdSetIterator.NO_MORE_DOCS) {
diff --git a/lucene/misc/src/test/org/apache/lucene/index/SorterTestBase.java b/lucene/misc/src/test/org/apache/lucene/index/SorterTestBase.java
index 30b0be7be0c..c6596ccc1ff 100644
--- a/lucene/misc/src/test/org/apache/lucene/index/SorterTestBase.java
+++ b/lucene/misc/src/test/org/apache/lucene/index/SorterTestBase.java
@@ -31,8 +31,8 @@ import org.apache.lucene.analysis.tokenattributes.OffsetAttribute;
import org.apache.lucene.analysis.tokenattributes.PayloadAttribute;
import org.apache.lucene.document.BinaryDocValuesField;
import org.apache.lucene.document.Document;
-import org.apache.lucene.document.Field.Store;
import org.apache.lucene.document.Field;
+import org.apache.lucene.document.Field.Store;
import org.apache.lucene.document.FieldType;
import org.apache.lucene.document.NumericDocValuesField;
import org.apache.lucene.document.SortedDocValuesField;
@@ -40,27 +40,8 @@ import org.apache.lucene.document.SortedNumericDocValuesField;
import org.apache.lucene.document.SortedSetDocValuesField;
import org.apache.lucene.document.StringField;
import org.apache.lucene.document.TextField;
-import org.apache.lucene.index.BinaryDocValues;
-import org.apache.lucene.index.DirectoryReader;
-import org.apache.lucene.index.DocsAndPositionsEnum;
-import org.apache.lucene.index.DocsEnum;
-import org.apache.lucene.index.FieldInvertState;
-import org.apache.lucene.index.IndexOptions;
-import org.apache.lucene.index.IndexWriterConfig;
-import org.apache.lucene.index.LeafReader;
-import org.apache.lucene.index.LeafReaderContext;
-import org.apache.lucene.index.NumericDocValues;
-import org.apache.lucene.index.RandomIndexWriter;
-import org.apache.lucene.index.SlowCompositeReaderWrapper;
-import org.apache.lucene.index.SortedDocValues;
-import org.apache.lucene.index.SortedNumericDocValues;
-import org.apache.lucene.index.SortedSetDocValues;
-import org.apache.lucene.index.Term;
-import org.apache.lucene.index.Terms;
-import org.apache.lucene.index.TermsEnum.SeekStatus;
-import org.apache.lucene.index.TermsEnum;
-import org.apache.lucene.index.SortingLeafReader.SortingDocsAndPositionsEnum;
import org.apache.lucene.index.SortingLeafReader.SortingDocsEnum;
+import org.apache.lucene.index.TermsEnum.SeekStatus;
import org.apache.lucene.search.CollectionStatistics;
import org.apache.lucene.search.DocIdSetIterator;
import org.apache.lucene.search.TermStatistics;
@@ -254,7 +235,7 @@ public abstract class SorterTestBase extends LuceneTestCase {
public void testDocsAndPositionsEnum() throws Exception {
TermsEnum termsEnum = sortedReader.terms(DOC_POSITIONS_FIELD).iterator(null);
assertEquals(SeekStatus.FOUND, termsEnum.seekCeil(new BytesRef(DOC_POSITIONS_TERM)));
- DocsAndPositionsEnum sortedPositions = termsEnum.docsAndPositions(null, null);
+ PostingsEnum sortedPositions = termsEnum.postings(null, null, PostingsEnum.FLAG_ALL);
int doc;
// test nextDoc()
@@ -270,10 +251,10 @@ public abstract class SorterTestBase extends LuceneTestCase {
}
// test advance()
- final DocsAndPositionsEnum reuse = sortedPositions;
- sortedPositions = termsEnum.docsAndPositions(null, reuse);
- if (sortedPositions instanceof SortingDocsAndPositionsEnum) {
- assertTrue(((SortingDocsAndPositionsEnum) sortedPositions).reused(reuse)); // make sure reuse worked
+ final PostingsEnum reuse = sortedPositions;
+ sortedPositions = termsEnum.postings(null, reuse, PostingsEnum.FLAG_ALL);
+ if (sortedPositions instanceof SortingDocsEnum) {
+ assertTrue(((SortingDocsEnum) sortedPositions).reused(reuse)); // make sure reuse worked
}
doc = 0;
while ((doc = sortedPositions.advance(doc + TestUtil.nextInt(random(), 1, 5))) != DocIdSetIterator.NO_MORE_DOCS) {
@@ -315,7 +296,7 @@ public abstract class SorterTestBase extends LuceneTestCase {
Bits mappedLiveDocs = randomLiveDocs(sortedReader.maxDoc());
TermsEnum termsEnum = sortedReader.terms(DOCS_ENUM_FIELD).iterator(null);
assertEquals(SeekStatus.FOUND, termsEnum.seekCeil(new BytesRef(DOCS_ENUM_TERM)));
- DocsEnum docs = termsEnum.docs(mappedLiveDocs, null);
+ PostingsEnum docs = termsEnum.postings(mappedLiveDocs, null);
int doc;
int prev = -1;
@@ -330,8 +311,8 @@ public abstract class SorterTestBase extends LuceneTestCase {
assertFalse("document " + prev + " not marked as deleted", mappedLiveDocs == null || mappedLiveDocs.get(prev));
}
- DocsEnum reuse = docs;
- docs = termsEnum.docs(mappedLiveDocs, reuse);
+ PostingsEnum reuse = docs;
+ docs = termsEnum.postings(mappedLiveDocs, reuse);
if (docs instanceof SortingDocsEnum) {
assertTrue(((SortingDocsEnum) docs).reused(reuse)); // make sure reuse worked
}
diff --git a/lucene/queries/src/java/org/apache/lucene/queries/CustomScoreQuery.java b/lucene/queries/src/java/org/apache/lucene/queries/CustomScoreQuery.java
index 6a01a211761..78683be2e30 100644
--- a/lucene/queries/src/java/org/apache/lucene/queries/CustomScoreQuery.java
+++ b/lucene/queries/src/java/org/apache/lucene/queries/CustomScoreQuery.java
@@ -18,23 +18,24 @@ package org.apache.lucene.queries;
*/
import java.io.IOException;
+import java.util.Arrays;
import java.util.Collection;
import java.util.Collections;
import java.util.Set;
-import java.util.Arrays;
-import org.apache.lucene.index.LeafReaderContext;
import org.apache.lucene.index.IndexReader;
+import org.apache.lucene.index.LeafReaderContext;
import org.apache.lucene.index.Term;
import org.apache.lucene.queries.function.FunctionQuery;
import org.apache.lucene.queries.function.ValueSource;
import org.apache.lucene.search.ComplexExplanation;
import org.apache.lucene.search.Explanation;
-import org.apache.lucene.search.Query;
-import org.apache.lucene.search.Weight;
-import org.apache.lucene.search.Scorer;
import org.apache.lucene.search.IndexSearcher;
+import org.apache.lucene.search.Query;
+import org.apache.lucene.search.Scorer;
+import org.apache.lucene.search.Weight;
import org.apache.lucene.util.Bits;
+import org.apache.lucene.util.BytesRef;
import org.apache.lucene.util.ToStringUtils;
/**
@@ -286,6 +287,8 @@ public class CustomScoreQuery extends Query {
private final CustomScoreProvider provider;
private final float[] vScores; // reused in score() to avoid allocating this array for each doc
+ // TODO : can we use FilterScorer here instead?
+
// constructor
private CustomScorer(CustomScoreProvider provider, CustomWeight w, float qWeight,
Scorer subQueryScorer, Scorer[] valSrcScorers) {
@@ -327,6 +330,26 @@ public class CustomScoreQuery extends Query {
return subQueryScorer.freq();
}
+ @Override
+ public int nextPosition() throws IOException {
+ return subQueryScorer.nextPosition();
+ }
+
+ @Override
+ public int startOffset() throws IOException {
+ return subQueryScorer.startOffset();
+ }
+
+ @Override
+ public int endOffset() throws IOException {
+ return subQueryScorer.endOffset();
+ }
+
+ @Override
+ public BytesRef getPayload() throws IOException {
+ return subQueryScorer.getPayload();
+ }
+
@Override
public Collection getChildren() {
return Collections.singleton(new ChildScorer(subQueryScorer, "CUSTOM"));
diff --git a/lucene/queries/src/java/org/apache/lucene/queries/TermFilter.java b/lucene/queries/src/java/org/apache/lucene/queries/TermFilter.java
index c0759844094..a6be527ae50 100644
--- a/lucene/queries/src/java/org/apache/lucene/queries/TermFilter.java
+++ b/lucene/queries/src/java/org/apache/lucene/queries/TermFilter.java
@@ -18,7 +18,7 @@ package org.apache.lucene.queries;
*/
import org.apache.lucene.index.LeafReaderContext;
-import org.apache.lucene.index.DocsEnum;
+import org.apache.lucene.index.PostingsEnum;
import org.apache.lucene.index.Term;
import org.apache.lucene.index.Terms;
import org.apache.lucene.index.TermsEnum;
@@ -69,7 +69,7 @@ final public class TermFilter extends Filter {
return new DocIdSet() {
@Override
public DocIdSetIterator iterator() throws IOException {
- return termsEnum.docs(acceptDocs, null, DocsEnum.FLAG_NONE);
+ return termsEnum.postings(acceptDocs, null, PostingsEnum.FLAG_NONE);
}
@Override
diff --git a/lucene/queries/src/java/org/apache/lucene/queries/TermsFilter.java b/lucene/queries/src/java/org/apache/lucene/queries/TermsFilter.java
index b9b7e0f74c3..8c07d6861d0 100644
--- a/lucene/queries/src/java/org/apache/lucene/queries/TermsFilter.java
+++ b/lucene/queries/src/java/org/apache/lucene/queries/TermsFilter.java
@@ -24,7 +24,7 @@ import java.util.Collections;
import java.util.Iterator;
import java.util.List;
-import org.apache.lucene.index.DocsEnum;
+import org.apache.lucene.index.PostingsEnum;
import org.apache.lucene.index.Fields;
import org.apache.lucene.index.LeafReader;
import org.apache.lucene.index.LeafReaderContext;
@@ -199,7 +199,7 @@ public final class TermsFilter extends Filter implements Accountable {
final BytesRef spare = new BytesRef(this.termsBytes);
Terms terms = null;
TermsEnum termsEnum = null;
- DocsEnum docs = null;
+ PostingsEnum docs = null;
for (TermsAndField termsAndField : this.termsAndFields) {
if ((terms = fields.terms(termsAndField.field)) != null) {
termsEnum = terms.iterator(termsEnum); // this won't return null
@@ -207,7 +207,7 @@ public final class TermsFilter extends Filter implements Accountable {
spare.offset = offsets[i];
spare.length = offsets[i+1] - offsets[i];
if (termsEnum.seekExact(spare)) {
- docs = termsEnum.docs(acceptDocs, docs, DocsEnum.FLAG_NONE); // no freq since we don't need them
+ docs = termsEnum.postings(acceptDocs, docs, PostingsEnum.FLAG_NONE); // no freq since we don't need them
builder.or(docs);
}
}
diff --git a/lucene/queries/src/java/org/apache/lucene/queries/function/BoostedQuery.java b/lucene/queries/src/java/org/apache/lucene/queries/function/BoostedQuery.java
index 157c66f8c50..2c76b00fa6f 100644
--- a/lucene/queries/src/java/org/apache/lucene/queries/function/BoostedQuery.java
+++ b/lucene/queries/src/java/org/apache/lucene/queries/function/BoostedQuery.java
@@ -17,18 +17,24 @@ package org.apache.lucene.queries.function;
* limitations under the License.
*/
-import org.apache.lucene.index.LeafReaderContext;
-import org.apache.lucene.search.*;
-import org.apache.lucene.index.IndexReader;
-import org.apache.lucene.index.Term;
-import org.apache.lucene.util.Bits;
-import org.apache.lucene.util.ToStringUtils;
-
import java.io.IOException;
import java.util.Collection;
import java.util.Collections;
-import java.util.Set;
import java.util.Map;
+import java.util.Set;
+
+import org.apache.lucene.index.IndexReader;
+import org.apache.lucene.index.LeafReaderContext;
+import org.apache.lucene.index.Term;
+import org.apache.lucene.search.ComplexExplanation;
+import org.apache.lucene.search.Explanation;
+import org.apache.lucene.search.FilterScorer;
+import org.apache.lucene.search.IndexSearcher;
+import org.apache.lucene.search.Query;
+import org.apache.lucene.search.Scorer;
+import org.apache.lucene.search.Weight;
+import org.apache.lucene.util.Bits;
+import org.apache.lucene.util.ToStringUtils;
/**
* Query that is boosted by a ValueSource
@@ -122,41 +128,24 @@ public class BoostedQuery extends Query {
}
- private class CustomScorer extends Scorer {
+ private class CustomScorer extends FilterScorer {
private final BoostedQuery.BoostedWeight weight;
private final float qWeight;
- private final Scorer scorer;
private final FunctionValues vals;
private final LeafReaderContext readerContext;
private CustomScorer(LeafReaderContext readerContext, BoostedQuery.BoostedWeight w, float qWeight,
Scorer scorer, ValueSource vs) throws IOException {
- super(w);
+ super(scorer);
this.weight = w;
this.qWeight = qWeight;
- this.scorer = scorer;
this.readerContext = readerContext;
this.vals = vs.getValues(weight.fcontext, readerContext);
}
- @Override
- public int docID() {
- return scorer.docID();
- }
-
- @Override
- public int advance(int target) throws IOException {
- return scorer.advance(target);
- }
-
- @Override
- public int nextDoc() throws IOException {
- return scorer.nextDoc();
- }
-
@Override
public float score() throws IOException {
- float score = qWeight * scorer.score() * vals.floatVal(scorer.docID());
+ float score = qWeight * in.score() * vals.floatVal(in.docID());
// Current Lucene priority queues can't handle NaN and -Infinity, so
// map to -Float.MAX_VALUE. This conditional handles both -infinity
@@ -164,14 +153,9 @@ public class BoostedQuery extends Query {
return score>Float.NEGATIVE_INFINITY ? score : -Float.MAX_VALUE;
}
- @Override
- public int freq() throws IOException {
- return scorer.freq();
- }
-
@Override
public Collection getChildren() {
- return Collections.singleton(new ChildScorer(scorer, "CUSTOM"));
+ return Collections.singleton(new ChildScorer(in, "CUSTOM"));
}
public Explanation explain(int doc) throws IOException {
@@ -187,10 +171,6 @@ public class BoostedQuery extends Query {
return res;
}
- @Override
- public long cost() {
- return scorer.cost();
- }
}
diff --git a/lucene/queries/src/java/org/apache/lucene/queries/function/FunctionQuery.java b/lucene/queries/src/java/org/apache/lucene/queries/function/FunctionQuery.java
index 811b656ec12..d5e647ef62d 100644
--- a/lucene/queries/src/java/org/apache/lucene/queries/function/FunctionQuery.java
+++ b/lucene/queries/src/java/org/apache/lucene/queries/function/FunctionQuery.java
@@ -17,15 +17,21 @@ package org.apache.lucene.queries.function;
* limitations under the License.
*/
-import org.apache.lucene.index.LeafReaderContext;
-import org.apache.lucene.index.IndexReader;
-import org.apache.lucene.index.Term;
-import org.apache.lucene.search.*;
-import org.apache.lucene.util.Bits;
-
import java.io.IOException;
-import java.util.Set;
import java.util.Map;
+import java.util.Set;
+
+import org.apache.lucene.index.IndexReader;
+import org.apache.lucene.index.LeafReaderContext;
+import org.apache.lucene.index.Term;
+import org.apache.lucene.search.ComplexExplanation;
+import org.apache.lucene.search.Explanation;
+import org.apache.lucene.search.IndexSearcher;
+import org.apache.lucene.search.Query;
+import org.apache.lucene.search.Scorer;
+import org.apache.lucene.search.Weight;
+import org.apache.lucene.util.Bits;
+import org.apache.lucene.util.BytesRef;
/**
@@ -166,6 +172,26 @@ public class FunctionQuery extends Query {
return 1;
}
+ @Override
+ public int nextPosition() throws IOException {
+ return -1;
+ }
+
+ @Override
+ public int startOffset() throws IOException {
+ return -1;
+ }
+
+ @Override
+ public int endOffset() throws IOException {
+ return -1;
+ }
+
+ @Override
+ public BytesRef getPayload() throws IOException {
+ return null;
+ }
+
public Explanation explain(int doc) throws IOException {
float sc = qWeight * vals.floatVal(doc);
@@ -177,6 +203,7 @@ public class FunctionQuery extends Query {
result.addDetail(new Explanation(weight.queryNorm,"queryNorm"));
return result;
}
+
}
diff --git a/lucene/queries/src/java/org/apache/lucene/queries/function/ValueSourceScorer.java b/lucene/queries/src/java/org/apache/lucene/queries/function/ValueSourceScorer.java
index ec8aced2305..5b786c0ebd7 100644
--- a/lucene/queries/src/java/org/apache/lucene/queries/function/ValueSourceScorer.java
+++ b/lucene/queries/src/java/org/apache/lucene/queries/function/ValueSourceScorer.java
@@ -17,12 +17,13 @@ package org.apache.lucene.queries.function;
* limitations under the License.
*/
+import java.io.IOException;
+
import org.apache.lucene.index.IndexReader;
import org.apache.lucene.index.MultiFields;
import org.apache.lucene.search.Scorer;
import org.apache.lucene.util.Bits;
-
-import java.io.IOException;
+import org.apache.lucene.util.BytesRef;
/**
* {@link Scorer} which returns the result of {@link FunctionValues#floatVal(int)} as
@@ -92,6 +93,26 @@ public class ValueSourceScorer extends Scorer {
return 1;
}
+ @Override
+ public int nextPosition() throws IOException {
+ return -1;
+ }
+
+ @Override
+ public int startOffset() throws IOException {
+ return -1;
+ }
+
+ @Override
+ public int endOffset() throws IOException {
+ return -1;
+ }
+
+ @Override
+ public BytesRef getPayload() throws IOException {
+ return null;
+ }
+
@Override
public long cost() {
return maxDoc;
diff --git a/lucene/queries/src/java/org/apache/lucene/queries/function/valuesource/QueryValueSource.java b/lucene/queries/src/java/org/apache/lucene/queries/function/valuesource/QueryValueSource.java
index 2b565dc891c..dab719471d3 100644
--- a/lucene/queries/src/java/org/apache/lucene/queries/function/valuesource/QueryValueSource.java
+++ b/lucene/queries/src/java/org/apache/lucene/queries/function/valuesource/QueryValueSource.java
@@ -17,19 +17,22 @@
package org.apache.lucene.queries.function.valuesource;
+import java.io.IOException;
+import java.util.Map;
+
import org.apache.lucene.index.LeafReaderContext;
import org.apache.lucene.index.ReaderUtil;
import org.apache.lucene.queries.function.FunctionValues;
import org.apache.lucene.queries.function.ValueSource;
import org.apache.lucene.queries.function.docvalues.FloatDocValues;
-import org.apache.lucene.search.*;
+import org.apache.lucene.search.IndexSearcher;
+import org.apache.lucene.search.Query;
+import org.apache.lucene.search.Scorer;
+import org.apache.lucene.search.Weight;
import org.apache.lucene.util.Bits;
import org.apache.lucene.util.mutable.MutableValue;
import org.apache.lucene.util.mutable.MutableValueFloat;
-import java.io.IOException;
-import java.util.Map;
-
/**
* QueryValueSource returns the relevance score of the query
*/
diff --git a/lucene/queries/src/java/org/apache/lucene/queries/function/valuesource/TFValueSource.java b/lucene/queries/src/java/org/apache/lucene/queries/function/valuesource/TFValueSource.java
index 4d73d559ec2..e468df6ca19 100644
--- a/lucene/queries/src/java/org/apache/lucene/queries/function/valuesource/TFValueSource.java
+++ b/lucene/queries/src/java/org/apache/lucene/queries/function/valuesource/TFValueSource.java
@@ -17,7 +17,14 @@ package org.apache.lucene.queries.function.valuesource;
* limitations under the License.
*/
-import org.apache.lucene.index.*;
+import java.io.IOException;
+import java.util.Map;
+
+import org.apache.lucene.index.PostingsEnum;
+import org.apache.lucene.index.Fields;
+import org.apache.lucene.index.LeafReaderContext;
+import org.apache.lucene.index.Terms;
+import org.apache.lucene.index.TermsEnum;
import org.apache.lucene.queries.function.FunctionValues;
import org.apache.lucene.queries.function.docvalues.FloatDocValues;
import org.apache.lucene.search.DocIdSetIterator;
@@ -25,9 +32,6 @@ import org.apache.lucene.search.IndexSearcher;
import org.apache.lucene.search.similarities.TFIDFSimilarity;
import org.apache.lucene.util.BytesRef;
-import java.io.IOException;
-import java.util.Map;
-
/**
* Function that returns {@link TFIDFSimilarity#tf(float)}
* for every document.
@@ -56,7 +60,7 @@ public class TFValueSource extends TermFreqValueSource {
}
return new FloatDocValues(this) {
- DocsEnum docs ;
+ PostingsEnum docs ;
int atDoc;
int lastDocRequested = -1;
@@ -68,7 +72,7 @@ public class TFValueSource extends TermFreqValueSource {
if (terms != null) {
final TermsEnum termsEnum = terms.iterator(null);
if (termsEnum.seekExact(indexedBytes)) {
- docs = termsEnum.docs(null, null);
+ docs = termsEnum.postings(null, null);
} else {
docs = null;
}
@@ -77,12 +81,32 @@ public class TFValueSource extends TermFreqValueSource {
}
if (docs == null) {
- docs = new DocsEnum() {
+ docs = new PostingsEnum() {
@Override
public int freq() {
return 0;
}
+ @Override
+ public int nextPosition() throws IOException {
+ return -1;
+ }
+
+ @Override
+ public int startOffset() throws IOException {
+ return -1;
+ }
+
+ @Override
+ public int endOffset() throws IOException {
+ return -1;
+ }
+
+ @Override
+ public BytesRef getPayload() throws IOException {
+ return null;
+ }
+
@Override
public int docID() {
return DocIdSetIterator.NO_MORE_DOCS;
diff --git a/lucene/queries/src/java/org/apache/lucene/queries/function/valuesource/TermFreqValueSource.java b/lucene/queries/src/java/org/apache/lucene/queries/function/valuesource/TermFreqValueSource.java
index b5e4bc28ea1..b63f4ce4700 100644
--- a/lucene/queries/src/java/org/apache/lucene/queries/function/valuesource/TermFreqValueSource.java
+++ b/lucene/queries/src/java/org/apache/lucene/queries/function/valuesource/TermFreqValueSource.java
@@ -17,17 +17,21 @@
package org.apache.lucene.queries.function.valuesource;
-import org.apache.lucene.index.*;
+import java.io.IOException;
+import java.util.Map;
+
+import org.apache.lucene.index.PostingsEnum;
+import org.apache.lucene.index.Fields;
+import org.apache.lucene.index.LeafReaderContext;
+import org.apache.lucene.index.Terms;
+import org.apache.lucene.index.TermsEnum;
import org.apache.lucene.queries.function.FunctionValues;
import org.apache.lucene.queries.function.docvalues.IntDocValues;
import org.apache.lucene.search.DocIdSetIterator;
import org.apache.lucene.util.BytesRef;
-import java.io.IOException;
-import java.util.Map;
-
/**
- * Function that returns {@link DocsEnum#freq()} for the
+ * Function that returns {@link org.apache.lucene.index.PostingsEnum#freq()} for the
* supplied term in every document.
*
* If the term does not exist in the document, returns 0.
@@ -49,7 +53,7 @@ public class TermFreqValueSource extends DocFreqValueSource {
final Terms terms = fields.terms(indexedField);
return new IntDocValues(this) {
- DocsEnum docs ;
+ PostingsEnum docs ;
int atDoc;
int lastDocRequested = -1;
@@ -61,7 +65,7 @@ public class TermFreqValueSource extends DocFreqValueSource {
if (terms != null) {
final TermsEnum termsEnum = terms.iterator(null);
if (termsEnum.seekExact(indexedBytes)) {
- docs = termsEnum.docs(null, null);
+ docs = termsEnum.postings(null, null);
} else {
docs = null;
}
@@ -70,12 +74,32 @@ public class TermFreqValueSource extends DocFreqValueSource {
}
if (docs == null) {
- docs = new DocsEnum() {
+ docs = new PostingsEnum() {
@Override
public int freq() {
return 0;
}
+ @Override
+ public int nextPosition() throws IOException {
+ return -1;
+ }
+
+ @Override
+ public int startOffset() throws IOException {
+ return -1;
+ }
+
+ @Override
+ public int endOffset() throws IOException {
+ return -1;
+ }
+
+ @Override
+ public BytesRef getPayload() throws IOException {
+ throw new UnsupportedOperationException();
+ }
+
@Override
public int docID() {
return DocIdSetIterator.NO_MORE_DOCS;
diff --git a/lucene/sandbox/src/java/org/apache/lucene/codecs/idversion/IDVersionPostingsReader.java b/lucene/sandbox/src/java/org/apache/lucene/codecs/idversion/IDVersionPostingsReader.java
index b716acc8e8c..4ffa359efec 100644
--- a/lucene/sandbox/src/java/org/apache/lucene/codecs/idversion/IDVersionPostingsReader.java
+++ b/lucene/sandbox/src/java/org/apache/lucene/codecs/idversion/IDVersionPostingsReader.java
@@ -22,8 +22,7 @@ import java.io.IOException;
import org.apache.lucene.codecs.BlockTermState;
import org.apache.lucene.codecs.CodecUtil;
import org.apache.lucene.codecs.PostingsReaderBase;
-import org.apache.lucene.index.DocsAndPositionsEnum;
-import org.apache.lucene.index.DocsEnum;
+import org.apache.lucene.index.PostingsEnum;
import org.apache.lucene.index.FieldInfo;
import org.apache.lucene.index.SegmentReadState;
import org.apache.lucene.store.DataInput;
@@ -64,9 +63,22 @@ final class IDVersionPostingsReader extends PostingsReaderBase {
}
@Override
- public DocsEnum docs(FieldInfo fieldInfo, BlockTermState termState, Bits liveDocs, DocsEnum reuse, int flags) throws IOException {
+ public PostingsEnum postings(FieldInfo fieldInfo, BlockTermState termState, Bits liveDocs, PostingsEnum reuse, int flags) throws IOException {
SingleDocsEnum docsEnum;
+ if (PostingsEnum.requiresPositions(flags)) {
+ SinglePostingsEnum posEnum;
+
+ if (reuse instanceof SinglePostingsEnum) {
+ posEnum = (SinglePostingsEnum) reuse;
+ } else {
+ posEnum = new SinglePostingsEnum();
+ }
+ IDVersionTermState _termState = (IDVersionTermState) termState;
+ posEnum.reset(_termState.docID, _termState.idVersion, liveDocs);
+ return posEnum;
+ }
+
if (reuse instanceof SingleDocsEnum) {
docsEnum = (SingleDocsEnum) reuse;
} else {
@@ -77,21 +89,6 @@ final class IDVersionPostingsReader extends PostingsReaderBase {
return docsEnum;
}
- @Override
- public DocsAndPositionsEnum docsAndPositions(FieldInfo fieldInfo, BlockTermState _termState, Bits liveDocs,
- DocsAndPositionsEnum reuse, int flags) {
- SingleDocsAndPositionsEnum posEnum;
-
- if (reuse instanceof SingleDocsAndPositionsEnum) {
- posEnum = (SingleDocsAndPositionsEnum) reuse;
- } else {
- posEnum = new SingleDocsAndPositionsEnum();
- }
- IDVersionTermState termState = (IDVersionTermState) _termState;
- posEnum.reset(termState.docID, termState.idVersion, liveDocs);
- return posEnum;
- }
-
@Override
public long ramBytesUsed() {
return 0;
diff --git a/lucene/sandbox/src/java/org/apache/lucene/codecs/idversion/IDVersionSegmentTermsEnum.java b/lucene/sandbox/src/java/org/apache/lucene/codecs/idversion/IDVersionSegmentTermsEnum.java
index 481e74d9ad6..3a7e5343a65 100644
--- a/lucene/sandbox/src/java/org/apache/lucene/codecs/idversion/IDVersionSegmentTermsEnum.java
+++ b/lucene/sandbox/src/java/org/apache/lucene/codecs/idversion/IDVersionSegmentTermsEnum.java
@@ -21,9 +21,7 @@ import java.io.IOException;
import java.io.PrintStream;
import org.apache.lucene.codecs.BlockTermState;
-import org.apache.lucene.index.DocsAndPositionsEnum;
-import org.apache.lucene.index.DocsEnum;
-import org.apache.lucene.index.IndexOptions;
+import org.apache.lucene.index.PostingsEnum;
import org.apache.lucene.index.TermState;
import org.apache.lucene.index.TermsEnum;
import org.apache.lucene.store.ByteArrayDataInput;
@@ -997,7 +995,7 @@ public final class IDVersionSegmentTermsEnum extends TermsEnum {
}
@Override
- public DocsEnum docs(Bits skipDocs, DocsEnum reuse, int flags) throws IOException {
+ public PostingsEnum postings(Bits skipDocs, PostingsEnum reuse, int flags) throws IOException {
assert !eof;
//if (DEBUG) {
//System.out.println("BTTR.docs seg=" + segment);
@@ -1006,19 +1004,7 @@ public final class IDVersionSegmentTermsEnum extends TermsEnum {
//if (DEBUG) {
//System.out.println(" state=" + currentFrame.state);
//}
- return fr.parent.postingsReader.docs(fr.fieldInfo, currentFrame.state, skipDocs, reuse, flags);
- }
-
- @Override
- public DocsAndPositionsEnum docsAndPositions(Bits skipDocs, DocsAndPositionsEnum reuse, int flags) throws IOException {
- if (fr.fieldInfo.getIndexOptions().compareTo(IndexOptions.DOCS_AND_FREQS_AND_POSITIONS) < 0) {
- // Positions were not indexed:
- return null;
- }
-
- assert !eof;
- currentFrame.decodeMetaData();
- return fr.parent.postingsReader.docsAndPositions(fr.fieldInfo, currentFrame.state, skipDocs, reuse, flags);
+ return fr.parent.postingsReader.postings(fr.fieldInfo, currentFrame.state, skipDocs, reuse, flags);
}
@Override
diff --git a/lucene/sandbox/src/java/org/apache/lucene/codecs/idversion/SingleDocsEnum.java b/lucene/sandbox/src/java/org/apache/lucene/codecs/idversion/SingleDocsEnum.java
index b29619c7af6..caed5cd907f 100644
--- a/lucene/sandbox/src/java/org/apache/lucene/codecs/idversion/SingleDocsEnum.java
+++ b/lucene/sandbox/src/java/org/apache/lucene/codecs/idversion/SingleDocsEnum.java
@@ -17,10 +17,13 @@ package org.apache.lucene.codecs.idversion;
* limitations under the License.
*/
-import org.apache.lucene.index.DocsEnum;
-import org.apache.lucene.util.Bits;
+import java.io.IOException;
-class SingleDocsEnum extends DocsEnum {
+import org.apache.lucene.index.PostingsEnum;
+import org.apache.lucene.util.Bits;
+import org.apache.lucene.util.BytesRef;
+
+class SingleDocsEnum extends PostingsEnum {
private int doc;
private int singleDocID;
@@ -68,4 +71,24 @@ class SingleDocsEnum extends DocsEnum {
public int freq() {
return 1;
}
+
+ @Override
+ public int nextPosition() throws IOException {
+ return -1;
+ }
+
+ @Override
+ public int startOffset() throws IOException {
+ return -1;
+ }
+
+ @Override
+ public int endOffset() throws IOException {
+ return -1;
+ }
+
+ @Override
+ public BytesRef getPayload() throws IOException {
+ throw new UnsupportedOperationException();
+ }
}
diff --git a/lucene/sandbox/src/java/org/apache/lucene/codecs/idversion/SingleDocsAndPositionsEnum.java b/lucene/sandbox/src/java/org/apache/lucene/codecs/idversion/SinglePostingsEnum.java
similarity index 93%
rename from lucene/sandbox/src/java/org/apache/lucene/codecs/idversion/SingleDocsAndPositionsEnum.java
rename to lucene/sandbox/src/java/org/apache/lucene/codecs/idversion/SinglePostingsEnum.java
index eecc700c44a..63edbe5c189 100644
--- a/lucene/sandbox/src/java/org/apache/lucene/codecs/idversion/SingleDocsAndPositionsEnum.java
+++ b/lucene/sandbox/src/java/org/apache/lucene/codecs/idversion/SinglePostingsEnum.java
@@ -17,11 +17,11 @@ package org.apache.lucene.codecs.idversion;
* limitations under the License.
*/
-import org.apache.lucene.index.DocsAndPositionsEnum;
+import org.apache.lucene.index.PostingsEnum;
import org.apache.lucene.util.Bits;
import org.apache.lucene.util.BytesRef;
-class SingleDocsAndPositionsEnum extends DocsAndPositionsEnum {
+class SinglePostingsEnum extends PostingsEnum {
private int doc;
private int pos;
private int singleDocID;
@@ -29,7 +29,7 @@ class SingleDocsAndPositionsEnum extends DocsAndPositionsEnum {
private long version;
private final BytesRef payload;
- public SingleDocsAndPositionsEnum() {
+ public SinglePostingsEnum() {
payload = new BytesRef(8);
payload.length = 8;
}
diff --git a/lucene/sandbox/src/java/org/apache/lucene/sandbox/queries/DuplicateFilter.java b/lucene/sandbox/src/java/org/apache/lucene/sandbox/queries/DuplicateFilter.java
index 826614c3dc0..3648003534d 100644
--- a/lucene/sandbox/src/java/org/apache/lucene/sandbox/queries/DuplicateFilter.java
+++ b/lucene/sandbox/src/java/org/apache/lucene/sandbox/queries/DuplicateFilter.java
@@ -18,7 +18,7 @@ package org.apache.lucene.sandbox.queries;
import java.io.IOException;
-import org.apache.lucene.index.DocsEnum;
+import org.apache.lucene.index.PostingsEnum;
import org.apache.lucene.index.LeafReader;
import org.apache.lucene.index.LeafReaderContext;
import org.apache.lucene.index.SlowCompositeReaderWrapper;
@@ -100,13 +100,13 @@ public class DuplicateFilter extends Filter {
if (terms != null) {
TermsEnum termsEnum = terms.iterator(null);
- DocsEnum docs = null;
+ PostingsEnum docs = null;
while (true) {
BytesRef currTerm = termsEnum.next();
if (currTerm == null) {
break;
} else {
- docs = termsEnum.docs(acceptDocs, docs, DocsEnum.FLAG_NONE);
+ docs = termsEnum.postings(acceptDocs, docs, PostingsEnum.FLAG_NONE);
int doc = docs.nextDoc();
if (doc != DocIdSetIterator.NO_MORE_DOCS) {
if (keepMode == KeepMode.KM_USE_FIRST_OCCURRENCE) {
@@ -136,7 +136,7 @@ public class DuplicateFilter extends Filter {
if (terms != null) {
TermsEnum termsEnum = terms.iterator(null);
- DocsEnum docs = null;
+ PostingsEnum docs = null;
while (true) {
BytesRef currTerm = termsEnum.next();
if (currTerm == null) {
@@ -144,7 +144,7 @@ public class DuplicateFilter extends Filter {
} else {
if (termsEnum.docFreq() > 1) {
// unset potential duplicates
- docs = termsEnum.docs(acceptDocs, docs, DocsEnum.FLAG_NONE);
+ docs = termsEnum.postings(acceptDocs, docs, PostingsEnum.FLAG_NONE);
int doc = docs.nextDoc();
if (doc != DocIdSetIterator.NO_MORE_DOCS) {
if (keepMode == KeepMode.KM_USE_FIRST_OCCURRENCE) {
diff --git a/lucene/sandbox/src/java/org/apache/lucene/search/TermAutomatonQuery.java b/lucene/sandbox/src/java/org/apache/lucene/search/TermAutomatonQuery.java
index 752271838b2..428fe32178a 100644
--- a/lucene/sandbox/src/java/org/apache/lucene/search/TermAutomatonQuery.java
+++ b/lucene/sandbox/src/java/org/apache/lucene/search/TermAutomatonQuery.java
@@ -24,7 +24,7 @@ import java.util.List;
import java.util.Map;
import java.util.Set;
-import org.apache.lucene.index.DocsAndPositionsEnum;
+import org.apache.lucene.index.PostingsEnum;
import org.apache.lucene.index.IndexReaderContext;
import org.apache.lucene.index.LeafReaderContext;
import org.apache.lucene.index.ReaderUtil;
@@ -325,7 +325,7 @@ public class TermAutomatonQuery extends Query {
static class EnumAndScorer {
public final int termID;
- public final DocsAndPositionsEnum posEnum;
+ public final PostingsEnum posEnum;
// How many positions left in the current document:
public int posLeft;
@@ -333,7 +333,7 @@ public class TermAutomatonQuery extends Query {
// Current position
public int pos;
- public EnumAndScorer(int termID, DocsAndPositionsEnum posEnum) {
+ public EnumAndScorer(int termID, PostingsEnum posEnum) {
this.termID = termID;
this.posEnum = posEnum;
}
@@ -399,8 +399,7 @@ public class TermAutomatonQuery extends Query {
TermsEnum termsEnum = context.reader().terms(field).iterator(null);
termsEnum.seekExact(term, state);
- enums[ent.getKey()] = new EnumAndScorer(ent.getKey(),
- termsEnum.docsAndPositions(acceptDocs, null, 0));
+ enums[ent.getKey()] = new EnumAndScorer(ent.getKey(), termsEnum.postings(acceptDocs, null, PostingsEnum.FLAG_POSITIONS));
}
}
diff --git a/lucene/sandbox/src/java/org/apache/lucene/search/TermAutomatonScorer.java b/lucene/sandbox/src/java/org/apache/lucene/search/TermAutomatonScorer.java
index 106c307574a..c7f25caebc7 100644
--- a/lucene/sandbox/src/java/org/apache/lucene/search/TermAutomatonScorer.java
+++ b/lucene/sandbox/src/java/org/apache/lucene/search/TermAutomatonScorer.java
@@ -325,6 +325,26 @@ class TermAutomatonScorer extends Scorer {
return freq;
}
+ @Override
+ public int nextPosition() throws IOException {
+ return -1; // TODO can we get positional information out of this Scorer?
+ }
+
+ @Override
+ public int startOffset() throws IOException {
+ return -1;
+ }
+
+ @Override
+ public int endOffset() throws IOException {
+ return -1;
+ }
+
+ @Override
+ public BytesRef getPayload() throws IOException {
+ return null;
+ }
+
@Override
public int docID() {
return docID;
diff --git a/lucene/sandbox/src/test/org/apache/lucene/codecs/idversion/TestIDVersionPostingsFormat.java b/lucene/sandbox/src/test/org/apache/lucene/codecs/idversion/TestIDVersionPostingsFormat.java
index b8cfe3dc4bb..64137aa4b59 100644
--- a/lucene/sandbox/src/test/org/apache/lucene/codecs/idversion/TestIDVersionPostingsFormat.java
+++ b/lucene/sandbox/src/test/org/apache/lucene/codecs/idversion/TestIDVersionPostingsFormat.java
@@ -30,7 +30,6 @@ import java.util.concurrent.CountDownLatch;
import java.util.concurrent.atomic.AtomicLong;
import org.apache.lucene.analysis.Analyzer;
-import org.apache.lucene.analysis.Analyzer.TokenStreamComponents;
import org.apache.lucene.analysis.MockAnalyzer;
import org.apache.lucene.analysis.MockTokenFilter;
import org.apache.lucene.analysis.MockTokenizer;
@@ -39,7 +38,7 @@ import org.apache.lucene.document.Document;
import org.apache.lucene.document.Field;
import org.apache.lucene.document.FieldType;
import org.apache.lucene.index.ConcurrentMergeScheduler;
-import org.apache.lucene.index.DocsEnum;
+import org.apache.lucene.index.PostingsEnum;
import org.apache.lucene.index.IndexReader;
import org.apache.lucene.index.IndexWriter;
import org.apache.lucene.index.IndexWriterConfig;
@@ -331,9 +330,9 @@ public class TestIDVersionPostingsFormat extends LuceneTestCase {
if (VERBOSE) {
System.out.println(" found in seg=" + termsEnums[seg]);
}
- docsEnums[seg] = termsEnums[seg].docs(liveDocs[seg], docsEnums[seg], 0);
- int docID = docsEnums[seg].nextDoc();
- if (docID != DocsEnum.NO_MORE_DOCS) {
+ postingsEnums[seg] = termsEnums[seg].postings(liveDocs[seg], postingsEnums[seg], 0);
+ int docID = postingsEnums[seg].nextDoc();
+ if (docID != PostingsEnum.NO_MORE_DOCS) {
lastVersion = ((IDVersionSegmentTermsEnum) termsEnums[seg]).getVersion();
return docBases[seg] + docID;
}
diff --git a/lucene/sandbox/src/test/org/apache/lucene/sandbox/queries/DuplicateFilterTest.java b/lucene/sandbox/src/test/org/apache/lucene/sandbox/queries/DuplicateFilterTest.java
index ffe8c346aea..6ba5e5613e0 100644
--- a/lucene/sandbox/src/test/org/apache/lucene/sandbox/queries/DuplicateFilterTest.java
+++ b/lucene/sandbox/src/test/org/apache/lucene/sandbox/queries/DuplicateFilterTest.java
@@ -134,7 +134,7 @@ public class DuplicateFilterTest extends LuceneTestCase {
for (ScoreDoc hit : hits) {
StoredDocument d = searcher.doc(hit.doc);
String url = d.get(KEY_FIELD);
- DocsEnum td = TestUtil.docs(random(), reader,
+ PostingsEnum td = TestUtil.docs(random(), reader,
KEY_FIELD,
new BytesRef(url),
MultiFields.getLiveDocs(reader),
@@ -158,7 +158,7 @@ public class DuplicateFilterTest extends LuceneTestCase {
for (ScoreDoc hit : hits) {
StoredDocument d = searcher.doc(hit.doc);
String url = d.get(KEY_FIELD);
- DocsEnum td = TestUtil.docs(random(), reader,
+ PostingsEnum td = TestUtil.docs(random(), reader,
KEY_FIELD,
new BytesRef(url),
MultiFields.getLiveDocs(reader),
diff --git a/lucene/spatial/src/java/org/apache/lucene/spatial/prefix/AbstractPrefixTreeFilter.java b/lucene/spatial/src/java/org/apache/lucene/spatial/prefix/AbstractPrefixTreeFilter.java
index 6a2e2834d9f..47f3bc50979 100644
--- a/lucene/spatial/src/java/org/apache/lucene/spatial/prefix/AbstractPrefixTreeFilter.java
+++ b/lucene/spatial/src/java/org/apache/lucene/spatial/prefix/AbstractPrefixTreeFilter.java
@@ -20,7 +20,7 @@ package org.apache.lucene.spatial.prefix;
import java.io.IOException;
import com.spatial4j.core.shape.Shape;
-import org.apache.lucene.index.DocsEnum;
+import org.apache.lucene.index.PostingsEnum;
import org.apache.lucene.index.LeafReader;
import org.apache.lucene.index.LeafReaderContext;
import org.apache.lucene.index.Terms;
@@ -80,7 +80,7 @@ public abstract class AbstractPrefixTreeFilter extends Filter {
protected final int maxDoc;
protected TermsEnum termsEnum;//remember to check for null!
- protected DocsEnum docsEnum;
+ protected PostingsEnum postingsEnum;
public BaseTermsEnumTraverser(LeafReaderContext context, Bits acceptDocs) throws IOException {
this.context = context;
@@ -94,8 +94,8 @@ public abstract class AbstractPrefixTreeFilter extends Filter {
protected void collectDocs(BitSet bitSet) throws IOException {
assert termsEnum != null;
- docsEnum = termsEnum.docs(acceptDocs, docsEnum, DocsEnum.FLAG_NONE);
- bitSet.or(docsEnum);
+ postingsEnum = termsEnum.postings(acceptDocs, postingsEnum, PostingsEnum.FLAG_NONE);
+ bitSet.or(postingsEnum);
}
}
diff --git a/lucene/spatial/src/java/org/apache/lucene/spatial/prefix/ContainsPrefixTreeFilter.java b/lucene/spatial/src/java/org/apache/lucene/spatial/prefix/ContainsPrefixTreeFilter.java
index 2ffa86b446e..108effee4e7 100644
--- a/lucene/spatial/src/java/org/apache/lucene/spatial/prefix/ContainsPrefixTreeFilter.java
+++ b/lucene/spatial/src/java/org/apache/lucene/spatial/prefix/ContainsPrefixTreeFilter.java
@@ -21,7 +21,7 @@ import com.spatial4j.core.shape.Shape;
import com.spatial4j.core.shape.SpatialRelation;
import org.apache.lucene.index.LeafReaderContext;
-import org.apache.lucene.index.DocsEnum;
+import org.apache.lucene.index.PostingsEnum;
import org.apache.lucene.search.DocIdSet;
import org.apache.lucene.search.DocIdSetIterator;
import org.apache.lucene.spatial.prefix.tree.Cell;
@@ -176,9 +176,9 @@ public class ContainsPrefixTreeFilter extends AbstractPrefixTreeFilter {
private SmallDocSet collectDocs(Bits acceptContains) throws IOException {
SmallDocSet set = null;
- docsEnum = termsEnum.docs(acceptContains, docsEnum, DocsEnum.FLAG_NONE);
+ postingsEnum = termsEnum.postings(acceptContains, postingsEnum, PostingsEnum.FLAG_NONE);
int docid;
- while ((docid = docsEnum.nextDoc()) != DocIdSetIterator.NO_MORE_DOCS) {
+ while ((docid = postingsEnum.nextDoc()) != DocIdSetIterator.NO_MORE_DOCS) {
if (set == null) {
int size = termsEnum.docFreq();
if (size <= 0)
diff --git a/lucene/spatial/src/java/org/apache/lucene/spatial/prefix/PrefixTreeFacetCounter.java b/lucene/spatial/src/java/org/apache/lucene/spatial/prefix/PrefixTreeFacetCounter.java
index edfcc9e3d77..92671de7a8d 100644
--- a/lucene/spatial/src/java/org/apache/lucene/spatial/prefix/PrefixTreeFacetCounter.java
+++ b/lucene/spatial/src/java/org/apache/lucene/spatial/prefix/PrefixTreeFacetCounter.java
@@ -20,7 +20,7 @@ package org.apache.lucene.spatial.prefix;
import java.io.IOException;
import com.spatial4j.core.shape.Shape;
-import org.apache.lucene.index.DocsEnum;
+import org.apache.lucene.index.PostingsEnum;
import org.apache.lucene.index.IndexReaderContext;
import org.apache.lucene.index.LeafReaderContext;
import org.apache.lucene.search.DocIdSet;
@@ -168,8 +168,8 @@ public class PrefixTreeFacetCounter {
return termsEnum.docFreq();
}
int count = 0;
- docsEnum = termsEnum.docs(acceptDocs, docsEnum, DocsEnum.FLAG_NONE);
- while (docsEnum.nextDoc() != DocIdSetIterator.NO_MORE_DOCS) {
+ postingsEnum = termsEnum.postings(acceptDocs, postingsEnum, PostingsEnum.FLAG_NONE);
+ while (postingsEnum.nextDoc() != DocIdSetIterator.NO_MORE_DOCS) {
count++;
}
return count;
@@ -179,8 +179,8 @@ public class PrefixTreeFacetCounter {
if (acceptDocs == null) {
return true;
}
- docsEnum = termsEnum.docs(acceptDocs, docsEnum, DocsEnum.FLAG_NONE);
- return (docsEnum.nextDoc() != DocIdSetIterator.NO_MORE_DOCS);
+ postingsEnum = termsEnum.postings(acceptDocs, postingsEnum, PostingsEnum.FLAG_NONE);
+ return (postingsEnum.nextDoc() != DocIdSetIterator.NO_MORE_DOCS);
}
}.getDocIdSet();
diff --git a/lucene/spatial/src/java/org/apache/lucene/spatial/util/ShapeFieldCacheProvider.java b/lucene/spatial/src/java/org/apache/lucene/spatial/util/ShapeFieldCacheProvider.java
index bf5b7260924..cb299590090 100644
--- a/lucene/spatial/src/java/org/apache/lucene/spatial/util/ShapeFieldCacheProvider.java
+++ b/lucene/spatial/src/java/org/apache/lucene/spatial/util/ShapeFieldCacheProvider.java
@@ -61,7 +61,7 @@ public abstract class ShapeFieldCacheProvider {
log.fine("Building Cache [" + reader.maxDoc() + "]");
idx = new ShapeFieldCache<>(reader.maxDoc(),defaultSize);
int count = 0;
- DocsEnum docs = null;
+ PostingsEnum docs = null;
Terms terms = reader.terms(shapeField);
TermsEnum te = null;
if (terms != null) {
@@ -70,7 +70,7 @@ public abstract class ShapeFieldCacheProvider {
while (term != null) {
T shape = readShape(term);
if( shape != null ) {
- docs = te.docs(null, docs, DocsEnum.FLAG_NONE);
+ docs = te.postings(null, docs, PostingsEnum.FLAG_NONE);
Integer docid = docs.nextDoc();
while (docid != DocIdSetIterator.NO_MORE_DOCS) {
idx.add( docid, shape );
diff --git a/lucene/suggest/src/java/org/apache/lucene/search/suggest/analyzing/BlendedInfixSuggester.java b/lucene/suggest/src/java/org/apache/lucene/search/suggest/analyzing/BlendedInfixSuggester.java
index e49a88691eb..f74e13234be 100644
--- a/lucene/suggest/src/java/org/apache/lucene/search/suggest/analyzing/BlendedInfixSuggester.java
+++ b/lucene/suggest/src/java/org/apache/lucene/search/suggest/analyzing/BlendedInfixSuggester.java
@@ -29,7 +29,7 @@ import org.apache.lucene.analysis.Analyzer;
import org.apache.lucene.document.FieldType;
import org.apache.lucene.document.TextField;
import org.apache.lucene.index.BinaryDocValues;
-import org.apache.lucene.index.DocsAndPositionsEnum;
+import org.apache.lucene.index.PostingsEnum;
import org.apache.lucene.index.IndexOptions;
import org.apache.lucene.index.MultiDocValues;
import org.apache.lucene.index.Terms;
@@ -263,7 +263,7 @@ public class BlendedInfixSuggester extends AnalyzingInfixSuggester {
if (matchedTokens.contains(docTerm) || (prefixToken != null && docTerm.startsWith(prefixToken))) {
- DocsAndPositionsEnum docPosEnum = it.docsAndPositions(null, null, DocsAndPositionsEnum.FLAG_OFFSETS);
+ PostingsEnum docPosEnum = it.postings(null, null, PostingsEnum.FLAG_OFFSETS);
docPosEnum.nextDoc();
// use the first occurrence of the term
diff --git a/lucene/test-framework/src/java/org/apache/lucene/codecs/asserting/AssertingPostingsFormat.java b/lucene/test-framework/src/java/org/apache/lucene/codecs/asserting/AssertingPostingsFormat.java
index d08a72fbde3..8d060e01e47 100644
--- a/lucene/test-framework/src/java/org/apache/lucene/codecs/asserting/AssertingPostingsFormat.java
+++ b/lucene/test-framework/src/java/org/apache/lucene/codecs/asserting/AssertingPostingsFormat.java
@@ -25,8 +25,7 @@ import org.apache.lucene.codecs.FieldsConsumer;
import org.apache.lucene.codecs.FieldsProducer;
import org.apache.lucene.codecs.PostingsFormat;
import org.apache.lucene.index.AssertingLeafReader;
-import org.apache.lucene.index.DocsAndPositionsEnum;
-import org.apache.lucene.index.DocsEnum;
+import org.apache.lucene.index.PostingsEnum;
import org.apache.lucene.index.FieldInfo;
import org.apache.lucene.index.Fields;
import org.apache.lucene.index.IndexOptions;
@@ -160,8 +159,7 @@ public final class AssertingPostingsFormat extends PostingsFormat {
termsEnum = terms.iterator(termsEnum);
BytesRefBuilder lastTerm = null;
- DocsEnum docsEnum = null;
- DocsAndPositionsEnum posEnum = null;
+ PostingsEnum postingsEnum = null;
boolean hasFreqs = fieldInfo.getIndexOptions().compareTo(IndexOptions.DOCS_AND_FREQS) >= 0;
boolean hasPositions = fieldInfo.getIndexOptions().compareTo(IndexOptions.DOCS_AND_FREQS_AND_POSITIONS) >= 0;
@@ -187,46 +185,46 @@ public final class AssertingPostingsFormat extends PostingsFormat {
int flags = 0;
if (hasPositions == false) {
if (hasFreqs) {
- flags = flags | DocsEnum.FLAG_FREQS;
+ flags = flags | PostingsEnum.FLAG_FREQS;
}
- docsEnum = termsEnum.docs(null, docsEnum, flags);
+ postingsEnum = termsEnum.postings(null, postingsEnum, flags);
} else {
+ flags = PostingsEnum.FLAG_POSITIONS;
if (hasPayloads) {
- flags |= DocsAndPositionsEnum.FLAG_PAYLOADS;
+ flags |= PostingsEnum.FLAG_PAYLOADS;
}
if (hasOffsets) {
- flags = flags | DocsAndPositionsEnum.FLAG_OFFSETS;
+ flags = flags | PostingsEnum.FLAG_OFFSETS;
}
- posEnum = termsEnum.docsAndPositions(null, posEnum, flags);
- docsEnum = posEnum;
+ postingsEnum = termsEnum.postings(null, postingsEnum, flags);
}
- assert docsEnum != null : "termsEnum=" + termsEnum + " hasPositions=" + hasPositions;
+ assert postingsEnum != null : "termsEnum=" + termsEnum + " hasPositions=" + hasPositions;
int lastDocID = -1;
while(true) {
- int docID = docsEnum.nextDoc();
- if (docID == DocsEnum.NO_MORE_DOCS) {
+ int docID = postingsEnum.nextDoc();
+ if (docID == PostingsEnum.NO_MORE_DOCS) {
break;
}
assert docID > lastDocID;
lastDocID = docID;
if (hasFreqs) {
- int freq = docsEnum.freq();
+ int freq = postingsEnum.freq();
assert freq > 0;
if (hasPositions) {
int lastPos = -1;
int lastStartOffset = -1;
for(int i=0;i<freq;i++) {
- int pos = posEnum.nextPosition();
+ int pos = postingsEnum.nextPosition();
assert pos >= lastPos: "pos=" + pos + " vs lastPos=" + lastPos + " i=" + i + " freq=" + freq;
lastPos = pos;
if (hasOffsets) {
- int startOffset = posEnum.startOffset();
- int endOffset = posEnum.endOffset();
+ int startOffset = postingsEnum.startOffset();
+ int endOffset = postingsEnum.endOffset();
assert endOffset >= startOffset;
assert startOffset >= lastStartOffset;
lastStartOffset = startOffset;
diff --git a/lucene/test-framework/src/java/org/apache/lucene/codecs/ramonly/RAMOnlyPostingsFormat.java b/lucene/test-framework/src/java/org/apache/lucene/codecs/ramonly/RAMOnlyPostingsFormat.java
index aeb3521d81b..620df27ee0d 100644
--- a/lucene/test-framework/src/java/org/apache/lucene/codecs/ramonly/RAMOnlyPostingsFormat.java
+++ b/lucene/test-framework/src/java/org/apache/lucene/codecs/ramonly/RAMOnlyPostingsFormat.java
@@ -34,8 +34,7 @@ import org.apache.lucene.codecs.FieldsConsumer;
import org.apache.lucene.codecs.FieldsProducer;
import org.apache.lucene.codecs.PostingsFormat;
import org.apache.lucene.codecs.TermStats;
-import org.apache.lucene.index.DocsAndPositionsEnum;
-import org.apache.lucene.index.DocsEnum;
+import org.apache.lucene.index.PostingsEnum;
import org.apache.lucene.index.FieldInfo;
import org.apache.lucene.index.Fields;
import org.apache.lucene.index.IndexFileNames;
@@ -252,8 +251,7 @@ public final class RAMOnlyPostingsFormat extends PostingsFormat {
FixedBitSet docsSeen = new FixedBitSet(state.segmentInfo.getDocCount());
long sumTotalTermFreq = 0;
long sumDocFreq = 0;
- DocsEnum docsEnum = null;
- DocsAndPositionsEnum posEnum = null;
+ PostingsEnum postingsEnum = null;
int enumFlags;
IndexOptions indexOptions = fieldInfo.getIndexOptions();
@@ -265,18 +263,18 @@ public final class RAMOnlyPostingsFormat extends PostingsFormat {
if (writeFreqs == false) {
enumFlags = 0;
} else if (writePositions == false) {
- enumFlags = DocsEnum.FLAG_FREQS;
+ enumFlags = PostingsEnum.FLAG_FREQS;
} else if (writeOffsets == false) {
if (writePayloads) {
- enumFlags = DocsAndPositionsEnum.FLAG_PAYLOADS;
+ enumFlags = PostingsEnum.FLAG_PAYLOADS;
} else {
enumFlags = 0;
}
} else {
if (writePayloads) {
- enumFlags = DocsAndPositionsEnum.FLAG_PAYLOADS | DocsAndPositionsEnum.FLAG_OFFSETS;
+ enumFlags = PostingsEnum.FLAG_PAYLOADS | PostingsEnum.FLAG_OFFSETS;
} else {
- enumFlags = DocsAndPositionsEnum.FLAG_OFFSETS;
+ enumFlags = PostingsEnum.FLAG_OFFSETS;
}
}
@@ -286,20 +284,13 @@ public final class RAMOnlyPostingsFormat extends PostingsFormat {
break;
}
RAMPostingsWriterImpl postingsWriter = termsConsumer.startTerm(term);
-
- if (writePositions) {
- posEnum = termsEnum.docsAndPositions(null, posEnum, enumFlags);
- docsEnum = posEnum;
- } else {
- docsEnum = termsEnum.docs(null, docsEnum, enumFlags);
- posEnum = null;
- }
+ postingsEnum = termsEnum.postings(null, postingsEnum, enumFlags);
int docFreq = 0;
long totalTermFreq = 0;
while (true) {
- int docID = docsEnum.nextDoc();
- if (docID == DocsEnum.NO_MORE_DOCS) {
+ int docID = postingsEnum.nextDoc();
+ if (docID == PostingsEnum.NO_MORE_DOCS) {
break;
}
docsSeen.set(docID);
@@ -307,7 +298,7 @@ public final class RAMOnlyPostingsFormat extends PostingsFormat {
int freq;
if (writeFreqs) {
- freq = docsEnum.freq();
+ freq = postingsEnum.freq();
totalTermFreq += freq;
} else {
freq = -1;
@@ -316,13 +307,13 @@ public final class RAMOnlyPostingsFormat extends PostingsFormat {
postingsWriter.startDoc(docID, freq);
if (writePositions) {
for (int i=0;i doc : "backwards nextDoc from " + doc + " to " + nextDoc + " " + in;
if (nextDoc == DocIdSetIterator.NO_MORE_DOCS) {
state = DocsEnumState.FINISHED;
+ positionMax = 0;
} else {
state = DocsEnumState.ITERATING;
+ positionMax = super.freq();
}
+ positionCount = 0;
assert super.docID() == nextDoc;
return doc = nextDoc;
}
@@ -308,78 +299,12 @@ public class AssertingLeafReader extends FilterLeafReader {
assert advanced >= target : "backwards advance from: " + target + " to: " + advanced;
if (advanced == DocIdSetIterator.NO_MORE_DOCS) {
state = DocsEnumState.FINISHED;
- } else {
- state = DocsEnumState.ITERATING;
- }
- assert super.docID() == advanced;
- return doc = advanced;
- }
-
- @Override
- public int docID() {
- assertThread("Docs enums", creationThread);
- assert doc == super.docID() : " invalid docID() in " + in.getClass() + " " + super.docID() + " instead of " + doc;
- return doc;
- }
-
- @Override
- public int freq() throws IOException {
- assertThread("Docs enums", creationThread);
- assert state != DocsEnumState.START : "freq() called before nextDoc()/advance()";
- assert state != DocsEnumState.FINISHED : "freq() called after NO_MORE_DOCS";
- int freq = super.freq();
- assert freq > 0;
- return freq;
- }
- }
-
- static class AssertingDocsAndPositionsEnum extends FilterDocsAndPositionsEnum {
- private final Thread creationThread = Thread.currentThread();
- private DocsEnumState state = DocsEnumState.START;
- private int positionMax = 0;
- private int positionCount = 0;
- private int doc;
-
- public AssertingDocsAndPositionsEnum(DocsAndPositionsEnum in) {
- super(in);
- int docid = in.docID();
- assert docid == -1 : "invalid initial doc id: " + docid;
- doc = -1;
- }
-
- @Override
- public int nextDoc() throws IOException {
- assertThread("Docs enums", creationThread);
- assert state != DocsEnumState.FINISHED : "nextDoc() called after NO_MORE_DOCS";
- int nextDoc = super.nextDoc();
- assert nextDoc > doc : "backwards nextDoc from " + doc + " to " + nextDoc;
- positionCount = 0;
- if (nextDoc == DocIdSetIterator.NO_MORE_DOCS) {
- state = DocsEnumState.FINISHED;
positionMax = 0;
} else {
state = DocsEnumState.ITERATING;
positionMax = super.freq();
}
- assert super.docID() == nextDoc;
- return doc = nextDoc;
- }
-
- @Override
- public int advance(int target) throws IOException {
- assertThread("Docs enums", creationThread);
- assert state != DocsEnumState.FINISHED : "advance() called after NO_MORE_DOCS";
- assert target > doc : "target must be > docID(), got " + target + " <= " + doc;
- int advanced = super.advance(target);
- assert advanced >= target : "backwards advance from: " + target + " to: " + advanced;
positionCount = 0;
- if (advanced == DocIdSetIterator.NO_MORE_DOCS) {
- state = DocsEnumState.FINISHED;
- positionMax = 0;
- } else {
- state = DocsEnumState.ITERATING;
- positionMax = super.freq();
- }
assert super.docID() == advanced;
return doc = advanced;
}
@@ -403,7 +328,6 @@ public class AssertingLeafReader extends FilterLeafReader {
@Override
public int nextPosition() throws IOException {
- assertThread("Docs enums", creationThread);
assert state != DocsEnumState.START : "nextPosition() called before nextDoc()/advance()";
assert state != DocsEnumState.FINISHED : "nextPosition() called after NO_MORE_DOCS";
assert positionCount < positionMax : "nextPosition() called more than freq() times!";
@@ -415,7 +339,6 @@ public class AssertingLeafReader extends FilterLeafReader {
@Override
public int startOffset() throws IOException {
- assertThread("Docs enums", creationThread);
assert state != DocsEnumState.START : "startOffset() called before nextDoc()/advance()";
assert state != DocsEnumState.FINISHED : "startOffset() called after NO_MORE_DOCS";
assert positionCount > 0 : "startOffset() called before nextPosition()!";
@@ -424,7 +347,6 @@ public class AssertingLeafReader extends FilterLeafReader {
@Override
public int endOffset() throws IOException {
- assertThread("Docs enums", creationThread);
assert state != DocsEnumState.START : "endOffset() called before nextDoc()/advance()";
assert state != DocsEnumState.FINISHED : "endOffset() called after NO_MORE_DOCS";
assert positionCount > 0 : "endOffset() called before nextPosition()!";
@@ -433,16 +355,15 @@ public class AssertingLeafReader extends FilterLeafReader {
@Override
public BytesRef getPayload() throws IOException {
- assertThread("Docs enums", creationThread);
assert state != DocsEnumState.START : "getPayload() called before nextDoc()/advance()";
assert state != DocsEnumState.FINISHED : "getPayload() called after NO_MORE_DOCS";
assert positionCount > 0 : "getPayload() called before nextPosition()!";
BytesRef payload = super.getPayload();
- assert payload == null || payload.isValid() && payload.length > 0 : "getPayload() returned payload with invalid length!";
+ assert payload == null || payload.length > 0 : "getPayload() returned payload with invalid length!";
return payload;
}
}
-
+
/** Wraps a NumericDocValues but with additional asserts */
public static class AssertingNumericDocValues extends NumericDocValues {
private final Thread creationThread = Thread.currentThread();
diff --git a/lucene/test-framework/src/java/org/apache/lucene/index/BaseDocValuesFormatTestCase.java b/lucene/test-framework/src/java/org/apache/lucene/index/BaseDocValuesFormatTestCase.java
index fcbf1762ec7..97870e98937 100644
--- a/lucene/test-framework/src/java/org/apache/lucene/index/BaseDocValuesFormatTestCase.java
+++ b/lucene/test-framework/src/java/org/apache/lucene/index/BaseDocValuesFormatTestCase.java
@@ -17,8 +17,6 @@ package org.apache.lucene.index;
* limitations under the License.
*/
-import static org.apache.lucene.index.SortedSetDocValues.NO_MORE_ORDS;
-
import java.io.ByteArrayOutputStream;
import java.io.IOException;
import java.io.PrintStream;
@@ -63,6 +61,8 @@ import org.apache.lucene.util.BytesRefHash;
import org.apache.lucene.util.IOUtils;
import org.apache.lucene.util.TestUtil;
+import static org.apache.lucene.index.SortedSetDocValues.NO_MORE_ORDS;
+
/**
* Abstract class to do basic tests for a docvalues format.
* NOTE: This test focuses on the docvalues impl, nothing else.
@@ -1156,8 +1156,8 @@ public abstract class BaseDocValuesFormatTestCase extends BaseIndexFileFormatTes
for (Entry entry : entrySet) {
// pk lookup
- DocsEnum termDocsEnum = slowR.termDocsEnum(new Term("id", entry.getKey()));
- int docId = termDocsEnum.nextDoc();
+ PostingsEnum termPostingsEnum = slowR.termDocsEnum(new Term("id", entry.getKey()));
+ int docId = termPostingsEnum.nextDoc();
expected = new BytesRef(entry.getValue());
final BytesRef actual = docValues.get(docId);
assertEquals(expected, actual);
@@ -2085,7 +2085,7 @@ public abstract class BaseDocValuesFormatTestCase extends BaseIndexFileFormatTes
);
}
}
-
+
public void testSortedNumericsMultipleValuesVsStoredFields() throws Exception {
assumeTrue("Codec does not support SORTED_NUMERIC", codecSupportsSortedNumeric());
int numIterations = atLeast(1);
diff --git a/lucene/test-framework/src/java/org/apache/lucene/index/BasePostingsFormatTestCase.java b/lucene/test-framework/src/java/org/apache/lucene/index/BasePostingsFormatTestCase.java
index 40dd8818a77..44def0e761a 100644
--- a/lucene/test-framework/src/java/org/apache/lucene/index/BasePostingsFormatTestCase.java
+++ b/lucene/test-framework/src/java/org/apache/lucene/index/BasePostingsFormatTestCase.java
@@ -97,7 +97,7 @@ public abstract class BasePostingsFormatTestCase extends BaseIndexFileFormatTest
// Sometimes use .advance():
SKIPPING,
- // Sometimes reuse the Docs/AndPositionsEnum across terms:
+ // Sometimes reuse the PostingsEnum across terms:
REUSE_ENUMS,
// Sometimes pass non-null live docs:
@@ -121,7 +121,7 @@ public abstract class BasePostingsFormatTestCase extends BaseIndexFileFormatTest
/** Given the same random seed this always enumerates the
* same random postings */
- private static class SeedPostings extends DocsAndPositionsEnum {
+ private static class SeedPostings extends PostingsEnum {
// Used only to generate docIDs; this way if you pull w/
// or w/o positions you get the same docID sequence:
private final Random docRandom;
@@ -234,7 +234,7 @@ public abstract class BasePostingsFormatTestCase extends BaseIndexFileFormatTest
return 0;
}
assert posUpto < freq;
-
+
if (posUpto == 0 && random.nextBoolean()) {
// Sometimes index pos = 0
} else if (posSpacing == 1) {
@@ -270,7 +270,7 @@ public abstract class BasePostingsFormatTestCase extends BaseIndexFileFormatTest
posUpto++;
return pos;
}
-
+
@Override
public int startOffset() {
return startOffset;
@@ -414,10 +414,10 @@ public abstract class BasePostingsFormatTestCase extends BaseIndexFileFormatTest
// NOTE: sort of silly: we enum all the docs just to
// get the maxDoc
- DocsEnum docsEnum = getSeedPostings(term, termSeed, false, IndexOptions.DOCS, true);
+ PostingsEnum postingsEnum = getSeedPostings(term, termSeed, false, IndexOptions.DOCS, true);
int doc;
int lastDoc = 0;
- while((doc = docsEnum.nextDoc()) != DocsEnum.NO_MORE_DOCS) {
+ while((doc = postingsEnum.nextDoc()) != PostingsEnum.NO_MORE_DOCS) {
lastDoc = doc;
}
maxDoc = Math.max(lastDoc, maxDoc);
@@ -639,32 +639,27 @@ public abstract class BasePostingsFormatTestCase extends BaseIndexFileFormatTest
}
@Override
- public final DocsEnum docs(Bits liveDocs, DocsEnum reuse, int flags) throws IOException {
+ public final PostingsEnum postings(Bits liveDocs, PostingsEnum reuse, int flags) throws IOException {
if (liveDocs != null) {
throw new IllegalArgumentException("liveDocs must be null");
}
- if ((flags & DocsEnum.FLAG_FREQS) != 0 && maxAllowed.compareTo(IndexOptions.DOCS_AND_FREQS) < 0) {
+ if (PostingsEnum.requiresPositions(flags)) {
+ if (maxAllowed.compareTo(IndexOptions.DOCS_AND_FREQS_AND_POSITIONS) < 0) {
+ return null;
+ }
+ if ((flags & PostingsEnum.FLAG_OFFSETS) == PostingsEnum.FLAG_OFFSETS && maxAllowed.compareTo(IndexOptions.DOCS_AND_FREQS_AND_POSITIONS_AND_OFFSETS) < 0) {
+ return null;
+ }
+ if ((flags & PostingsEnum.FLAG_PAYLOADS) == PostingsEnum.FLAG_PAYLOADS && allowPayloads == false) {
+ return null;
+ }
+ }
+ if ((flags & PostingsEnum.FLAG_FREQS) != 0 && maxAllowed.compareTo(IndexOptions.DOCS_AND_FREQS) < 0) {
return null;
}
return getSeedPostings(current.getKey().utf8ToString(), current.getValue().seed, false, maxAllowed, allowPayloads);
}
- @Override
- public final DocsAndPositionsEnum docsAndPositions(Bits liveDocs, DocsAndPositionsEnum reuse, int flags) throws IOException {
- if (liveDocs != null) {
- throw new IllegalArgumentException("liveDocs must be null");
- }
- if (maxAllowed.compareTo(IndexOptions.DOCS_AND_FREQS_AND_POSITIONS) < 0) {
- return null;
- }
- if ((flags & DocsAndPositionsEnum.FLAG_OFFSETS) != 0 && maxAllowed.compareTo(IndexOptions.DOCS_AND_FREQS_AND_POSITIONS_AND_OFFSETS) < 0) {
- return null;
- }
- if ((flags & DocsAndPositionsEnum.FLAG_PAYLOADS) != 0 && allowPayloads == false) {
- return null;
- }
- return getSeedPostings(current.getKey().utf8ToString(), current.getValue().seed, false, maxAllowed, allowPayloads);
- }
}
// TODO maybe instead of @BeforeClass just make a single test run: build postings & index & test it?
@@ -745,8 +740,7 @@ public abstract class BasePostingsFormatTestCase extends BaseIndexFileFormatTest
private static class ThreadState {
// Only used with REUSE option:
- public DocsEnum reuseDocsEnum;
- public DocsAndPositionsEnum reuseDocsAndPositionsEnum;
+ public PostingsEnum reusePostingsEnum;
}
private void verifyEnum(ThreadState threadState,
@@ -809,78 +803,74 @@ public abstract class BasePostingsFormatTestCase extends BaseIndexFileFormatTest
boolean doCheckPayloads = options.contains(Option.PAYLOADS) && allowPositions && fieldInfo.hasPayloads() && (alwaysTestMax || random().nextInt(3) <= 2);
- DocsEnum prevDocsEnum = null;
+ PostingsEnum prevPostingsEnum = null;
- DocsEnum docsEnum;
- DocsAndPositionsEnum docsAndPositionsEnum;
+ PostingsEnum postingsEnum;
if (!doCheckPositions) {
if (allowPositions && random().nextInt(10) == 7) {
// 10% of the time, even though we will not check positions, pull a DocsAndPositions enum
if (options.contains(Option.REUSE_ENUMS) && random().nextInt(10) < 9) {
- prevDocsEnum = threadState.reuseDocsAndPositionsEnum;
+ prevPostingsEnum = threadState.reusePostingsEnum;
}
- int flags = 0;
+ int flags = PostingsEnum.FLAG_POSITIONS;
if (alwaysTestMax || random().nextBoolean()) {
- flags |= DocsAndPositionsEnum.FLAG_OFFSETS;
+ flags |= PostingsEnum.FLAG_OFFSETS;
}
if (alwaysTestMax || random().nextBoolean()) {
- flags |= DocsAndPositionsEnum.FLAG_PAYLOADS;
+ flags |= PostingsEnum.FLAG_PAYLOADS;
}
if (VERBOSE) {
- System.out.println(" get DocsAndPositionsEnum (but we won't check positions) flags=" + flags);
+ System.out.println(" get DocsEnum (but we won't check positions) flags=" + flags);
}
- threadState.reuseDocsAndPositionsEnum = termsEnum.docsAndPositions(liveDocs, (DocsAndPositionsEnum) prevDocsEnum, flags);
- docsEnum = threadState.reuseDocsAndPositionsEnum;
- docsAndPositionsEnum = threadState.reuseDocsAndPositionsEnum;
+ threadState.reusePostingsEnum = termsEnum.postings(liveDocs, prevPostingsEnum, flags);
+ postingsEnum = threadState.reusePostingsEnum;
} else {
if (VERBOSE) {
System.out.println(" get DocsEnum");
}
if (options.contains(Option.REUSE_ENUMS) && random().nextInt(10) < 9) {
- prevDocsEnum = threadState.reuseDocsEnum;
+ prevPostingsEnum = threadState.reusePostingsEnum;
}
- threadState.reuseDocsEnum = termsEnum.docs(liveDocs, prevDocsEnum, doCheckFreqs ? DocsEnum.FLAG_FREQS : DocsEnum.FLAG_NONE);
- docsEnum = threadState.reuseDocsEnum;
- docsAndPositionsEnum = null;
+ threadState.reusePostingsEnum = termsEnum.postings(liveDocs, prevPostingsEnum, doCheckFreqs ? PostingsEnum.FLAG_FREQS : PostingsEnum.FLAG_NONE);
+ postingsEnum = threadState.reusePostingsEnum;
}
} else {
if (options.contains(Option.REUSE_ENUMS) && random().nextInt(10) < 9) {
- prevDocsEnum = threadState.reuseDocsAndPositionsEnum;
+ prevPostingsEnum = threadState.reusePostingsEnum;
}
- int flags = 0;
+ int flags = PostingsEnum.FLAG_POSITIONS;
if (alwaysTestMax || doCheckOffsets || random().nextInt(3) == 1) {
- flags |= DocsAndPositionsEnum.FLAG_OFFSETS;
+ flags |= PostingsEnum.FLAG_OFFSETS;
}
if (alwaysTestMax || doCheckPayloads|| random().nextInt(3) == 1) {
- flags |= DocsAndPositionsEnum.FLAG_PAYLOADS;
+ flags |= PostingsEnum.FLAG_PAYLOADS;
}
if (VERBOSE) {
- System.out.println(" get DocsAndPositionsEnum flags=" + flags);
+ System.out.println(" get DocsEnum flags=" + flags);
}
- threadState.reuseDocsAndPositionsEnum = termsEnum.docsAndPositions(liveDocs, (DocsAndPositionsEnum) prevDocsEnum, flags);
- docsEnum = threadState.reuseDocsAndPositionsEnum;
- docsAndPositionsEnum = threadState.reuseDocsAndPositionsEnum;
+ threadState.reusePostingsEnum = termsEnum.postings(liveDocs, prevPostingsEnum, flags);
+ postingsEnum = threadState.reusePostingsEnum;
}
- assertNotNull("null DocsEnum", docsEnum);
- int initialDocID = docsEnum.docID();
- assertEquals("inital docID should be -1" + docsEnum, -1, initialDocID);
+ assertNotNull("null DocsEnum", postingsEnum);
+ int initialDocID = postingsEnum.docID();
+ assertEquals("initial docID should be -1" + postingsEnum, -1, initialDocID);
if (VERBOSE) {
- if (prevDocsEnum == null) {
- System.out.println(" got enum=" + docsEnum);
- } else if (prevDocsEnum == docsEnum) {
- System.out.println(" got reuse enum=" + docsEnum);
+ if (prevPostingsEnum == null) {
+ System.out.println(" got enum=" + postingsEnum);
+ } else if (prevPostingsEnum == postingsEnum) {
+ System.out.println(" got reuse enum=" + postingsEnum);
} else {
- System.out.println(" got enum=" + docsEnum + " (reuse of " + prevDocsEnum + " failed)");
+ System.out.println(" got enum=" + postingsEnum + " (reuse of " + prevPostingsEnum + " failed)");
}
}
@@ -930,10 +920,10 @@ public abstract class BasePostingsFormatTestCase extends BaseIndexFileFormatTest
while (expected.upto <= stopAt) {
if (expected.upto == stopAt) {
if (stopAt == expected.docFreq) {
- assertEquals("DocsEnum should have ended but didn't", DocsEnum.NO_MORE_DOCS, docsEnum.nextDoc());
+ assertEquals("DocsEnum should have ended but didn't", PostingsEnum.NO_MORE_DOCS, postingsEnum.nextDoc());
// Common bug is to forget to set this.doc=NO_MORE_DOCS in the enum!:
- assertEquals("DocsEnum should have ended but didn't", DocsEnum.NO_MORE_DOCS, docsEnum.docID());
+ assertEquals("DocsEnum should have ended but didn't", PostingsEnum.NO_MORE_DOCS, postingsEnum.docID());
}
break;
}
@@ -944,7 +934,7 @@ public abstract class BasePostingsFormatTestCase extends BaseIndexFileFormatTest
// Pick target we know exists:
final int skipCount = TestUtil.nextInt(random(), 1, skipInc);
for(int skip=0;skip= stopAt) {
- int target = random().nextBoolean() ? maxDoc : DocsEnum.NO_MORE_DOCS;
+ int target = random().nextBoolean() ? maxDoc : PostingsEnum.NO_MORE_DOCS;
if (VERBOSE) {
System.out.println(" now advance to end (target=" + target + ")");
}
- assertEquals("DocsEnum should have ended but didn't", DocsEnum.NO_MORE_DOCS, docsEnum.advance(target));
+ assertEquals("DocsEnum should have ended but didn't", PostingsEnum.NO_MORE_DOCS, postingsEnum.advance(target));
break;
} else {
if (VERBOSE) {
if (targetDocID != -1) {
- System.out.println(" now advance to random target=" + targetDocID + " (" + expected.upto + " of " + stopAt + ") current=" + docsEnum.docID());
+ System.out.println(" now advance to random target=" + targetDocID + " (" + expected.upto + " of " + stopAt + ") current=" + postingsEnum.docID());
} else {
- System.out.println(" now advance to known-exists target=" + expected.docID() + " (" + expected.upto + " of " + stopAt + ") current=" + docsEnum.docID());
+ System.out.println(" now advance to known-exists target=" + expected.docID() + " (" + expected.upto + " of " + stopAt + ") current=" + postingsEnum.docID());
}
}
- int docID = docsEnum.advance(targetDocID != -1 ? targetDocID : expected.docID());
+ int docID = postingsEnum.advance(targetDocID != -1 ? targetDocID : expected.docID());
assertEquals("docID is wrong", expected.docID(), docID);
}
} else {
@@ -980,9 +970,9 @@ public abstract class BasePostingsFormatTestCase extends BaseIndexFileFormatTest
if (VERBOSE) {
System.out.println(" now nextDoc to " + expected.docID() + " (" + expected.upto + " of " + stopAt + ")");
}
- int docID = docsEnum.nextDoc();
+ int docID = postingsEnum.nextDoc();
assertEquals("docID is wrong", expected.docID(), docID);
- if (docID == DocsEnum.NO_MORE_DOCS) {
+ if (docID == PostingsEnum.NO_MORE_DOCS) {
break;
}
}
@@ -991,12 +981,12 @@ public abstract class BasePostingsFormatTestCase extends BaseIndexFileFormatTest
if (VERBOSE) {
System.out.println(" now freq()=" + expected.freq());
}
- int freq = docsEnum.freq();
+ int freq = postingsEnum.freq();
assertEquals("freq is wrong", expected.freq(), freq);
}
if (doCheckPositions) {
- int freq = docsEnum.freq();
+ int freq = postingsEnum.freq();
int numPosToConsume;
if (!alwaysTestMax && options.contains(Option.PARTIAL_POS_CONSUME) && random().nextInt(5) == 1) {
numPosToConsume = random().nextInt(freq);
@@ -1009,7 +999,7 @@ public abstract class BasePostingsFormatTestCase extends BaseIndexFileFormatTest
if (VERBOSE) {
System.out.println(" now nextPosition to " + pos);
}
- assertEquals("position is wrong", pos, docsAndPositionsEnum.nextPosition());
+ assertEquals("position is wrong", pos, postingsEnum.nextPosition());
if (doCheckPayloads) {
BytesRef expectedPayload = expected.getPayload();
@@ -1018,9 +1008,9 @@ public abstract class BasePostingsFormatTestCase extends BaseIndexFileFormatTest
System.out.println(" now check expectedPayload length=" + (expectedPayload == null ? 0 : expectedPayload.length));
}
if (expectedPayload == null || expectedPayload.length == 0) {
- assertNull("should not have payload", docsAndPositionsEnum.getPayload());
+ assertNull("should not have payload", postingsEnum.getPayload());
} else {
- BytesRef payload = docsAndPositionsEnum.getPayload();
+ BytesRef payload = postingsEnum.getPayload();
assertNotNull("should have payload but doesn't", payload);
assertEquals("payload length is wrong", expectedPayload.length, payload.length);
@@ -1032,7 +1022,7 @@ public abstract class BasePostingsFormatTestCase extends BaseIndexFileFormatTest
// make a deep copy
payload = BytesRef.deepCopyOf(payload);
- assertEquals("2nd call to getPayload returns something different!", payload, docsAndPositionsEnum.getPayload());
+ assertEquals("2nd call to getPayload returns something different!", payload, postingsEnum.getPayload());
}
} else {
if (VERBOSE) {
@@ -1046,8 +1036,8 @@ public abstract class BasePostingsFormatTestCase extends BaseIndexFileFormatTest
if (VERBOSE) {
System.out.println(" now check offsets: startOff=" + expected.startOffset() + " endOffset=" + expected.endOffset());
}
- assertEquals("startOffset is wrong", expected.startOffset(), docsAndPositionsEnum.startOffset());
- assertEquals("endOffset is wrong", expected.endOffset(), docsAndPositionsEnum.endOffset());
+ assertEquals("startOffset is wrong", expected.startOffset(), postingsEnum.startOffset());
+ assertEquals("endOffset is wrong", expected.endOffset(), postingsEnum.endOffset());
} else {
if (VERBOSE) {
System.out.println(" skip check offsets");
@@ -1057,8 +1047,8 @@ public abstract class BasePostingsFormatTestCase extends BaseIndexFileFormatTest
if (VERBOSE) {
System.out.println(" now check offsets are -1");
}
- assertEquals("startOffset isn't -1", -1, docsAndPositionsEnum.startOffset());
- assertEquals("endOffset isn't -1", -1, docsAndPositionsEnum.endOffset());
+ assertEquals("startOffset isn't -1", -1, postingsEnum.startOffset());
+ assertEquals("endOffset isn't -1", -1, postingsEnum.endOffset());
}
}
}
@@ -1406,6 +1396,41 @@ public abstract class BasePostingsFormatTestCase extends BaseIndexFileFormatTest
IOUtils.rm(path);
}
}
+
+ protected boolean isPostingsEnumReuseImplemented() {
+ return true;
+ }
+
+ public void testPostingsEnumReuse() throws Exception {
+
+ Path path = createTempDir("testPostingsEnumReuse");
+ Directory dir = newFSDirectory(path);
+
+ FieldsProducer fieldsProducer = buildIndex(dir, IndexOptions.DOCS_AND_FREQS_AND_POSITIONS_AND_OFFSETS, random().nextBoolean(), true);
+ Collections.shuffle(allTerms, random());
+ FieldAndTerm fieldAndTerm = allTerms.get(0);
+
+ Terms terms = fieldsProducer.terms(fieldAndTerm.field);
+ TermsEnum te = terms.iterator(null);
+
+ te.seekExact(fieldAndTerm.term);
+ checkReuse(te, PostingsEnum.FLAG_FREQS, PostingsEnum.FLAG_ALL, false);
+ if (isPostingsEnumReuseImplemented())
+ checkReuse(te, PostingsEnum.FLAG_ALL, PostingsEnum.FLAG_ALL, true);
+
+ fieldsProducer.close();
+ dir.close();
+ IOUtils.rm(path);
+ }
+
+ protected static void checkReuse(TermsEnum termsEnum, int firstFlags, int secondFlags, boolean shouldReuse) throws IOException {
+ PostingsEnum postings1 = termsEnum.postings(null, null, firstFlags);
+ PostingsEnum postings2 = termsEnum.postings(null, postings1, secondFlags);
+ if (shouldReuse)
+ assertSame("Expected PostingsEnum " + postings1.getClass().getName() + " to be reused", postings1, postings2);
+ else
+ assertNotSame("Expected PostingsEnum " + postings1.getClass().getName() + " to not be reused", postings1, postings2);
+ }
public void testJustEmptyField() throws Exception {
Directory dir = newDirectory();
@@ -1470,7 +1495,7 @@ public abstract class BasePostingsFormatTestCase extends BaseIndexFileFormatTest
LeafReader ar = getOnlySegmentReader(ir);
TermsEnum termsEnum = ar.terms("field").iterator(null);
assertTrue(termsEnum.seekExact(new BytesRef("value")));
- DocsEnum docsEnum = termsEnum.docs(null, null, DocsEnum.FLAG_NONE);
+ PostingsEnum docsEnum = termsEnum.postings(null, null, PostingsEnum.FLAG_NONE);
assertEquals(0, docsEnum.nextDoc());
assertEquals(1, docsEnum.freq());
assertEquals(1, docsEnum.nextDoc());
@@ -1506,8 +1531,8 @@ public abstract class BasePostingsFormatTestCase extends BaseIndexFileFormatTest
TermsEnum termsEnum = terms.iterator(null);
BytesRef term = termsEnum.next();
if (term != null) {
- DocsEnum docsEnum = termsEnum.docs(null, null);
- assertTrue(docsEnum.nextDoc() == DocsEnum.NO_MORE_DOCS);
+ PostingsEnum postingsEnum = termsEnum.postings(null, null);
+ assertTrue(postingsEnum.nextDoc() == PostingsEnum.NO_MORE_DOCS);
}
}
ir.close();
@@ -1592,27 +1617,24 @@ public abstract class BasePostingsFormatTestCase extends BaseIndexFileFormatTest
assert terms != null;
TermsEnum termsEnum = terms.iterator(null);
- DocsEnum docs = null;
+ PostingsEnum docs = null;
while(termsEnum.next() != null) {
BytesRef term = termsEnum.term();
-
- if (random().nextBoolean()) {
- docs = termsEnum.docs(null, docs, DocsEnum.FLAG_FREQS);
- } else if (docs instanceof DocsAndPositionsEnum) {
- docs = termsEnum.docsAndPositions(null, (DocsAndPositionsEnum) docs, 0);
+ boolean noPositions = random().nextBoolean();
+ if (noPositions) {
+ docs = termsEnum.postings(null, docs, PostingsEnum.FLAG_FREQS);
} else {
- docs = termsEnum.docsAndPositions(null, null, 0);
+ docs = termsEnum.postings(null, null, PostingsEnum.FLAG_POSITIONS);
}
int docFreq = 0;
long totalTermFreq = 0;
- while (docs.nextDoc() != DocsEnum.NO_MORE_DOCS) {
+ while (docs.nextDoc() != PostingsEnum.NO_MORE_DOCS) {
docFreq++;
totalTermFreq += docs.freq();
- if (docs instanceof DocsAndPositionsEnum) {
- DocsAndPositionsEnum posEnum = (DocsAndPositionsEnum) docs;
- int limit = TestUtil.nextInt(random(), 1, docs.freq());
- for(int i=0;i termsEnum = new ThreadLocal<>();
- private final ThreadLocal docsEnum = new ThreadLocal<>();
- private final ThreadLocal docsAndPositionsEnum = new ThreadLocal<>();
+ private final ThreadLocal docsEnum = new ThreadLocal<>();
+ private final ThreadLocal docsAndPositionsEnum = new ThreadLocal<>();
protected void assertEquals(RandomTokenStream tk, FieldType ft, Terms terms) throws IOException {
assertEquals(1, terms.getDocCount());
@@ -440,27 +439,27 @@ public abstract class BaseTermVectorsFormatTestCase extends BaseIndexFileFormatT
assertEquals(1, termsEnum.docFreq());
final FixedBitSet bits = new FixedBitSet(1);
- DocsEnum docsEnum = termsEnum.docs(bits, random().nextBoolean() ? null : this.docsEnum.get());
- assertEquals(DocsEnum.NO_MORE_DOCS, docsEnum.nextDoc());
+ PostingsEnum postingsEnum = termsEnum.postings(bits, random().nextBoolean() ? null : this.docsEnum.get());
+ assertEquals(PostingsEnum.NO_MORE_DOCS, postingsEnum.nextDoc());
bits.set(0);
- docsEnum = termsEnum.docs(random().nextBoolean() ? bits : null, random().nextBoolean() ? null : docsEnum);
- assertNotNull(docsEnum);
- assertEquals(0, docsEnum.nextDoc());
- assertEquals(0, docsEnum.docID());
- assertEquals(tk.freqs.get(termsEnum.term().utf8ToString()), (Integer) docsEnum.freq());
- assertEquals(DocsEnum.NO_MORE_DOCS, docsEnum.nextDoc());
- this.docsEnum.set(docsEnum);
+ postingsEnum = termsEnum.postings(random().nextBoolean() ? bits : null, random().nextBoolean() ? null : postingsEnum);
+ assertNotNull(postingsEnum);
+ assertEquals(0, postingsEnum.nextDoc());
+ assertEquals(0, postingsEnum.docID());
+ assertEquals(tk.freqs.get(termsEnum.term().utf8ToString()), (Integer) postingsEnum.freq());
+ assertEquals(PostingsEnum.NO_MORE_DOCS, postingsEnum.nextDoc());
+ this.docsEnum.set(postingsEnum);
bits.clear(0);
- DocsAndPositionsEnum docsAndPositionsEnum = termsEnum.docsAndPositions(bits, random().nextBoolean() ? null : this.docsAndPositionsEnum.get());
+ PostingsEnum docsAndPositionsEnum = termsEnum.postings(bits, random().nextBoolean() ? null : this.docsEnum.get(), PostingsEnum.FLAG_POSITIONS);
assertEquals(ft.storeTermVectorOffsets() || ft.storeTermVectorPositions(), docsAndPositionsEnum != null);
if (docsAndPositionsEnum != null) {
- assertEquals(DocsEnum.NO_MORE_DOCS, docsAndPositionsEnum.nextDoc());
+ assertEquals(PostingsEnum.NO_MORE_DOCS, docsAndPositionsEnum.nextDoc());
}
bits.set(0);
- docsAndPositionsEnum = termsEnum.docsAndPositions(random().nextBoolean() ? bits : null, random().nextBoolean() ? null : docsAndPositionsEnum);
+ docsAndPositionsEnum = termsEnum.postings(random().nextBoolean() ? bits : null, random().nextBoolean() ? null : docsAndPositionsEnum, PostingsEnum.FLAG_POSITIONS);
assertEquals(ft.storeTermVectorOffsets() || ft.storeTermVectorPositions(), docsAndPositionsEnum != null);
if (terms.hasPositions() || terms.hasOffsets()) {
assertEquals(0, docsAndPositionsEnum.nextDoc());
@@ -515,9 +514,9 @@ public abstract class BaseTermVectorsFormatTestCase extends BaseIndexFileFormatT
// ok
}
}
- assertEquals(DocsEnum.NO_MORE_DOCS, docsAndPositionsEnum.nextDoc());
+ assertEquals(PostingsEnum.NO_MORE_DOCS, docsAndPositionsEnum.nextDoc());
}
- this.docsAndPositionsEnum.set(docsAndPositionsEnum);
+ this.docsEnum.set(docsAndPositionsEnum);
}
assertNull(termsEnum.next());
for (int i = 0; i < 5; ++i) {
diff --git a/lucene/test-framework/src/java/org/apache/lucene/index/PerThreadPKLookup.java b/lucene/test-framework/src/java/org/apache/lucene/index/PerThreadPKLookup.java
index 26c89122371..f1554ab3256 100644
--- a/lucene/test-framework/src/java/org/apache/lucene/index/PerThreadPKLookup.java
+++ b/lucene/test-framework/src/java/org/apache/lucene/index/PerThreadPKLookup.java
@@ -35,7 +35,7 @@ import org.apache.lucene.util.BytesRef;
public class PerThreadPKLookup {
protected final TermsEnum[] termsEnums;
- protected final DocsEnum[] docsEnums;
+ protected final PostingsEnum[] postingsEnums;
protected final Bits[] liveDocs;
protected final int[] docBases;
protected final int numSegs;
@@ -54,7 +54,7 @@ public class PerThreadPKLookup {
});
termsEnums = new TermsEnum[leaves.size()];
- docsEnums = new DocsEnum[leaves.size()];
+ postingsEnums = new PostingsEnum[leaves.size()];
liveDocs = new Bits[leaves.size()];
docBases = new int[leaves.size()];
int numSegs = 0;
@@ -78,9 +78,9 @@ public class PerThreadPKLookup {
public int lookup(BytesRef id) throws IOException {
for(int seg=0;seg(FS_DIRECTORIES);
CORE_DIRECTORIES.add("RAMDirectory");
- };
+ }
- /** A {@link FilterCachingPolicy} that randomly caches. */
+ /** A {@link org.apache.lucene.search.FilterCachingPolicy} that randomly caches. */
public static final FilterCachingPolicy MAYBE_CACHE_POLICY = new FilterCachingPolicy() {
@Override
@@ -484,7 +437,7 @@ public abstract class LuceneTestCase extends Assert {
}
};
-
+
// -----------------------------------------------------------------
// Fields initialized in class or instance rules.
// -----------------------------------------------------------------
@@ -1307,10 +1260,6 @@ public abstract class LuceneTestCase extends Assert {
String fsdirClass = TEST_DIRECTORY;
if (fsdirClass.equals("random")) {
fsdirClass = RandomPicks.randomFrom(random(), FS_DIRECTORIES);
- if (fsdirClass.equals("SimpleFSDirectory")) {
- // pick again
- fsdirClass = RandomPicks.randomFrom(random(), FS_DIRECTORIES);
- }
}
Class<? extends FSDirectory> clazz;
@@ -1349,7 +1298,7 @@ public abstract class LuceneTestCase extends Assert {
if (rarely(random) && !bare) {
directory = new NRTCachingDirectory(directory, random.nextDouble(), random.nextDouble());
}
-
+
if (bare) {
BaseDirectoryWrapper base = new BaseDirectoryWrapper(directory);
closeAfterSuite(new CloseableDirectory(base, suiteFailureMarker));
@@ -1470,7 +1419,7 @@ public abstract class LuceneTestCase extends Assert {
/**
* Return a random Locale from the available locales on the system.
- * @see LUCENE-4020
+ * @see "https://issues.apache.org/jira/browse/LUCENE-4020"
*/
public static Locale randomLocale(Random random) {
Locale locales[] = Locale.getAvailableLocales();
@@ -1479,7 +1428,7 @@ public abstract class LuceneTestCase extends Assert {
/**
* Return a random TimeZone from the available timezones on the system
- * @see LUCENE-4020
+ * @see "https://issues.apache.org/jira/browse/LUCENE-4020"
*/
public static TimeZone randomTimeZone(Random random) {
String tzIds[] = TimeZone.getAvailableIDs();
@@ -1516,10 +1465,6 @@ public abstract class LuceneTestCase extends Assert {
if (clazzName.equals("random")) {
if (rarely(random)) {
clazzName = RandomPicks.randomFrom(random, CORE_DIRECTORIES);
- if (clazzName.equals("SimpleFSDirectory")) {
- // pick again
- clazzName = RandomPicks.randomFrom(random, CORE_DIRECTORIES);
- }
} else {
clazzName = "RAMDirectory";
}
@@ -1939,61 +1884,61 @@ public abstract class LuceneTestCase extends Assert {
public void assertTermsEnumEquals(String info, IndexReader leftReader, TermsEnum leftTermsEnum, TermsEnum rightTermsEnum, boolean deep) throws IOException {
BytesRef term;
Bits randomBits = new RandomBits(leftReader.maxDoc(), random().nextDouble(), random());
- DocsAndPositionsEnum leftPositions = null;
- DocsAndPositionsEnum rightPositions = null;
- DocsEnum leftDocs = null;
- DocsEnum rightDocs = null;
+ PostingsEnum leftPositions = null;
+ PostingsEnum rightPositions = null;
+ PostingsEnum leftDocs = null;
+ PostingsEnum rightDocs = null;
while ((term = leftTermsEnum.next()) != null) {
assertEquals(info, term, rightTermsEnum.next());
assertTermStatsEquals(info, leftTermsEnum, rightTermsEnum);
if (deep) {
- assertDocsAndPositionsEnumEquals(info, leftPositions = leftTermsEnum.docsAndPositions(null, leftPositions),
- rightPositions = rightTermsEnum.docsAndPositions(null, rightPositions));
- assertDocsAndPositionsEnumEquals(info, leftPositions = leftTermsEnum.docsAndPositions(randomBits, leftPositions),
- rightPositions = rightTermsEnum.docsAndPositions(randomBits, rightPositions));
+ assertDocsAndPositionsEnumEquals(info, leftPositions = leftTermsEnum.postings(null, leftPositions, PostingsEnum.FLAG_ALL),
+ rightPositions = rightTermsEnum.postings(null, rightPositions, PostingsEnum.FLAG_ALL));
+ assertDocsAndPositionsEnumEquals(info, leftPositions = leftTermsEnum.postings(randomBits, leftPositions, PostingsEnum.FLAG_ALL),
+ rightPositions = rightTermsEnum.postings(randomBits, rightPositions, PostingsEnum.FLAG_ALL));
assertPositionsSkippingEquals(info, leftReader, leftTermsEnum.docFreq(),
- leftPositions = leftTermsEnum.docsAndPositions(null, leftPositions),
- rightPositions = rightTermsEnum.docsAndPositions(null, rightPositions));
+ leftPositions = leftTermsEnum.postings(null, leftPositions, PostingsEnum.FLAG_ALL),
+ rightPositions = rightTermsEnum.postings(null, rightPositions, PostingsEnum.FLAG_ALL));
assertPositionsSkippingEquals(info, leftReader, leftTermsEnum.docFreq(),
- leftPositions = leftTermsEnum.docsAndPositions(randomBits, leftPositions),
- rightPositions = rightTermsEnum.docsAndPositions(randomBits, rightPositions));
+ leftPositions = leftTermsEnum.postings(randomBits, leftPositions, PostingsEnum.FLAG_ALL),
+ rightPositions = rightTermsEnum.postings(randomBits, rightPositions, PostingsEnum.FLAG_ALL));
// with freqs:
- assertDocsEnumEquals(info, leftDocs = leftTermsEnum.docs(null, leftDocs),
- rightDocs = rightTermsEnum.docs(null, rightDocs),
+ assertDocsEnumEquals(info, leftDocs = leftTermsEnum.postings(null, leftDocs),
+ rightDocs = rightTermsEnum.postings(null, rightDocs),
true);
- assertDocsEnumEquals(info, leftDocs = leftTermsEnum.docs(randomBits, leftDocs),
- rightDocs = rightTermsEnum.docs(randomBits, rightDocs),
+ assertDocsEnumEquals(info, leftDocs = leftTermsEnum.postings(randomBits, leftDocs),
+ rightDocs = rightTermsEnum.postings(randomBits, rightDocs),
true);
// w/o freqs:
- assertDocsEnumEquals(info, leftDocs = leftTermsEnum.docs(null, leftDocs, DocsEnum.FLAG_NONE),
- rightDocs = rightTermsEnum.docs(null, rightDocs, DocsEnum.FLAG_NONE),
+ assertDocsEnumEquals(info, leftDocs = leftTermsEnum.postings(null, leftDocs, PostingsEnum.FLAG_NONE),
+ rightDocs = rightTermsEnum.postings(null, rightDocs, PostingsEnum.FLAG_NONE),
false);
- assertDocsEnumEquals(info, leftDocs = leftTermsEnum.docs(randomBits, leftDocs, DocsEnum.FLAG_NONE),
- rightDocs = rightTermsEnum.docs(randomBits, rightDocs, DocsEnum.FLAG_NONE),
+ assertDocsEnumEquals(info, leftDocs = leftTermsEnum.postings(randomBits, leftDocs, PostingsEnum.FLAG_NONE),
+ rightDocs = rightTermsEnum.postings(randomBits, rightDocs, PostingsEnum.FLAG_NONE),
false);
// with freqs:
assertDocsSkippingEquals(info, leftReader, leftTermsEnum.docFreq(),
- leftDocs = leftTermsEnum.docs(null, leftDocs),
- rightDocs = rightTermsEnum.docs(null, rightDocs),
+ leftDocs = leftTermsEnum.postings(null, leftDocs),
+ rightDocs = rightTermsEnum.postings(null, rightDocs),
true);
assertDocsSkippingEquals(info, leftReader, leftTermsEnum.docFreq(),
- leftDocs = leftTermsEnum.docs(randomBits, leftDocs),
- rightDocs = rightTermsEnum.docs(randomBits, rightDocs),
+ leftDocs = leftTermsEnum.postings(randomBits, leftDocs),
+ rightDocs = rightTermsEnum.postings(randomBits, rightDocs),
true);
// w/o freqs:
assertDocsSkippingEquals(info, leftReader, leftTermsEnum.docFreq(),
- leftDocs = leftTermsEnum.docs(null, leftDocs, DocsEnum.FLAG_NONE),
- rightDocs = rightTermsEnum.docs(null, rightDocs, DocsEnum.FLAG_NONE),
+ leftDocs = leftTermsEnum.postings(null, leftDocs, PostingsEnum.FLAG_NONE),
+ rightDocs = rightTermsEnum.postings(null, rightDocs, PostingsEnum.FLAG_NONE),
false);
assertDocsSkippingEquals(info, leftReader, leftTermsEnum.docFreq(),
- leftDocs = leftTermsEnum.docs(randomBits, leftDocs, DocsEnum.FLAG_NONE),
- rightDocs = rightTermsEnum.docs(randomBits, rightDocs, DocsEnum.FLAG_NONE),
+ leftDocs = leftTermsEnum.postings(randomBits, leftDocs, PostingsEnum.FLAG_NONE),
+ rightDocs = rightTermsEnum.postings(randomBits, rightDocs, PostingsEnum.FLAG_NONE),
false);
}
}
@@ -2004,7 +1949,7 @@ public abstract class LuceneTestCase extends Assert {
/**
* checks docs + freqs + positions + payloads, sequentially
*/
- public void assertDocsAndPositionsEnumEquals(String info, DocsAndPositionsEnum leftDocs, DocsAndPositionsEnum rightDocs) throws IOException {
+ public void assertDocsAndPositionsEnumEquals(String info, PostingsEnum leftDocs, PostingsEnum rightDocs) throws IOException {
if (leftDocs == null || rightDocs == null) {
assertNull(leftDocs);
assertNull(rightDocs);
@@ -2030,7 +1975,7 @@ public abstract class LuceneTestCase extends Assert {
/**
* checks docs + freqs, sequentially
*/
- public void assertDocsEnumEquals(String info, DocsEnum leftDocs, DocsEnum rightDocs, boolean hasFreqs) throws IOException {
+ public void assertDocsEnumEquals(String info, PostingsEnum leftDocs, PostingsEnum rightDocs, boolean hasFreqs) throws IOException {
if (leftDocs == null) {
assertNull(rightDocs);
return;
@@ -2050,7 +1995,7 @@ public abstract class LuceneTestCase extends Assert {
/**
* checks advancing docs
*/
- public void assertDocsSkippingEquals(String info, IndexReader leftReader, int docFreq, DocsEnum leftDocs, DocsEnum rightDocs, boolean hasFreqs) throws IOException {
+ public void assertDocsSkippingEquals(String info, IndexReader leftReader, int docFreq, PostingsEnum leftDocs, PostingsEnum rightDocs, boolean hasFreqs) throws IOException {
if (leftDocs == null) {
assertNull(rightDocs);
return;
@@ -2083,7 +2028,7 @@ public abstract class LuceneTestCase extends Assert {
/**
* checks advancing docs + positions
*/
- public void assertPositionsSkippingEquals(String info, IndexReader leftReader, int docFreq, DocsAndPositionsEnum leftDocs, DocsAndPositionsEnum rightDocs) throws IOException {
+ public void assertPositionsSkippingEquals(String info, IndexReader leftReader, int docFreq, PostingsEnum leftDocs, PostingsEnum rightDocs) throws IOException {
if (leftDocs == null || rightDocs == null) {
assertNull(leftDocs);
assertNull(rightDocs);
diff --git a/lucene/test-framework/src/java/org/apache/lucene/util/TestUtil.java b/lucene/test-framework/src/java/org/apache/lucene/util/TestUtil.java
index aa1f18a7266..015209eb2a1 100644
--- a/lucene/test-framework/src/java/org/apache/lucene/util/TestUtil.java
+++ b/lucene/test-framework/src/java/org/apache/lucene/util/TestUtil.java
@@ -46,6 +46,8 @@ import java.util.regex.PatternSyntaxException;
import java.util.zip.ZipEntry;
import java.util.zip.ZipInputStream;
+import com.carrotsearch.randomizedtesting.generators.RandomInts;
+import com.carrotsearch.randomizedtesting.generators.RandomPicks;
import org.apache.lucene.codecs.Codec;
import org.apache.lucene.codecs.DocValuesFormat;
import org.apache.lucene.codecs.PostingsFormat;
@@ -71,8 +73,7 @@ import org.apache.lucene.index.CheckIndex;
import org.apache.lucene.index.ConcurrentMergeScheduler;
import org.apache.lucene.index.DirectoryReader;
import org.apache.lucene.index.DocValuesType;
-import org.apache.lucene.index.DocsAndPositionsEnum;
-import org.apache.lucene.index.DocsEnum;
+import org.apache.lucene.index.PostingsEnum;
import org.apache.lucene.index.FilterLeafReader;
import org.apache.lucene.index.IndexReader;
import org.apache.lucene.index.IndexWriter;
@@ -92,8 +93,8 @@ import org.apache.lucene.index.TieredMergePolicy;
import org.apache.lucene.mockfile.FilterFileSystem;
import org.apache.lucene.mockfile.WindowsFS;
import org.apache.lucene.search.FieldDoc;
-import org.apache.lucene.search.FilteredQuery.FilterStrategy;
import org.apache.lucene.search.FilteredQuery;
+import org.apache.lucene.search.FilteredQuery.FilterStrategy;
import org.apache.lucene.search.ScoreDoc;
import org.apache.lucene.search.TopDocs;
import org.apache.lucene.store.Directory;
@@ -102,8 +103,6 @@ import org.apache.lucene.store.FilterDirectory;
import org.apache.lucene.store.NoLockFactory;
import org.junit.Assert;
-import com.carrotsearch.randomizedtesting.generators.RandomInts;
-import com.carrotsearch.randomizedtesting.generators.RandomPicks;
/**
* General utility methods for Lucene unit tests.
@@ -997,7 +996,7 @@ public final class TestUtil {
// Returns a DocsEnum, but randomly sometimes uses a
// DocsAndFreqsEnum, DocsAndPositionsEnum. Returns null
// if field/term doesn't exist:
- public static DocsEnum docs(Random random, IndexReader r, String field, BytesRef term, Bits liveDocs, DocsEnum reuse, int flags) throws IOException {
+ public static PostingsEnum docs(Random random, IndexReader r, String field, BytesRef term, Bits liveDocs, PostingsEnum reuse, int flags) throws IOException {
final Terms terms = MultiFields.getTerms(r, field);
if (terms == null) {
return null;
@@ -1011,25 +1010,24 @@ public final class TestUtil {
// Returns a DocsEnum from a positioned TermsEnum, but
// randomly sometimes uses a DocsAndFreqsEnum, DocsAndPositionsEnum.
- public static DocsEnum docs(Random random, TermsEnum termsEnum, Bits liveDocs, DocsEnum reuse, int flags) throws IOException {
+ public static PostingsEnum docs(Random random, TermsEnum termsEnum, Bits liveDocs, PostingsEnum reuse, int flags) throws IOException {
if (random.nextBoolean()) {
if (random.nextBoolean()) {
final int posFlags;
switch (random.nextInt(4)) {
- case 0: posFlags = 0; break;
- case 1: posFlags = DocsAndPositionsEnum.FLAG_OFFSETS; break;
- case 2: posFlags = DocsAndPositionsEnum.FLAG_PAYLOADS; break;
- default: posFlags = DocsAndPositionsEnum.FLAG_OFFSETS | DocsAndPositionsEnum.FLAG_PAYLOADS; break;
+ case 0: posFlags = PostingsEnum.FLAG_POSITIONS; break;
+ case 1: posFlags = PostingsEnum.FLAG_OFFSETS; break;
+ case 2: posFlags = PostingsEnum.FLAG_PAYLOADS; break;
+ default: posFlags = PostingsEnum.FLAG_OFFSETS | PostingsEnum.FLAG_PAYLOADS; break;
}
- // TODO: cast to DocsAndPositionsEnum?
- DocsAndPositionsEnum docsAndPositions = termsEnum.docsAndPositions(liveDocs, null, posFlags);
+ PostingsEnum docsAndPositions = termsEnum.postings(liveDocs, null, posFlags);
if (docsAndPositions != null) {
return docsAndPositions;
}
}
- flags |= DocsEnum.FLAG_FREQS;
+ flags |= PostingsEnum.FLAG_FREQS;
}
- return termsEnum.docs(liveDocs, reuse, flags);
+ return termsEnum.postings(liveDocs, reuse, flags);
}
public static CharSequence stringToCharSequence(String string, Random random) {
diff --git a/solr/core/src/java/org/apache/solr/handler/admin/LukeRequestHandler.java b/solr/core/src/java/org/apache/solr/handler/admin/LukeRequestHandler.java
index a4cdc485743..9cb3ac0dbc4 100644
--- a/solr/core/src/java/org/apache/solr/handler/admin/LukeRequestHandler.java
+++ b/solr/core/src/java/org/apache/solr/handler/admin/LukeRequestHandler.java
@@ -385,18 +385,18 @@ public class LukeRequestHandler extends RequestHandlerBase
// Is there a better way to do this? Shouldn't actually be very costly
// to do it this way.
private static StoredDocument getFirstLiveDoc(Terms terms, LeafReader reader) throws IOException {
- DocsEnum docsEnum = null;
+ PostingsEnum postingsEnum = null;
TermsEnum termsEnum = terms.iterator(null);
BytesRef text;
// Deal with the chance that the first bunch of terms are in deleted documents. Is there a better way?
- for (int idx = 0; idx < 1000 && docsEnum == null; ++idx) {
+ for (int idx = 0; idx < 1000 && postingsEnum == null; ++idx) {
text = termsEnum.next();
if (text == null) { // Ran off the end of the terms enum without finding any live docs with that field in them.
return null;
}
- docsEnum = termsEnum.docs(reader.getLiveDocs(), docsEnum, DocsEnum.FLAG_NONE);
- if (docsEnum.nextDoc() != DocIdSetIterator.NO_MORE_DOCS) {
- return reader.document(docsEnum.docID());
+ postingsEnum = termsEnum.postings(reader.getLiveDocs(), postingsEnum, PostingsEnum.FLAG_NONE);
+ if (postingsEnum.nextDoc() != DocIdSetIterator.NO_MORE_DOCS) {
+ return reader.document(postingsEnum.docID());
}
}
return null;
diff --git a/solr/core/src/java/org/apache/solr/handler/component/ExpandComponent.java b/solr/core/src/java/org/apache/solr/handler/component/ExpandComponent.java
index b4531d5fd51..d5dad88a9ab 100644
--- a/solr/core/src/java/org/apache/solr/handler/component/ExpandComponent.java
+++ b/solr/core/src/java/org/apache/solr/handler/component/ExpandComponent.java
@@ -21,21 +21,21 @@ import java.io.IOException;
import java.net.MalformedURLException;
import java.net.URL;
import java.util.ArrayList;
+import java.util.Arrays;
import java.util.HashMap;
+import java.util.Iterator;
import java.util.List;
import java.util.Map;
-import java.util.Iterator;
-import java.util.Arrays;
import com.carrotsearch.hppc.IntObjectOpenHashMap;
-import com.carrotsearch.hppc.LongOpenHashSet;
+import com.carrotsearch.hppc.IntOpenHashSet;
+import com.carrotsearch.hppc.LongObjectMap;
import com.carrotsearch.hppc.LongObjectOpenHashMap;
+import com.carrotsearch.hppc.LongOpenHashSet;
import com.carrotsearch.hppc.cursors.IntObjectCursor;
import com.carrotsearch.hppc.cursors.LongCursor;
import com.carrotsearch.hppc.cursors.LongObjectCursor;
-import com.carrotsearch.hppc.IntOpenHashSet;
import com.carrotsearch.hppc.cursors.ObjectCursor;
-import com.carrotsearch.hppc.LongObjectMap;
import org.apache.lucene.index.DocValues;
import org.apache.lucene.index.DocValuesType;
import org.apache.lucene.index.FieldInfo;
@@ -73,11 +73,11 @@ import org.apache.solr.core.PluginInfo;
import org.apache.solr.core.SolrCore;
import org.apache.solr.request.SolrQueryRequest;
import org.apache.solr.schema.FieldType;
+import org.apache.solr.schema.StrField;
+import org.apache.solr.schema.TrieDoubleField;
import org.apache.solr.schema.TrieFloatField;
import org.apache.solr.schema.TrieIntField;
import org.apache.solr.schema.TrieLongField;
-import org.apache.solr.schema.TrieDoubleField;
-import org.apache.solr.schema.StrField;
import org.apache.solr.search.CollapsingQParserPlugin;
import org.apache.solr.search.DocIterator;
import org.apache.solr.search.DocList;
diff --git a/solr/core/src/java/org/apache/solr/handler/component/QueryComponent.java b/solr/core/src/java/org/apache/solr/handler/component/QueryComponent.java
index a3520ea57a6..89835d70b9c 100644
--- a/solr/core/src/java/org/apache/solr/handler/component/QueryComponent.java
+++ b/solr/core/src/java/org/apache/solr/handler/component/QueryComponent.java
@@ -17,6 +17,21 @@
package org.apache.solr.handler.component;
+import java.io.IOException;
+import java.io.PrintWriter;
+import java.io.StringWriter;
+import java.net.URL;
+import java.util.ArrayList;
+import java.util.Arrays;
+import java.util.Collection;
+import java.util.Collections;
+import java.util.HashMap;
+import java.util.Iterator;
+import java.util.LinkedHashMap;
+import java.util.List;
+import java.util.Locale;
+import java.util.Map;
+
import org.apache.lucene.index.IndexReaderContext;
import org.apache.lucene.index.LeafReaderContext;
import org.apache.lucene.index.ReaderUtil;
@@ -94,20 +109,6 @@ import org.apache.solr.util.SolrPluginUtils;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
-import java.io.IOException;
-import java.io.PrintWriter;
-import java.io.StringWriter;
-import java.net.URL;
-import java.util.ArrayList;
-import java.util.Arrays;
-import java.util.Collection;
-import java.util.Collections;
-import java.util.HashMap;
-import java.util.Iterator;
-import java.util.LinkedHashMap;
-import java.util.List;
-import java.util.Locale;
-import java.util.Map;
/**
* TODO!
@@ -1348,6 +1349,26 @@ public class QueryComponent extends SearchComponent
throw new UnsupportedOperationException();
}
+ @Override
+ public int nextPosition() throws IOException {
+ throw new UnsupportedOperationException();
+ }
+
+ @Override
+ public int startOffset() throws IOException {
+ throw new UnsupportedOperationException();
+ }
+
+ @Override
+ public int endOffset() throws IOException {
+ throw new UnsupportedOperationException();
+ }
+
+ @Override
+ public BytesRef getPayload() throws IOException {
+ throw new UnsupportedOperationException();
+ }
+
@Override
public int nextDoc() throws IOException {
throw new UnsupportedOperationException();
diff --git a/solr/core/src/java/org/apache/solr/handler/component/QueryElevationComponent.java b/solr/core/src/java/org/apache/solr/handler/component/QueryElevationComponent.java
index 3c5b24f1dba..9bbb5a4b4ea 100644
--- a/solr/core/src/java/org/apache/solr/handler/component/QueryElevationComponent.java
+++ b/solr/core/src/java/org/apache/solr/handler/component/QueryElevationComponent.java
@@ -24,7 +24,7 @@ import org.apache.lucene.analysis.TokenStream;
import org.apache.lucene.analysis.tokenattributes.CharTermAttribute;
import org.apache.lucene.index.LeafReader;
import org.apache.lucene.index.LeafReaderContext;
-import org.apache.lucene.index.DocsEnum;
+import org.apache.lucene.index.PostingsEnum;
import org.apache.lucene.index.Fields;
import org.apache.lucene.index.IndexReader;
import org.apache.lucene.index.Term;
@@ -563,7 +563,7 @@ public class QueryElevationComponent extends SearchComponent implements SolrCore
List<LeafReaderContext> leaves = indexSearcher.getTopReaderContext().leaves();
TermsEnum termsEnum = null;
- DocsEnum docsEnum = null;
+ PostingsEnum postingsEnum = null;
for(LeafReaderContext leaf : leaves) {
LeafReader reader = leaf.reader();
int docBase = leaf.docBase;
@@ -574,9 +574,9 @@ public class QueryElevationComponent extends SearchComponent implements SolrCore
while(it.hasNext()) {
BytesRef ref = it.next();
if(termsEnum.seekExact(ref)) {
- docsEnum = termsEnum.docs(liveDocs, docsEnum);
- int doc = docsEnum.nextDoc();
- if(doc != DocsEnum.NO_MORE_DOCS) {
+ postingsEnum = termsEnum.postings(liveDocs, postingsEnum);
+ int doc = postingsEnum.nextDoc();
+ if(doc != PostingsEnum.NO_MORE_DOCS) {
//Found the document.
int p = boosted.get(ref);
boostDocs.put(doc+docBase, p);
@@ -637,7 +637,7 @@ public class QueryElevationComponent extends SearchComponent implements SolrCore
private int bottomVal;
private int topVal;
private TermsEnum termsEnum;
- private DocsEnum docsEnum;
+ private PostingsEnum postingsEnum;
Set<String> seen = new HashSet<>(elevations.ids.size());
@Override
@@ -692,13 +692,13 @@ public class QueryElevationComponent extends SearchComponent implements SolrCore
for (String id : elevations.ids) {
term.copyChars(id);
if (seen.contains(id) == false && termsEnum.seekExact(term.get())) {
- docsEnum = termsEnum.docs(liveDocs, docsEnum, DocsEnum.FLAG_NONE);
- if (docsEnum != null) {
- int docId = docsEnum.nextDoc();
+ postingsEnum = termsEnum.postings(liveDocs, postingsEnum, PostingsEnum.FLAG_NONE);
+ if (postingsEnum != null) {
+ int docId = postingsEnum.nextDoc();
if (docId == DocIdSetIterator.NO_MORE_DOCS ) continue; // must have been deleted
termValues[ordSet.put(docId)] = term.toBytesRef();
seen.add(id);
- assert docsEnum.nextDoc() == DocIdSetIterator.NO_MORE_DOCS;
+ assert postingsEnum.nextDoc() == DocIdSetIterator.NO_MORE_DOCS;
}
}
}
diff --git a/solr/core/src/java/org/apache/solr/handler/component/TermVectorComponent.java b/solr/core/src/java/org/apache/solr/handler/component/TermVectorComponent.java
index e807daab6ba..2df05d27e9a 100644
--- a/solr/core/src/java/org/apache/solr/handler/component/TermVectorComponent.java
+++ b/solr/core/src/java/org/apache/solr/handler/component/TermVectorComponent.java
@@ -1,19 +1,17 @@
package org.apache.solr.handler.component;
import java.io.IOException;
-import java.util.Arrays;
import java.util.ArrayList;
-import java.util.Collection;
+import java.util.Arrays;
import java.util.Collections;
import java.util.HashMap;
-import java.util.LinkedHashSet;
-import java.util.Set;
import java.util.Iterator;
+import java.util.LinkedHashSet;
import java.util.List;
import java.util.Map;
-import java.util.Map.Entry;
+import java.util.Set;
-import org.apache.lucene.index.DocsAndPositionsEnum;
+import org.apache.lucene.index.PostingsEnum;
import org.apache.lucene.index.FieldInfo;
import org.apache.lucene.index.Fields;
import org.apache.lucene.index.IndexReader;
@@ -24,17 +22,15 @@ import org.apache.lucene.index.TermsEnum;
import org.apache.lucene.util.BytesRef;
import org.apache.solr.common.SolrException;
import org.apache.solr.common.params.CommonParams;
-import org.apache.solr.common.params.ModifiableSolrParams;
import org.apache.solr.common.params.SolrParams;
import org.apache.solr.common.params.TermVectorParams;
import org.apache.solr.common.util.NamedList;
-import org.apache.solr.common.util.StrUtils;
import org.apache.solr.core.SolrCore;
import org.apache.solr.schema.IndexSchema;
import org.apache.solr.schema.SchemaField;
-import org.apache.solr.search.ReturnFields;
import org.apache.solr.search.DocList;
import org.apache.solr.search.DocListAndSet;
+import org.apache.solr.search.ReturnFields;
import org.apache.solr.search.SolrIndexSearcher;
import org.apache.solr.search.SolrReturnFields;
import org.apache.solr.util.SolrPluginUtils;
@@ -335,7 +331,7 @@ public class TermVectorComponent extends SearchComponent implements SolrCoreAwar
docNL.add(field, fieldNL);
BytesRef text;
- DocsAndPositionsEnum dpEnum = null;
+ PostingsEnum dpEnum = null;
while((text = termsEnum.next()) != null) {
String term = text.utf8ToString();
NamedList