mirror of https://github.com/apache/lucene.git
LUCENE-7875: Moved MultiFields static methods to MultiTerms, FieldInfos and MultiBits.
MultiBits is now public and has getLiveDocs.
parent ed8a395948
commit fd9164801e
@@ -127,6 +127,12 @@ Changes in Runtime Behavior
   box anymore. In order to highlight on Block-Join Queries a custom WeightedSpanTermExtractor / FieldQuery
   should be used. (Simon Willnauer, Jim Ferenczi, Julie Tibshirani)
 
+* LUCENE-7875: Moved MultiFields static methods out of the class. getLiveDocs is now
+  in MultiBits which is now public. getMergedFieldInfos and getIndexedFields are now in
+  FieldInfos. getTerms is now in MultiTerms. getTermPositionsEnum and getTermDocsEnum
+  were collapsed and renamed to just getTermPostingsEnum and moved to MultiTerms.
+  (David Smiley)
+
 New Features
 
 * LUCENE-8340: LongPoint#newDistanceQuery may be used to boost scores based on
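
For callers migrating off MultiFields, a rough before/after sketch follows; it assumes an already-open IndexReader named reader and an indexed text field named "body" (the class, method, field, and term names are illustrative, not part of this commit).

    import java.io.IOException;
    import java.util.Collection;

    import org.apache.lucene.index.FieldInfos;
    import org.apache.lucene.index.IndexReader;
    import org.apache.lucene.index.MultiBits;
    import org.apache.lucene.index.MultiTerms;
    import org.apache.lucene.index.PostingsEnum;
    import org.apache.lucene.index.Terms;
    import org.apache.lucene.util.Bits;
    import org.apache.lucene.util.BytesRef;

    // Hypothetical caller-side sketch of the moves described in the CHANGES entry above.
    class MigrationSketch {
      void migrate(IndexReader reader) throws IOException {
        Bits liveDocs = MultiBits.getLiveDocs(reader);                          // was MultiFields.getLiveDocs(reader)
        Terms terms = MultiTerms.getTerms(reader, "body");                      // was MultiFields.getTerms(reader, "body")
        FieldInfos fieldInfos = FieldInfos.getMergedFieldInfos(reader);         // was MultiFields.getMergedFieldInfos(reader)
        Collection<String> indexedFields = FieldInfos.getIndexedFields(reader); // was MultiFields.getIndexedFields(reader)
        // getTermDocsEnum and getTermPositionsEnum collapse into one method; the flags argument picks the detail level.
        PostingsEnum postings = MultiTerms.getTermPostingsEnum(reader, "body", new BytesRef("lucene"), PostingsEnum.FREQS);
      }
    }
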
@ -17,14 +17,21 @@
|
|||
package org.apache.lucene.analysis.query;
|
||||
|
||||
import java.io.IOException;
|
||||
import java.util.*;
|
||||
import java.util.ArrayList;
|
||||
import java.util.Collection;
|
||||
import java.util.HashMap;
|
||||
import java.util.HashSet;
|
||||
import java.util.List;
|
||||
import java.util.Map;
|
||||
import java.util.Set;
|
||||
|
||||
import org.apache.lucene.analysis.Analyzer;
|
||||
import org.apache.lucene.analysis.AnalyzerWrapper;
|
||||
import org.apache.lucene.analysis.CharArraySet;
|
||||
import org.apache.lucene.analysis.StopFilter;
|
||||
import org.apache.lucene.index.FieldInfos;
|
||||
import org.apache.lucene.index.IndexReader;
|
||||
import org.apache.lucene.index.MultiFields;
|
||||
import org.apache.lucene.index.MultiTerms;
|
||||
import org.apache.lucene.index.Term;
|
||||
import org.apache.lucene.index.Terms;
|
||||
import org.apache.lucene.index.TermsEnum;
|
||||
|
@ -78,7 +85,7 @@ public final class QueryAutoStopWordAnalyzer extends AnalyzerWrapper {
|
|||
Analyzer delegate,
|
||||
IndexReader indexReader,
|
||||
int maxDocFreq) throws IOException {
|
||||
this(delegate, indexReader, MultiFields.getIndexedFields(indexReader), maxDocFreq);
|
||||
this(delegate, indexReader, FieldInfos.getIndexedFields(indexReader), maxDocFreq);
|
||||
}
|
||||
|
||||
/**
|
||||
|
@ -96,7 +103,7 @@ public final class QueryAutoStopWordAnalyzer extends AnalyzerWrapper {
|
|||
Analyzer delegate,
|
||||
IndexReader indexReader,
|
||||
float maxPercentDocs) throws IOException {
|
||||
this(delegate, indexReader, MultiFields.getIndexedFields(indexReader), maxPercentDocs);
|
||||
this(delegate, indexReader, FieldInfos.getIndexedFields(indexReader), maxPercentDocs);
|
||||
}
|
||||
|
||||
/**
|
||||
|
@ -140,7 +147,7 @@ public final class QueryAutoStopWordAnalyzer extends AnalyzerWrapper {
|
|||
|
||||
for (String field : fields) {
|
||||
Set<String> stopWords = new HashSet<>();
|
||||
Terms terms = MultiFields.getTerms(indexReader, field);
|
||||
Terms terms = MultiTerms.getTerms(indexReader, field);
|
||||
CharsRefBuilder spare = new CharsRefBuilder();
|
||||
if (terms != null) {
|
||||
TermsEnum te = terms.iterator();
|
||||
|
|
|
@ -17,25 +17,25 @@
|
|||
package org.apache.lucene.analysis.standard;
|
||||
|
||||
|
||||
import java.io.IOException;
|
||||
import java.util.Arrays;
|
||||
|
||||
import org.apache.lucene.analysis.Analyzer;
|
||||
import org.apache.lucene.analysis.BaseTokenStreamTestCase;
|
||||
import org.apache.lucene.document.Document;
|
||||
import org.apache.lucene.document.Field;
|
||||
import org.apache.lucene.document.TextField;
|
||||
import org.apache.lucene.index.DirectoryReader;
|
||||
import org.apache.lucene.index.PostingsEnum;
|
||||
import org.apache.lucene.index.IndexReader;
|
||||
import org.apache.lucene.index.IndexWriter;
|
||||
import org.apache.lucene.index.IndexWriterConfig;
|
||||
import org.apache.lucene.index.MultiFields;
|
||||
import org.apache.lucene.index.MultiTerms;
|
||||
import org.apache.lucene.index.PostingsEnum;
|
||||
import org.apache.lucene.index.Term;
|
||||
import org.apache.lucene.search.DocIdSetIterator;
|
||||
import org.apache.lucene.store.RAMDirectory;
|
||||
import org.apache.lucene.util.BytesRef;
|
||||
|
||||
import java.io.IOException;
|
||||
import java.util.Arrays;
|
||||
|
||||
/** tests for classicanalyzer */
|
||||
public class TestClassicAnalyzer extends BaseTokenStreamTestCase {
|
||||
|
||||
|
@ -299,7 +299,7 @@ public class TestClassicAnalyzer extends BaseTokenStreamTestCase {
|
|||
|
||||
// Make sure position is still incremented when
|
||||
// massive term is skipped:
|
||||
PostingsEnum tps = MultiFields.getTermPositionsEnum(reader,
|
||||
PostingsEnum tps = MultiTerms.getTermPostingsEnum(reader,
|
||||
"content",
|
||||
new BytesRef("another"));
|
||||
assertTrue(tps.nextDoc() != DocIdSetIterator.NO_MORE_DOCS);
|
||||
|
|
|
@ -881,17 +881,17 @@ public class TestBackwardsCompatibility extends LuceneTestCase {
|
|||
TestUtil.checkIndex(dir);
|
||||
|
||||
// true if this is a 4.0+ index
|
||||
final boolean is40Index = MultiFields.getMergedFieldInfos(reader).fieldInfo("content5") != null;
|
||||
final boolean is40Index = FieldInfos.getMergedFieldInfos(reader).fieldInfo("content5") != null;
|
||||
// true if this is a 4.2+ index
|
||||
final boolean is42Index = MultiFields.getMergedFieldInfos(reader).fieldInfo("dvSortedSet") != null;
|
||||
final boolean is42Index = FieldInfos.getMergedFieldInfos(reader).fieldInfo("dvSortedSet") != null;
|
||||
// true if this is a 4.9+ index
|
||||
final boolean is49Index = MultiFields.getMergedFieldInfos(reader).fieldInfo("dvSortedNumeric") != null;
|
||||
final boolean is49Index = FieldInfos.getMergedFieldInfos(reader).fieldInfo("dvSortedNumeric") != null;
|
||||
// true if this index has points (>= 6.0)
|
||||
final boolean hasPoints = MultiFields.getMergedFieldInfos(reader).fieldInfo("intPoint1d") != null;
|
||||
final boolean hasPoints = FieldInfos.getMergedFieldInfos(reader).fieldInfo("intPoint1d") != null;
|
||||
|
||||
assert is40Index;
|
||||
|
||||
final Bits liveDocs = MultiFields.getLiveDocs(reader);
|
||||
final Bits liveDocs = MultiBits.getLiveDocs(reader);
|
||||
|
||||
for(int i=0;i<35;i++) {
|
||||
if (liveDocs.get(i)) {
|
||||
|
@ -1257,7 +1257,7 @@ public class TestBackwardsCompatibility extends LuceneTestCase {
|
|||
for (String name : oldNames) {
|
||||
Directory dir = oldIndexDirs.get(name);
|
||||
IndexReader r = DirectoryReader.open(dir);
|
||||
TermsEnum terms = MultiFields.getTerms(r, "content").iterator();
|
||||
TermsEnum terms = MultiTerms.getTerms(r, "content").iterator();
|
||||
BytesRef t = terms.next();
|
||||
assertNotNull(t);
|
||||
|
||||
|
|
|
@ -24,7 +24,7 @@ import org.apache.lucene.benchmark.byTask.feeds.QueryMaker;
|
|||
import org.apache.lucene.document.Document;
|
||||
import org.apache.lucene.index.DirectoryReader;
|
||||
import org.apache.lucene.index.IndexReader;
|
||||
import org.apache.lucene.index.MultiFields;
|
||||
import org.apache.lucene.index.MultiBits;
|
||||
import org.apache.lucene.search.Collector;
|
||||
import org.apache.lucene.search.IndexSearcher;
|
||||
import org.apache.lucene.search.Query;
|
||||
|
@ -88,7 +88,7 @@ public abstract class ReadTask extends PerfTask {
|
|||
// optionally warm and add num docs traversed to count
|
||||
if (withWarm()) {
|
||||
Document doc = null;
|
||||
Bits liveDocs = MultiFields.getLiveDocs(reader);
|
||||
Bits liveDocs = MultiBits.getLiveDocs(reader);
|
||||
for (int m = 0; m < reader.maxDoc(); m++) {
|
||||
if (null == liveDocs || liveDocs.get(m)) {
|
||||
doc = reader.document(m);
|
||||
|
|
|
@ -21,9 +21,9 @@ import java.nio.file.Paths;
|
|||
|
||||
import org.apache.lucene.index.DirectoryReader;
|
||||
import org.apache.lucene.index.IndexReader;
|
||||
import org.apache.lucene.index.MultiTerms;
|
||||
import org.apache.lucene.index.TermsEnum;
|
||||
import org.apache.lucene.index.Terms;
|
||||
import org.apache.lucene.index.MultiFields;
|
||||
import org.apache.lucene.store.Directory;
|
||||
import org.apache.lucene.store.FSDirectory;
|
||||
import org.apache.lucene.util.PriorityQueue;
|
||||
|
@ -90,7 +90,7 @@ public class QualityQueriesFinder {
|
|||
IndexReader ir = DirectoryReader.open(dir);
|
||||
try {
|
||||
int threshold = ir.maxDoc() / 10; // ignore words too common.
|
||||
Terms terms = MultiFields.getTerms(ir, field);
|
||||
Terms terms = MultiTerms.getTerms(ir, field);
|
||||
if (terms != null) {
|
||||
TermsEnum termsEnum = terms.iterator();
|
||||
while (termsEnum.next() != null) {
|
||||
|
|
|
@ -38,6 +38,7 @@ import org.apache.lucene.benchmark.byTask.tasks.WriteLineDocTask;
|
|||
import org.apache.lucene.collation.CollationKeyAnalyzer;
|
||||
import org.apache.lucene.facet.taxonomy.TaxonomyReader;
|
||||
import org.apache.lucene.index.DirectoryReader;
|
||||
import org.apache.lucene.index.FieldInfos;
|
||||
import org.apache.lucene.index.Fields;
|
||||
import org.apache.lucene.index.IndexReader;
|
||||
import org.apache.lucene.index.IndexWriter;
|
||||
|
@ -45,7 +46,7 @@ import org.apache.lucene.index.IndexWriterConfig;
|
|||
import org.apache.lucene.index.IndexWriterConfig.OpenMode;
|
||||
import org.apache.lucene.index.LogDocMergePolicy;
|
||||
import org.apache.lucene.index.LogMergePolicy;
|
||||
import org.apache.lucene.index.MultiFields;
|
||||
import org.apache.lucene.index.MultiTerms;
|
||||
import org.apache.lucene.index.PostingsEnum;
|
||||
import org.apache.lucene.index.SegmentInfos;
|
||||
import org.apache.lucene.index.SerialMergeScheduler;
|
||||
|
@ -374,13 +375,13 @@ public class TestPerfTasksLogic extends BenchmarkTestCase {
|
|||
|
||||
int totalTokenCount2 = 0;
|
||||
|
||||
Collection<String> fields = MultiFields.getIndexedFields(reader);
|
||||
Collection<String> fields = FieldInfos.getIndexedFields(reader);
|
||||
|
||||
for (String fieldName : fields) {
|
||||
if (fieldName.equals(DocMaker.ID_FIELD) || fieldName.equals(DocMaker.DATE_MSEC_FIELD) || fieldName.equals(DocMaker.TIME_SEC_FIELD)) {
|
||||
continue;
|
||||
}
|
||||
Terms terms = MultiFields.getTerms(reader, fieldName);
|
||||
Terms terms = MultiTerms.getTerms(reader, fieldName);
|
||||
if (terms == null) {
|
||||
continue;
|
||||
}
|
||||
|
|
|
@ -27,7 +27,7 @@ import org.apache.lucene.analysis.Analyzer;
|
|||
import org.apache.lucene.analysis.TokenStream;
|
||||
import org.apache.lucene.analysis.tokenattributes.CharTermAttribute;
|
||||
import org.apache.lucene.index.IndexReader;
|
||||
import org.apache.lucene.index.MultiFields;
|
||||
import org.apache.lucene.index.MultiTerms;
|
||||
import org.apache.lucene.index.Term;
|
||||
import org.apache.lucene.index.Terms;
|
||||
import org.apache.lucene.index.TermsEnum;
|
||||
|
@ -128,7 +128,7 @@ public class BM25NBClassifier implements Classifier<BytesRef> {
|
|||
private List<ClassificationResult<BytesRef>> assignClassNormalizedList(String inputDocument) throws IOException {
|
||||
List<ClassificationResult<BytesRef>> assignedClasses = new ArrayList<>();
|
||||
|
||||
Terms classes = MultiFields.getTerms(indexReader, classFieldName);
|
||||
Terms classes = MultiTerms.getTerms(indexReader, classFieldName);
|
||||
TermsEnum classesEnum = classes.iterator();
|
||||
BytesRef next;
|
||||
String[] tokenizedText = tokenize(inputDocument);
|
||||
|
|
|
@ -28,7 +28,7 @@ import org.apache.lucene.analysis.tokenattributes.CharTermAttribute;
|
|||
import org.apache.lucene.document.Document;
|
||||
import org.apache.lucene.index.IndexReader;
|
||||
import org.apache.lucene.index.IndexableField;
|
||||
import org.apache.lucene.index.MultiFields;
|
||||
import org.apache.lucene.index.MultiTerms;
|
||||
import org.apache.lucene.index.Term;
|
||||
import org.apache.lucene.index.Terms;
|
||||
import org.apache.lucene.index.TermsEnum;
|
||||
|
@ -80,7 +80,7 @@ public class BooleanPerceptronClassifier implements Classifier<Boolean> {
|
|||
*/
|
||||
public BooleanPerceptronClassifier(IndexReader indexReader, Analyzer analyzer, Query query, Integer batchSize,
|
||||
Double bias, String classFieldName, String textFieldName) throws IOException {
|
||||
this.textTerms = MultiFields.getTerms(indexReader, textFieldName);
|
||||
this.textTerms = MultiTerms.getTerms(indexReader, textFieldName);
|
||||
|
||||
if (textTerms == null) {
|
||||
throw new IOException("term vectors need to be available for field " + textFieldName);
|
||||
|
|
|
@ -25,7 +25,7 @@ import java.util.concurrent.ConcurrentHashMap;
|
|||
|
||||
import org.apache.lucene.analysis.Analyzer;
|
||||
import org.apache.lucene.index.IndexReader;
|
||||
import org.apache.lucene.index.MultiFields;
|
||||
import org.apache.lucene.index.MultiTerms;
|
||||
import org.apache.lucene.index.Term;
|
||||
import org.apache.lucene.index.Terms;
|
||||
import org.apache.lucene.index.TermsEnum;
|
||||
|
@ -210,7 +210,7 @@ public class CachingNaiveBayesClassifier extends SimpleNaiveBayesClassifier {
|
|||
// build the cache for the word
|
||||
Map<String, Long> frequencyMap = new HashMap<>();
|
||||
for (String textFieldName : textFieldNames) {
|
||||
TermsEnum termsEnum = MultiFields.getTerms(indexReader, textFieldName).iterator();
|
||||
TermsEnum termsEnum = MultiTerms.getTerms(indexReader, textFieldName).iterator();
|
||||
while (termsEnum.next() != null) {
|
||||
BytesRef term = termsEnum.term();
|
||||
String termText = term.utf8ToString();
|
||||
|
@ -227,7 +227,7 @@ public class CachingNaiveBayesClassifier extends SimpleNaiveBayesClassifier {
|
|||
}
|
||||
|
||||
// fill the class list
|
||||
Terms terms = MultiFields.getTerms(indexReader, classFieldName);
|
||||
Terms terms = MultiTerms.getTerms(indexReader, classFieldName);
|
||||
TermsEnum termsEnum = terms.iterator();
|
||||
while ((termsEnum.next()) != null) {
|
||||
cclasses.add(BytesRef.deepCopyOf(termsEnum.term()));
|
||||
|
@ -236,7 +236,7 @@ public class CachingNaiveBayesClassifier extends SimpleNaiveBayesClassifier {
|
|||
for (BytesRef cclass : cclasses) {
|
||||
double avgNumberOfUniqueTerms = 0;
|
||||
for (String textFieldName : textFieldNames) {
|
||||
terms = MultiFields.getTerms(indexReader, textFieldName);
|
||||
terms = MultiTerms.getTerms(indexReader, textFieldName);
|
||||
long numPostings = terms.getSumDocFreq(); // number of term/doc pairs
|
||||
avgNumberOfUniqueTerms += numPostings / (double) terms.getDocCount();
|
||||
}
|
||||
|
|
|
@ -27,7 +27,7 @@ import org.apache.lucene.analysis.Analyzer;
|
|||
import org.apache.lucene.analysis.TokenStream;
|
||||
import org.apache.lucene.analysis.tokenattributes.CharTermAttribute;
|
||||
import org.apache.lucene.index.IndexReader;
|
||||
import org.apache.lucene.index.MultiFields;
|
||||
import org.apache.lucene.index.MultiTerms;
|
||||
import org.apache.lucene.index.Term;
|
||||
import org.apache.lucene.index.Terms;
|
||||
import org.apache.lucene.index.TermsEnum;
|
||||
|
@ -135,7 +135,7 @@ public class SimpleNaiveBayesClassifier implements Classifier<BytesRef> {
|
|||
protected List<ClassificationResult<BytesRef>> assignClassNormalizedList(String inputDocument) throws IOException {
|
||||
List<ClassificationResult<BytesRef>> assignedClasses = new ArrayList<>();
|
||||
|
||||
Terms classes = MultiFields.getTerms(indexReader, classFieldName);
|
||||
Terms classes = MultiTerms.getTerms(indexReader, classFieldName);
|
||||
if (classes != null) {
|
||||
TermsEnum classesEnum = classes.iterator();
|
||||
BytesRef next;
|
||||
|
@ -160,7 +160,7 @@ public class SimpleNaiveBayesClassifier implements Classifier<BytesRef> {
|
|||
* @throws IOException if accessing to term vectors or search fails
|
||||
*/
|
||||
protected int countDocsWithClass() throws IOException {
|
||||
Terms terms = MultiFields.getTerms(this.indexReader, this.classFieldName);
|
||||
Terms terms = MultiTerms.getTerms(this.indexReader, this.classFieldName);
|
||||
int docCount;
|
||||
if (terms == null || terms.getDocCount() == -1) { // in case codec doesn't support getDocCount
|
||||
TotalHitCountCollector classQueryCountCollector = new TotalHitCountCollector();
|
||||
|
@ -231,7 +231,7 @@ public class SimpleNaiveBayesClassifier implements Classifier<BytesRef> {
|
|||
private double getTextTermFreqForClass(Term term) throws IOException {
|
||||
double avgNumberOfUniqueTerms = 0;
|
||||
for (String textFieldName : textFieldNames) {
|
||||
Terms terms = MultiFields.getTerms(indexReader, textFieldName);
|
||||
Terms terms = MultiTerms.getTerms(indexReader, textFieldName);
|
||||
long numPostings = terms.getSumDocFreq(); // number of term/doc pairs
|
||||
avgNumberOfUniqueTerms += numPostings / (double) terms.getDocCount(); // avg # of unique terms per doc
|
||||
}
|
||||
|
|
|
@ -34,7 +34,7 @@ import org.apache.lucene.classification.SimpleNaiveBayesClassifier;
|
|||
import org.apache.lucene.document.Document;
|
||||
import org.apache.lucene.index.IndexReader;
|
||||
import org.apache.lucene.index.IndexableField;
|
||||
import org.apache.lucene.index.MultiFields;
|
||||
import org.apache.lucene.index.MultiTerms;
|
||||
import org.apache.lucene.index.Term;
|
||||
import org.apache.lucene.index.Terms;
|
||||
import org.apache.lucene.index.TermsEnum;
|
||||
|
@ -103,7 +103,7 @@ public class SimpleNaiveBayesDocumentClassifier extends SimpleNaiveBayesClassifi
|
|||
List<ClassificationResult<BytesRef>> assignedClasses = new ArrayList<>();
|
||||
Map<String, List<String[]>> fieldName2tokensArray = new LinkedHashMap<>();
|
||||
Map<String, Float> fieldName2boost = new LinkedHashMap<>();
|
||||
Terms classes = MultiFields.getTerms(indexReader, classFieldName);
|
||||
Terms classes = MultiTerms.getTerms(indexReader, classFieldName);
|
||||
if (classes != null) {
|
||||
TermsEnum classesEnum = classes.iterator();
|
||||
BytesRef c;
|
||||
|
@ -218,7 +218,7 @@ public class SimpleNaiveBayesDocumentClassifier extends SimpleNaiveBayesClassifi
|
|||
*/
|
||||
private double getTextTermFreqForClass(Term term, String fieldName) throws IOException {
|
||||
double avgNumberOfUniqueTerms;
|
||||
Terms terms = MultiFields.getTerms(indexReader, fieldName);
|
||||
Terms terms = MultiTerms.getTerms(indexReader, fieldName);
|
||||
long numPostings = terms.getSumDocFreq(); // number of term/doc pairs
|
||||
avgNumberOfUniqueTerms = numPostings / (double) terms.getDocCount(); // avg # of unique terms per doc
|
||||
int docsWithC = indexReader.docFreq(term);
|
||||
|
|
|
@ -27,7 +27,7 @@ import org.apache.lucene.analysis.TokenStream;
|
|||
import org.apache.lucene.analysis.tokenattributes.CharTermAttribute;
|
||||
import org.apache.lucene.index.IndexReader;
|
||||
import org.apache.lucene.index.LeafReaderContext;
|
||||
import org.apache.lucene.index.MultiFields;
|
||||
import org.apache.lucene.index.MultiTerms;
|
||||
import org.apache.lucene.index.Term;
|
||||
import org.apache.lucene.index.TermStates;
|
||||
import org.apache.lucene.index.Terms;
|
||||
|
@ -140,7 +140,7 @@ public class NearestFuzzyQuery extends Query {
|
|||
|
||||
private void addTerms(IndexReader reader, FieldVals f, ScoreTermQueue q) throws IOException {
|
||||
if (f.queryString == null) return;
|
||||
final Terms terms = MultiFields.getTerms(reader, f.fieldName);
|
||||
final Terms terms = MultiTerms.getTerms(reader, f.fieldName);
|
||||
if (terms == null) {
|
||||
return;
|
||||
}
|
||||
|
|
|
@ -24,7 +24,7 @@ import org.apache.lucene.analysis.ngram.EdgeNGramTokenFilter;
|
|||
import org.apache.lucene.analysis.reverse.ReverseStringFilter;
|
||||
import org.apache.lucene.classification.utils.ConfusionMatrixGenerator;
|
||||
import org.apache.lucene.index.LeafReader;
|
||||
import org.apache.lucene.index.MultiFields;
|
||||
import org.apache.lucene.index.MultiTerms;
|
||||
import org.apache.lucene.index.Term;
|
||||
import org.apache.lucene.index.Terms;
|
||||
import org.apache.lucene.index.TermsEnum;
|
||||
|
@ -122,7 +122,7 @@ public class BM25NBClassifierTest extends ClassificationTestBase<BytesRef> {
|
|||
assertTrue(precision >= 0d);
|
||||
assertTrue(precision <= 1d);
|
||||
|
||||
Terms terms = MultiFields.getTerms(leafReader, categoryFieldName);
|
||||
Terms terms = MultiTerms.getTerms(leafReader, categoryFieldName);
|
||||
TermsEnum iterator = terms.iterator();
|
||||
BytesRef term;
|
||||
while ((term = iterator.next()) != null) {
|
||||
|
|
|
@ -19,7 +19,7 @@ package org.apache.lucene.classification;
|
|||
import org.apache.lucene.analysis.MockAnalyzer;
|
||||
import org.apache.lucene.classification.utils.ConfusionMatrixGenerator;
|
||||
import org.apache.lucene.index.LeafReader;
|
||||
import org.apache.lucene.index.MultiFields;
|
||||
import org.apache.lucene.index.MultiTerms;
|
||||
import org.apache.lucene.index.Term;
|
||||
import org.apache.lucene.index.Terms;
|
||||
import org.apache.lucene.index.TermsEnum;
|
||||
|
@ -111,7 +111,7 @@ public class BooleanPerceptronClassifierTest extends ClassificationTestBase<Bool
|
|||
assertTrue(precision >= 0d);
|
||||
assertTrue(precision <= 1d);
|
||||
|
||||
Terms terms = MultiFields.getTerms(leafReader, booleanFieldName);
|
||||
Terms terms = MultiTerms.getTerms(leafReader, booleanFieldName);
|
||||
TermsEnum iterator = terms.iterator();
|
||||
BytesRef term;
|
||||
while ((term = iterator.next()) != null) {
|
||||
|
|
|
@ -25,7 +25,7 @@ import org.apache.lucene.analysis.ngram.EdgeNGramTokenFilter;
|
|||
import org.apache.lucene.analysis.reverse.ReverseStringFilter;
|
||||
import org.apache.lucene.classification.utils.ConfusionMatrixGenerator;
|
||||
import org.apache.lucene.index.LeafReader;
|
||||
import org.apache.lucene.index.MultiFields;
|
||||
import org.apache.lucene.index.MultiTerms;
|
||||
import org.apache.lucene.index.Term;
|
||||
import org.apache.lucene.index.Terms;
|
||||
import org.apache.lucene.index.TermsEnum;
|
||||
|
@ -116,7 +116,7 @@ public class CachingNaiveBayesClassifierTest extends ClassificationTestBase<Byte
|
|||
assertTrue(precision >= 0d);
|
||||
assertTrue(precision <= 1d);
|
||||
|
||||
Terms terms = MultiFields.getTerms(leafReader, categoryFieldName);
|
||||
Terms terms = MultiTerms.getTerms(leafReader, categoryFieldName);
|
||||
TermsEnum iterator = terms.iterator();
|
||||
BytesRef term;
|
||||
while ((term = iterator.next()) != null) {
|
||||
|
|
|
@ -19,7 +19,7 @@ package org.apache.lucene.classification;
|
|||
import org.apache.lucene.analysis.MockAnalyzer;
|
||||
import org.apache.lucene.classification.utils.ConfusionMatrixGenerator;
|
||||
import org.apache.lucene.index.LeafReader;
|
||||
import org.apache.lucene.index.MultiFields;
|
||||
import org.apache.lucene.index.MultiTerms;
|
||||
import org.apache.lucene.index.Term;
|
||||
import org.apache.lucene.index.Terms;
|
||||
import org.apache.lucene.index.TermsEnum;
|
||||
|
@ -90,7 +90,7 @@ public class KNearestFuzzyClassifierTest extends ClassificationTestBase<BytesRef
|
|||
assertTrue(precision >= 0d);
|
||||
assertTrue(precision <= 1d);
|
||||
|
||||
Terms terms = MultiFields.getTerms(leafReader, categoryFieldName);
|
||||
Terms terms = MultiTerms.getTerms(leafReader, categoryFieldName);
|
||||
TermsEnum iterator = terms.iterator();
|
||||
BytesRef term;
|
||||
while ((term = iterator.next()) != null) {
|
||||
|
|
|
@ -23,7 +23,7 @@ import org.apache.lucene.analysis.MockAnalyzer;
|
|||
import org.apache.lucene.analysis.en.EnglishAnalyzer;
|
||||
import org.apache.lucene.classification.utils.ConfusionMatrixGenerator;
|
||||
import org.apache.lucene.index.LeafReader;
|
||||
import org.apache.lucene.index.MultiFields;
|
||||
import org.apache.lucene.index.MultiTerms;
|
||||
import org.apache.lucene.index.Term;
|
||||
import org.apache.lucene.index.Terms;
|
||||
import org.apache.lucene.index.TermsEnum;
|
||||
|
@ -147,7 +147,7 @@ public class KNearestNeighborClassifierTest extends ClassificationTestBase<Bytes
|
|||
assertTrue(precision >= 0d);
|
||||
assertTrue(precision <= 1d);
|
||||
|
||||
Terms terms = MultiFields.getTerms(leafReader, categoryFieldName);
|
||||
Terms terms = MultiTerms.getTerms(leafReader, categoryFieldName);
|
||||
TermsEnum iterator = terms.iterator();
|
||||
BytesRef term;
|
||||
while ((term = iterator.next()) != null) {
|
||||
|
|
|
@ -24,7 +24,7 @@ import org.apache.lucene.analysis.ngram.EdgeNGramTokenFilter;
|
|||
import org.apache.lucene.analysis.reverse.ReverseStringFilter;
|
||||
import org.apache.lucene.classification.utils.ConfusionMatrixGenerator;
|
||||
import org.apache.lucene.index.LeafReader;
|
||||
import org.apache.lucene.index.MultiFields;
|
||||
import org.apache.lucene.index.MultiTerms;
|
||||
import org.apache.lucene.index.Term;
|
||||
import org.apache.lucene.index.Terms;
|
||||
import org.apache.lucene.index.TermsEnum;
|
||||
|
@ -124,7 +124,7 @@ public class SimpleNaiveBayesClassifierTest extends ClassificationTestBase<Bytes
|
|||
assertTrue(precision >= 0d);
|
||||
assertTrue(precision <= 1d);
|
||||
|
||||
Terms terms = MultiFields.getTerms(leafReader, categoryFieldName);
|
||||
Terms terms = MultiTerms.getTerms(leafReader, categoryFieldName);
|
||||
TermsEnum iterator = terms.iterator();
|
||||
BytesRef term;
|
||||
while ((term = iterator.next()) != null) {
|
||||
|
|
|
@ -21,7 +21,7 @@ import org.apache.lucene.document.Field;
|
|||
import org.apache.lucene.document.FieldType;
|
||||
import org.apache.lucene.document.TextField;
|
||||
import org.apache.lucene.index.IndexReader;
|
||||
import org.apache.lucene.index.MultiFields;
|
||||
import org.apache.lucene.index.MultiTerms;
|
||||
import org.apache.lucene.index.RandomIndexWriter;
|
||||
import org.apache.lucene.index.Terms;
|
||||
import org.apache.lucene.search.IndexSearcher;
|
||||
|
@ -89,7 +89,7 @@ public class DocToDoubleVectorUtilsTest extends LuceneTestCase {
|
|||
|
||||
@Test
|
||||
public void testSparseFreqDoubleArrayConversion() throws Exception {
|
||||
Terms fieldTerms = MultiFields.getTerms(index, "text");
|
||||
Terms fieldTerms = MultiTerms.getTerms(index, "text");
|
||||
if (fieldTerms != null && fieldTerms.size() != -1) {
|
||||
IndexSearcher indexSearcher = new IndexSearcher(index);
|
||||
for (ScoreDoc scoreDoc : indexSearcher.search(new MatchAllDocsQuery(), Integer.MAX_VALUE).scoreDocs) {
|
||||
|
|
|
@ -31,7 +31,7 @@ import org.apache.lucene.index.DirectoryReader;
|
|||
import org.apache.lucene.index.IndexReader;
|
||||
import org.apache.lucene.index.IndexWriter;
|
||||
import org.apache.lucene.index.IndexWriterConfig;
|
||||
import org.apache.lucene.index.MultiFields;
|
||||
import org.apache.lucene.index.MultiTerms;
|
||||
import org.apache.lucene.index.RandomIndexWriter;
|
||||
import org.apache.lucene.index.TermsEnum;
|
||||
import org.apache.lucene.store.Directory;
|
||||
|
@ -53,7 +53,7 @@ public class TestOrdsBlockTree extends BasePostingsFormatTestCase {
|
|||
doc.add(newTextField("field", "a b c", Field.Store.NO));
|
||||
w.addDocument(doc);
|
||||
IndexReader r = w.getReader();
|
||||
TermsEnum te = MultiFields.getTerms(r, "field").iterator();
|
||||
TermsEnum te = MultiTerms.getTerms(r, "field").iterator();
|
||||
|
||||
// Test next()
|
||||
assertEquals(new BytesRef("a"), te.next());
|
||||
|
@ -114,7 +114,7 @@ public class TestOrdsBlockTree extends BasePostingsFormatTestCase {
|
|||
}
|
||||
w.forceMerge(1);
|
||||
IndexReader r = w.getReader();
|
||||
TermsEnum te = MultiFields.getTerms(r, "field").iterator();
|
||||
TermsEnum te = MultiTerms.getTerms(r, "field").iterator();
|
||||
|
||||
assertTrue(te.seekExact(new BytesRef("mo")));
|
||||
assertEquals(27, te.ord());
|
||||
|
@ -190,7 +190,7 @@ public class TestOrdsBlockTree extends BasePostingsFormatTestCase {
|
|||
}
|
||||
w.forceMerge(1);
|
||||
IndexReader r = w.getReader();
|
||||
TermsEnum te = MultiFields.getTerms(r, "field").iterator();
|
||||
TermsEnum te = MultiTerms.getTerms(r, "field").iterator();
|
||||
|
||||
if (VERBOSE) {
|
||||
while (te.next() != null) {
|
||||
|
@ -250,7 +250,7 @@ public class TestOrdsBlockTree extends BasePostingsFormatTestCase {
|
|||
}
|
||||
w.forceMerge(1);
|
||||
IndexReader r = DirectoryReader.open(w);
|
||||
TermsEnum te = MultiFields.getTerms(r, "field").iterator();
|
||||
TermsEnum te = MultiTerms.getTerms(r, "field").iterator();
|
||||
|
||||
if (VERBOSE) {
|
||||
BytesRef term;
|
||||
|
@ -300,7 +300,7 @@ public class TestOrdsBlockTree extends BasePostingsFormatTestCase {
|
|||
}
|
||||
w.forceMerge(1);
|
||||
IndexReader r = DirectoryReader.open(w);
|
||||
TermsEnum te = MultiFields.getTerms(r, "field").iterator();
|
||||
TermsEnum te = MultiTerms.getTerms(r, "field").iterator();
|
||||
|
||||
BytesRef term;
|
||||
int ord = 0;
|
||||
|
@ -338,7 +338,7 @@ public class TestOrdsBlockTree extends BasePostingsFormatTestCase {
|
|||
}
|
||||
w.forceMerge(1);
|
||||
IndexReader r = DirectoryReader.open(w);
|
||||
TermsEnum te = MultiFields.getTerms(r, "body").iterator();
|
||||
TermsEnum te = MultiTerms.getTerms(r, "body").iterator();
|
||||
|
||||
for(int i=0;i<30;i++) {
|
||||
for(int j=0;j<30;j++) {
|
||||
|
@ -380,7 +380,7 @@ public class TestOrdsBlockTree extends BasePostingsFormatTestCase {
|
|||
|
||||
w.forceMerge(1);
|
||||
IndexReader r = w.getReader();
|
||||
TermsEnum te = MultiFields.getTerms(r, "field").iterator();
|
||||
TermsEnum te = MultiTerms.getTerms(r, "field").iterator();
|
||||
assertEquals(TermsEnum.SeekStatus.NOT_FOUND, te.seekCeil(new BytesRef(new byte[] {0x22})));
|
||||
assertEquals("a", te.term().utf8ToString());
|
||||
assertEquals(1L, te.ord());
|
||||
|
|
|
@ -24,9 +24,12 @@ import java.util.HashMap;
|
|||
import java.util.HashSet;
|
||||
import java.util.Iterator;
|
||||
import java.util.Map;
|
||||
import java.util.Objects;
|
||||
import java.util.Set;
|
||||
import java.util.Arrays;
|
||||
import java.util.List;
|
||||
import java.util.stream.Collectors;
|
||||
import java.util.stream.StreamSupport;
|
||||
|
||||
import org.apache.lucene.util.ArrayUtil;
|
||||
|
||||
|
@ -122,7 +125,36 @@ public class FieldInfos implements Iterable<FieldInfo> {
|
|||
}
|
||||
values = Collections.unmodifiableCollection(Arrays.asList(valuesTemp.toArray(new FieldInfo[0])));
|
||||
}
|
||||
|
||||
|
||||
/** Call this to get the (merged) FieldInfos for a
|
||||
* composite reader.
|
||||
* <p>
|
||||
* NOTE: the returned field numbers will likely not
|
||||
* correspond to the actual field numbers in the underlying
|
||||
* readers, and codec metadata ({@link FieldInfo#getAttribute(String)}
|
||||
* will be unavailable.
|
||||
*/
|
||||
public static FieldInfos getMergedFieldInfos(IndexReader reader) {
|
||||
final String softDeletesField = reader.leaves().stream()
|
||||
.map(l -> l.reader().getFieldInfos().getSoftDeletesField())
|
||||
.filter(Objects::nonNull)
|
||||
.findAny().orElse(null);
|
||||
final Builder builder = new Builder(new FieldNumbers(softDeletesField));
|
||||
for(final LeafReaderContext ctx : reader.leaves()) {
|
||||
builder.add(ctx.reader().getFieldInfos());
|
||||
}
|
||||
return builder.finish();
|
||||
}
|
||||
|
||||
/** Returns a set of names of fields that have a terms index. The order is undefined. */
|
||||
public static Collection<String> getIndexedFields(IndexReader reader) {
|
||||
return reader.leaves().stream()
|
||||
.flatMap(l -> StreamSupport.stream(l.reader().getFieldInfos().spliterator(), false)
|
||||
.filter(fi -> fi.getIndexOptions() != IndexOptions.NONE))
|
||||
.map(fi -> fi.name)
|
||||
.collect(Collectors.toSet());
|
||||
}
|
||||
|
||||
/** Returns true if any fields have freqs */
|
||||
public boolean hasFreq() {
|
||||
return hasFreq;
|
||||
|
|
|
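
A short usage sketch for getMergedFieldInfos and getIndexedFields added to FieldInfos above, assuming an already-open composite IndexReader named reader and the usual org.apache.lucene.index imports; the field name is illustrative.

    // Merged field metadata across all leaves; per the javadoc above, the returned field numbers
    // will likely not match the per-leaf numbers and codec attributes are unavailable.
    FieldInfos merged = FieldInfos.getMergedFieldInfos(reader);
    FieldInfo info = merged.fieldInfo("content");               // null if no leaf has this field
    boolean hasNorms = info != null && info.hasNorms();

    // Names of all fields that have a terms index; the order is undefined.
    Collection<String> indexedFields = FieldInfos.getIndexedFields(reader);
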
@@ -331,7 +331,7 @@ public abstract class IndexReader implements Closeable {
    * requested document is deleted, and therefore asking for a deleted document
    * may yield unspecified results. Usually this is not required, however you
    * can test if the doc is deleted by checking the {@link
-   * Bits} returned from {@link MultiFields#getLiveDocs}.
+   * Bits} returned from {@link MultiBits#getLiveDocs}.
    *
    * <b>NOTE:</b> only the content of a field is returned,
    * if that field was stored during indexing. Metadata
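
A small sketch of the check described in the javadoc above, skipping deleted documents before loading stored fields; reader is an already-open IndexReader and the loop is illustrative.

    Bits liveDocs = MultiBits.getLiveDocs(reader);              // null when the reader has no deletions
    for (int docID = 0; docID < reader.maxDoc(); docID++) {
      if (liveDocs == null || liveDocs.get(docID)) {
        Document doc = reader.document(docID);                  // only load stored fields for live docs
        // ... use the stored fields ...
      }
    }
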
@@ -16,6 +16,8 @@
  */
 package org.apache.lucene.index;
 
+import java.util.List;
+
 import org.apache.lucene.util.Bits;
 
 
@@ -27,7 +29,7 @@ import org.apache.lucene.util.Bits;
  *
  * @lucene.experimental
  */
-final class MultiBits implements Bits {
+public final class MultiBits implements Bits {
   private final Bits[] subs;
 
   // length is 1+subs.length (the last entry has the maxDoc):
@ -35,13 +37,45 @@ final class MultiBits implements Bits {
|
|||
|
||||
private final boolean defaultValue;
|
||||
|
||||
public MultiBits(Bits[] subs, int[] starts, boolean defaultValue) {
|
||||
private MultiBits(Bits[] subs, int[] starts, boolean defaultValue) {
|
||||
assert starts.length == 1+subs.length;
|
||||
this.subs = subs;
|
||||
this.starts = starts;
|
||||
this.defaultValue = defaultValue;
|
||||
}
|
||||
|
||||
/** Returns a single {@link Bits} instance for this
|
||||
* reader, merging live Documents on the
|
||||
* fly. This method will return null if the reader
|
||||
* has no deletions.
|
||||
*
|
||||
* <p><b>NOTE</b>: this is a very slow way to access live docs.
|
||||
* For example, each Bits access will require a binary search.
|
||||
* It's better to get the sub-readers and iterate through them
|
||||
* yourself. */
|
||||
public static Bits getLiveDocs(IndexReader reader) {
|
||||
if (reader.hasDeletions()) {
|
||||
final List<LeafReaderContext> leaves = reader.leaves();
|
||||
final int size = leaves.size();
|
||||
assert size > 0 : "A reader with deletions must have at least one leave";
|
||||
if (size == 1) {
|
||||
return leaves.get(0).reader().getLiveDocs();
|
||||
}
|
||||
final Bits[] liveDocs = new Bits[size];
|
||||
final int[] starts = new int[size + 1];
|
||||
for (int i = 0; i < size; i++) {
|
||||
// record all liveDocs, even if they are null
|
||||
final LeafReaderContext ctx = leaves.get(i);
|
||||
liveDocs[i] = ctx.reader().getLiveDocs();
|
||||
starts[i] = ctx.docBase;
|
||||
}
|
||||
starts[size] = reader.maxDoc();
|
||||
return new MultiBits(liveDocs, starts, true);
|
||||
} else {
|
||||
return null;
|
||||
}
|
||||
}
|
||||
|
||||
private boolean checkLength(int reader, int doc) {
|
||||
final int length = starts[1+reader]-starts[reader];
|
||||
assert doc - starts[reader] < length: "doc=" + doc + " reader=" + reader + " starts[reader]=" + starts[reader] + " length=" + length;
|
||||
|
@ -79,37 +113,6 @@ final class MultiBits implements Bits {
|
|||
return b.toString();
|
||||
}
|
||||
|
||||
/**
|
||||
* Represents a sub-Bits from
|
||||
* {@link MultiBits#getMatchingSub(org.apache.lucene.index.ReaderSlice) getMatchingSub()}.
|
||||
*/
|
||||
public final static class SubResult {
|
||||
public boolean matches;
|
||||
public Bits result;
|
||||
}
|
||||
|
||||
/**
|
||||
* Returns a sub-Bits matching the provided <code>slice</code>
|
||||
* <p>
|
||||
* Because <code>null</code> usually has a special meaning for
|
||||
* Bits (e.g. no deleted documents), you must check
|
||||
* {@link SubResult#matches} instead to ensure the sub was
|
||||
* actually found.
|
||||
*/
|
||||
public SubResult getMatchingSub(ReaderSlice slice) {
|
||||
int reader = ReaderUtil.subIndex(slice.start, starts);
|
||||
assert reader != -1;
|
||||
assert reader < subs.length: "slice=" + slice + " starts[-1]=" + starts[starts.length-1];
|
||||
final SubResult subResult = new SubResult();
|
||||
if (starts[reader] == slice.start && starts[1+reader] == slice.start+slice.length) {
|
||||
subResult.matches = true;
|
||||
subResult.result = subs[reader];
|
||||
} else {
|
||||
subResult.matches = false;
|
||||
}
|
||||
return subResult;
|
||||
}
|
||||
|
||||
@Override
|
||||
public int length() {
|
||||
return starts[starts.length-1];
|
||||
|
|
|
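
A sketch of the per-leaf alternative recommended by the NOTE on MultiBits#getLiveDocs above: iterate the sub-readers directly instead of binary-searching through a merged MultiBits. reader is an already-open composite IndexReader; variable names are illustrative.

    for (LeafReaderContext ctx : reader.leaves()) {
      LeafReader leaf = ctx.reader();
      Bits leafLiveDocs = leaf.getLiveDocs();                   // null if this segment has no deletions
      for (int doc = 0; doc < leaf.maxDoc(); doc++) {
        if (leafLiveDocs == null || leafLiveDocs.get(doc)) {
          int globalDocID = ctx.docBase + doc;                  // map back to the composite reader's doc id
          // ... process the live document ...
        }
      }
    }
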
@@ -56,7 +56,7 @@ public class MultiDocValues {
     } else if (size == 1) {
       return leaves.get(0).reader().getNormValues(field);
     }
-    FieldInfo fi = MultiFields.getMergedFieldInfos(r).fieldInfo(field);
+    FieldInfo fi = FieldInfos.getMergedFieldInfos(r).fieldInfo(field); //TODO avoid merging
     if (fi == null || fi.hasNorms() == false) {
       return null;
     }
@ -19,17 +19,11 @@ package org.apache.lucene.index;
|
|||
|
||||
import java.io.IOException;
|
||||
import java.util.ArrayList;
|
||||
import java.util.Collection;
|
||||
import java.util.Iterator;
|
||||
import java.util.List;
|
||||
import java.util.Map;
|
||||
import java.util.Objects;
|
||||
import java.util.concurrent.ConcurrentHashMap;
|
||||
import java.util.stream.Collectors;
|
||||
import java.util.stream.StreamSupport;
|
||||
|
||||
import org.apache.lucene.util.Bits;
|
||||
import org.apache.lucene.util.BytesRef;
|
||||
import org.apache.lucene.util.MergedIterator;
|
||||
|
||||
/**
|
||||
|
@ -53,123 +47,9 @@ public final class MultiFields extends Fields {
|
|||
private final ReaderSlice[] subSlices;
|
||||
private final Map<String,Terms> terms = new ConcurrentHashMap<>();
|
||||
|
||||
/** Returns a single {@link Bits} instance for this
|
||||
* reader, merging live Documents on the
|
||||
* fly. This method will return null if the reader
|
||||
* has no deletions.
|
||||
*
|
||||
* <p><b>NOTE</b>: this is a very slow way to access live docs.
|
||||
* For example, each Bits access will require a binary search.
|
||||
* It's better to get the sub-readers and iterate through them
|
||||
* yourself. */
|
||||
public static Bits getLiveDocs(IndexReader reader) {
|
||||
if (reader.hasDeletions()) {
|
||||
final List<LeafReaderContext> leaves = reader.leaves();
|
||||
final int size = leaves.size();
|
||||
assert size > 0 : "A reader with deletions must have at least one leave";
|
||||
if (size == 1) {
|
||||
return leaves.get(0).reader().getLiveDocs();
|
||||
}
|
||||
final Bits[] liveDocs = new Bits[size];
|
||||
final int[] starts = new int[size + 1];
|
||||
for (int i = 0; i < size; i++) {
|
||||
// record all liveDocs, even if they are null
|
||||
final LeafReaderContext ctx = leaves.get(i);
|
||||
liveDocs[i] = ctx.reader().getLiveDocs();
|
||||
starts[i] = ctx.docBase;
|
||||
}
|
||||
starts[size] = reader.maxDoc();
|
||||
return new MultiBits(liveDocs, starts, true);
|
||||
} else {
|
||||
return null;
|
||||
}
|
||||
}
|
||||
|
||||
/** This method may return null if the field does not exist or if it has no terms. */
|
||||
public static Terms getTerms(IndexReader r, String field) throws IOException {
|
||||
final List<LeafReaderContext> leaves = r.leaves();
|
||||
if (leaves.size() == 1) {
|
||||
return leaves.get(0).reader().terms(field);
|
||||
}
|
||||
|
||||
final List<Terms> termsPerLeaf = new ArrayList<>(leaves.size());
|
||||
final List<ReaderSlice> slicePerLeaf = new ArrayList<>(leaves.size());
|
||||
|
||||
for (int leafIdx = 0; leafIdx < leaves.size(); leafIdx++) {
|
||||
LeafReaderContext ctx = leaves.get(leafIdx);
|
||||
Terms subTerms = ctx.reader().terms(field);
|
||||
if (subTerms != null) {
|
||||
termsPerLeaf.add(subTerms);
|
||||
slicePerLeaf.add(new ReaderSlice(ctx.docBase, r.maxDoc(), leafIdx));
|
||||
}
|
||||
}
|
||||
|
||||
if (termsPerLeaf.size() == 0) {
|
||||
return null;
|
||||
} else {
|
||||
return new MultiTerms(termsPerLeaf.toArray(Terms.EMPTY_ARRAY),
|
||||
slicePerLeaf.toArray(ReaderSlice.EMPTY_ARRAY));
|
||||
}
|
||||
}
|
||||
|
||||
/** Returns {@link PostingsEnum} for the specified field and
|
||||
* term. This will return null if the field or term does
|
||||
* not exist. */
|
||||
public static PostingsEnum getTermDocsEnum(IndexReader r, String field, BytesRef term) throws IOException {
|
||||
return getTermDocsEnum(r, field, term, PostingsEnum.FREQS);
|
||||
}
|
||||
|
||||
/** Returns {@link PostingsEnum} for the specified field and
|
||||
* term, with control over whether freqs are required.
|
||||
* Some codecs may be able to optimize their
|
||||
* implementation when freqs are not required. This will
|
||||
* return null if the field or term does not exist. See {@link
|
||||
* TermsEnum#postings(PostingsEnum,int)}.*/
|
||||
public static PostingsEnum getTermDocsEnum(IndexReader r, String field, BytesRef term, int flags) throws IOException {
|
||||
assert field != null;
|
||||
assert term != null;
|
||||
final Terms terms = getTerms(r, field);
|
||||
if (terms != null) {
|
||||
final TermsEnum termsEnum = terms.iterator();
|
||||
if (termsEnum.seekExact(term)) {
|
||||
return termsEnum.postings(null, flags);
|
||||
}
|
||||
}
|
||||
return null;
|
||||
}
|
||||
|
||||
/** Returns {@link PostingsEnum} for the specified
|
||||
* field and term. This will return null if the field or
|
||||
* term does not exist or positions were not indexed.
|
||||
* @see #getTermPositionsEnum(IndexReader, String, BytesRef, int) */
|
||||
public static PostingsEnum getTermPositionsEnum(IndexReader r, String field, BytesRef term) throws IOException {
|
||||
return getTermPositionsEnum(r, field, term, PostingsEnum.ALL);
|
||||
}
|
||||
|
||||
/** Returns {@link PostingsEnum} for the specified
|
||||
* field and term, with control over whether offsets and payloads are
|
||||
* required. Some codecs may be able to optimize
|
||||
* their implementation when offsets and/or payloads are not
|
||||
* required. This will return null if the field or term does not
|
||||
* exist. See {@link TermsEnum#postings(PostingsEnum,int)}. */
|
||||
public static PostingsEnum getTermPositionsEnum(IndexReader r, String field, BytesRef term, int flags) throws IOException {
|
||||
assert field != null;
|
||||
assert term != null;
|
||||
final Terms terms = getTerms(r, field);
|
||||
if (terms != null) {
|
||||
final TermsEnum termsEnum = terms.iterator();
|
||||
if (termsEnum.seekExact(term)) {
|
||||
return termsEnum.postings(null, flags);
|
||||
}
|
||||
}
|
||||
return null;
|
||||
}
|
||||
|
||||
/**
|
||||
* Expert: construct a new MultiFields instance directly.
|
||||
* @lucene.internal
|
||||
* Sole constructor.
|
||||
*/
|
||||
// TODO: why is this public?
|
||||
public MultiFields(Fields[] subs, ReaderSlice[] subSlices) {
|
||||
this.subs = subs;
|
||||
this.subSlices = subSlices;
|
||||
|
@ -223,34 +103,5 @@ public final class MultiFields extends Fields {
|
|||
return -1;
|
||||
}
|
||||
|
||||
/** Call this to get the (merged) FieldInfos for a
|
||||
* composite reader.
|
||||
* <p>
|
||||
* NOTE: the returned field numbers will likely not
|
||||
* correspond to the actual field numbers in the underlying
|
||||
* readers, and codec metadata ({@link FieldInfo#getAttribute(String)}
|
||||
* will be unavailable.
|
||||
*/
|
||||
public static FieldInfos getMergedFieldInfos(IndexReader reader) {
|
||||
final String softDeletesField = reader.leaves().stream()
|
||||
.map(l -> l.reader().getFieldInfos().getSoftDeletesField())
|
||||
.filter(Objects::nonNull)
|
||||
.findAny().orElse(null);
|
||||
final FieldInfos.Builder builder = new FieldInfos.Builder(new FieldInfos.FieldNumbers(softDeletesField));
|
||||
for(final LeafReaderContext ctx : reader.leaves()) {
|
||||
builder.add(ctx.reader().getFieldInfos());
|
||||
}
|
||||
return builder.finish();
|
||||
}
|
||||
|
||||
/** Returns a set of names of fields that have a terms index. The order is undefined. */
|
||||
public static Collection<String> getIndexedFields(IndexReader reader) {
|
||||
return reader.leaves().stream()
|
||||
.flatMap(l -> StreamSupport.stream(l.reader().getFieldInfos().spliterator(), false)
|
||||
.filter(fi -> fi.getIndexOptions() != IndexOptions.NONE))
|
||||
.map(fi -> fi.name)
|
||||
.collect(Collectors.toSet());
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
|
|
|
@ -0,0 +1,34 @@
|
|||
/*
|
||||
* Licensed to the Apache Software Foundation (ASF) under one or more
|
||||
* contributor license agreements. See the NOTICE file distributed with
|
||||
* this work for additional information regarding copyright ownership.
|
||||
* The ASF licenses this file to You under the Apache License, Version 2.0
|
||||
* (the "License"); you may not use this file except in compliance with
|
||||
* the License. You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
|
||||
package org.apache.lucene.index;
|
||||
|
||||
/**
|
||||
* Utility methods for working with a {@link IndexReader} as if it were a {@link LeafReader}.
|
||||
*
|
||||
* <p><b>NOTE</b>: for composite readers, you'll get better
|
||||
* performance by gathering the sub readers using
|
||||
* {@link IndexReader#getContext()} to get the
|
||||
* atomic leaves and then operate per-LeafReader,
|
||||
* instead of using this class.
|
||||
* @lucene.experimental
|
||||
*/
|
||||
public class MultiLeafReader {
|
||||
|
||||
private MultiLeafReader() {}
|
||||
|
||||
}
|
|
@ -31,7 +31,6 @@ import org.apache.lucene.util.automaton.CompiledAutomaton;
|
|||
*
|
||||
* @lucene.experimental
|
||||
*/
|
||||
|
||||
public final class MultiTerms extends Terms {
|
||||
private final Terms[] subs;
|
||||
private final ReaderSlice[] subSlices;
|
||||
|
@ -40,13 +39,15 @@ public final class MultiTerms extends Terms {
|
|||
private final boolean hasPositions;
|
||||
private final boolean hasPayloads;
|
||||
|
||||
/** Sole constructor.
|
||||
/**
|
||||
* Sole constructor. Use {@link #getTerms(IndexReader, String)} instead if possible.
|
||||
*
|
||||
* @param subs The {@link Terms} instances of all sub-readers.
|
||||
* @param subSlices A parallel array (matching {@code
|
||||
* subs}) describing the sub-reader slices.
|
||||
* @lucene.internal
|
||||
*/
|
||||
public MultiTerms(Terms[] subs, ReaderSlice[] subSlices) throws IOException {
|
||||
public MultiTerms(Terms[] subs, ReaderSlice[] subSlices) throws IOException { //TODO make private?
|
||||
this.subs = subs;
|
||||
this.subSlices = subSlices;
|
||||
|
||||
|
@ -68,6 +69,60 @@ public final class MultiTerms extends Terms {
|
|||
hasPayloads = hasPositions && _hasPayloads; // if all subs have pos, and at least one has payloads.
|
||||
}
|
||||
|
||||
/** This method may return null if the field does not exist or if it has no terms. */
|
||||
public static Terms getTerms(IndexReader r, String field) throws IOException {
|
||||
final List<LeafReaderContext> leaves = r.leaves();
|
||||
if (leaves.size() == 1) {
|
||||
return leaves.get(0).reader().terms(field);
|
||||
}
|
||||
|
||||
final List<Terms> termsPerLeaf = new ArrayList<>(leaves.size());
|
||||
final List<ReaderSlice> slicePerLeaf = new ArrayList<>(leaves.size());
|
||||
|
||||
for (int leafIdx = 0; leafIdx < leaves.size(); leafIdx++) {
|
||||
LeafReaderContext ctx = leaves.get(leafIdx);
|
||||
Terms subTerms = ctx.reader().terms(field);
|
||||
if (subTerms != null) {
|
||||
termsPerLeaf.add(subTerms);
|
||||
slicePerLeaf.add(new ReaderSlice(ctx.docBase, r.maxDoc(), leafIdx));
|
||||
}
|
||||
}
|
||||
|
||||
if (termsPerLeaf.size() == 0) {
|
||||
return null;
|
||||
} else {
|
||||
return new MultiTerms(termsPerLeaf.toArray(EMPTY_ARRAY),
|
||||
slicePerLeaf.toArray(ReaderSlice.EMPTY_ARRAY));
|
||||
}
|
||||
}
|
||||
|
||||
/** Returns {@link PostingsEnum} for the specified
|
||||
* field and term. This will return null if the field or
|
||||
* term does not exist or positions were not indexed.
|
||||
* @see #getTermPostingsEnum(IndexReader, String, BytesRef, int) */
|
||||
public static PostingsEnum getTermPostingsEnum(IndexReader r, String field, BytesRef term) throws IOException {
|
||||
return getTermPostingsEnum(r, field, term, PostingsEnum.ALL);
|
||||
}
|
||||
|
||||
/** Returns {@link PostingsEnum} for the specified
|
||||
* field and term, with control over whether freqs, positions, offsets or payloads
|
||||
* are required. Some codecs may be able to optimize
|
||||
* their implementation when offsets and/or payloads are not
|
||||
* required. This will return null if the field or term does not
|
||||
* exist. See {@link TermsEnum#postings(PostingsEnum,int)}. */
|
||||
public static PostingsEnum getTermPostingsEnum(IndexReader r, String field, BytesRef term, int flags) throws IOException {
|
||||
assert field != null;
|
||||
assert term != null;
|
||||
final Terms terms = getTerms(r, field);
|
||||
if (terms != null) {
|
||||
final TermsEnum termsEnum = terms.iterator();
|
||||
if (termsEnum.seekExact(term)) {
|
||||
return termsEnum.postings(null, flags);
|
||||
}
|
||||
}
|
||||
return null;
|
||||
}
|
||||
|
||||
/** Expert: returns the Terms being merged. */
|
||||
public Terms[] getSubTerms() {
|
||||
return subs;
|
||||
|
|
|
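
A usage sketch for the merged-terms helpers added above, assuming an already-open IndexReader named reader; the field and term are illustrative.

    Terms terms = MultiTerms.getTerms(reader, "body");           // null if the field is absent or has no terms
    if (terms != null) {
      TermsEnum termsEnum = terms.iterator();
      BytesRef term;
      while ((term = termsEnum.next()) != null) {
        int docFreq = termsEnum.docFreq();                       // stats reflect the merged view
      }
    }
    // Postings for a single known term; the flags select freqs, positions, offsets and/or payloads.
    PostingsEnum postings = MultiTerms.getTermPostingsEnum(reader, "body", new BytesRef("lucene"), PostingsEnum.POSITIONS);
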
@ -26,7 +26,7 @@ import org.apache.lucene.analysis.tokenattributes.OffsetAttribute;
|
|||
import org.apache.lucene.document.Document;
|
||||
import org.apache.lucene.document.TextField;
|
||||
import org.apache.lucene.index.IndexReader;
|
||||
import org.apache.lucene.index.MultiFields;
|
||||
import org.apache.lucene.index.MultiTerms;
|
||||
import org.apache.lucene.index.PostingsEnum;
|
||||
import org.apache.lucene.index.RandomIndexWriter;
|
||||
import org.apache.lucene.search.DocIdSetIterator;
|
||||
|
@ -84,14 +84,14 @@ public class TestCachingTokenFilter extends BaseTokenStreamTestCase {
|
|||
writer.addDocument(doc);
|
||||
|
||||
IndexReader reader = writer.getReader();
|
||||
PostingsEnum termPositions = MultiFields.getTermPositionsEnum(reader,
|
||||
PostingsEnum termPositions = MultiTerms.getTermPostingsEnum(reader,
|
||||
"preanalyzed",
|
||||
new BytesRef("term1"));
|
||||
assertTrue(termPositions.nextDoc() != DocIdSetIterator.NO_MORE_DOCS);
|
||||
assertEquals(1, termPositions.freq());
|
||||
assertEquals(0, termPositions.nextPosition());
|
||||
|
||||
termPositions = MultiFields.getTermPositionsEnum(reader,
|
||||
termPositions = MultiTerms.getTermPostingsEnum(reader,
|
||||
"preanalyzed",
|
||||
new BytesRef("term2"));
|
||||
assertTrue(termPositions.nextDoc() != DocIdSetIterator.NO_MORE_DOCS);
|
||||
|
@ -99,7 +99,7 @@ public class TestCachingTokenFilter extends BaseTokenStreamTestCase {
|
|||
assertEquals(1, termPositions.nextPosition());
|
||||
assertEquals(3, termPositions.nextPosition());
|
||||
|
||||
termPositions = MultiFields.getTermPositionsEnum(reader,
|
||||
termPositions = MultiTerms.getTermPostingsEnum(reader,
|
||||
"preanalyzed",
|
||||
new BytesRef("term3"));
|
||||
assertTrue(termPositions.nextDoc() != DocIdSetIterator.NO_MORE_DOCS);
|
||||
|
|
|
@ -237,7 +237,7 @@ public class Test2BTerms extends LuceneTestCase {
|
|||
|
||||
private List<BytesRef> findTerms(IndexReader r) throws IOException {
|
||||
System.out.println("TEST: findTerms");
|
||||
final TermsEnum termsEnum = MultiFields.getTerms(r, "field").iterator();
|
||||
final TermsEnum termsEnum = MultiTerms.getTerms(r, "field").iterator();
|
||||
final List<BytesRef> savedTerms = new ArrayList<>();
|
||||
int nextSave = TestUtil.nextInt(random(), 500000, 1000000);
|
||||
BytesRef term;
|
||||
|
@ -255,7 +255,7 @@ public class Test2BTerms extends LuceneTestCase {
|
|||
System.out.println("TEST: run " + terms.size() + " terms on reader=" + r);
|
||||
IndexSearcher s = newSearcher(r);
|
||||
Collections.shuffle(terms, random());
|
||||
TermsEnum termsEnum = MultiFields.getTerms(r, "field").iterator();
|
||||
TermsEnum termsEnum = MultiTerms.getTerms(r, "field").iterator();
|
||||
boolean failed = false;
|
||||
for(int iter=0;iter<10*terms.size();iter++) {
|
||||
final BytesRef term = terms.get(random().nextInt(terms.size()));
|
||||
|
|
|
@ -266,7 +266,7 @@ public class TestBinaryDocValuesUpdates extends LuceneTestCase {
|
|||
writer.close();
|
||||
}
|
||||
|
||||
Bits liveDocs = MultiFields.getLiveDocs(reader);
|
||||
Bits liveDocs = MultiBits.getLiveDocs(reader);
|
||||
boolean[] expectedLiveDocs = new boolean[] { true, false, false, true, true, true };
|
||||
for (int i = 0; i < expectedLiveDocs.length; i++) {
|
||||
assertEquals(expectedLiveDocs[i], liveDocs.get(i));
|
||||
|
|
|
@ -85,13 +85,13 @@ public class TestCustomTermFreq extends LuceneTestCase {
|
|||
doc.add(field);
|
||||
w.addDocument(doc);
|
||||
IndexReader r = DirectoryReader.open(w);
|
||||
PostingsEnum postings = MultiFields.getTermDocsEnum(r, "field", new BytesRef("bar"));
|
||||
PostingsEnum postings = MultiTerms.getTermPostingsEnum(r, "field", new BytesRef("bar"), (int) PostingsEnum.FREQS);
|
||||
assertNotNull(postings);
|
||||
assertEquals(0, postings.nextDoc());
|
||||
assertEquals(128, postings.freq());
|
||||
assertEquals(NO_MORE_DOCS, postings.nextDoc());
|
||||
|
||||
postings = MultiFields.getTermDocsEnum(r, "field", new BytesRef("foo"));
|
||||
postings = MultiTerms.getTermPostingsEnum(r, "field", new BytesRef("foo"), (int) PostingsEnum.FREQS);
|
||||
assertNotNull(postings);
|
||||
assertEquals(0, postings.nextDoc());
|
||||
assertEquals(42, postings.freq());
|
||||
|
@ -123,7 +123,7 @@ public class TestCustomTermFreq extends LuceneTestCase {
|
|||
w.addDocument(doc);
|
||||
|
||||
IndexReader r = DirectoryReader.open(w);
|
||||
PostingsEnum postings = MultiFields.getTermDocsEnum(r, "field", new BytesRef("bar"));
|
||||
PostingsEnum postings = MultiTerms.getTermPostingsEnum(r, "field", new BytesRef("bar"), (int) PostingsEnum.FREQS);
|
||||
assertNotNull(postings);
|
||||
assertEquals(0, postings.nextDoc());
|
||||
assertEquals(128, postings.freq());
|
||||
|
@ -131,7 +131,7 @@ public class TestCustomTermFreq extends LuceneTestCase {
|
|||
assertEquals(50, postings.freq());
|
||||
assertEquals(NO_MORE_DOCS, postings.nextDoc());
|
||||
|
||||
postings = MultiFields.getTermDocsEnum(r, "field", new BytesRef("foo"));
|
||||
postings = MultiTerms.getTermPostingsEnum(r, "field", new BytesRef("foo"), (int) PostingsEnum.FREQS);
|
||||
assertNotNull(postings);
|
||||
assertEquals(0, postings.nextDoc());
|
||||
assertEquals(42, postings.freq());
|
||||
|
@ -156,13 +156,13 @@ public class TestCustomTermFreq extends LuceneTestCase {
|
|||
doc.add(field);
|
||||
w.addDocument(doc);
|
||||
IndexReader r = DirectoryReader.open(w);
|
||||
PostingsEnum postings = MultiFields.getTermDocsEnum(r, "field", new BytesRef("bar"));
|
||||
PostingsEnum postings = MultiTerms.getTermPostingsEnum(r, "field", new BytesRef("bar"), (int) PostingsEnum.FREQS);
|
||||
assertNotNull(postings);
|
||||
assertEquals(0, postings.nextDoc());
|
||||
assertEquals(228, postings.freq());
|
||||
assertEquals(NO_MORE_DOCS, postings.nextDoc());
|
||||
|
||||
postings = MultiFields.getTermDocsEnum(r, "field", new BytesRef("foo"));
|
||||
postings = MultiTerms.getTermPostingsEnum(r, "field", new BytesRef("foo"), (int) PostingsEnum.FREQS);
|
||||
assertNotNull(postings);
|
||||
assertEquals(0, postings.nextDoc());
|
||||
assertEquals(59, postings.freq());
|
||||
|
@ -195,7 +195,7 @@ public class TestCustomTermFreq extends LuceneTestCase {
|
|||
w.addDocument(doc);
|
||||
|
||||
IndexReader r = DirectoryReader.open(w);
|
||||
PostingsEnum postings = MultiFields.getTermDocsEnum(r, "field", new BytesRef("bar"));
|
||||
PostingsEnum postings = MultiTerms.getTermPostingsEnum(r, "field", new BytesRef("bar"), (int) PostingsEnum.FREQS);
|
||||
assertNotNull(postings);
|
||||
assertEquals(0, postings.nextDoc());
|
||||
assertEquals(228, postings.freq());
|
||||
|
@ -203,7 +203,7 @@ public class TestCustomTermFreq extends LuceneTestCase {
|
|||
assertEquals(140, postings.freq());
|
||||
assertEquals(NO_MORE_DOCS, postings.nextDoc());
|
||||
|
||||
postings = MultiFields.getTermDocsEnum(r, "field", new BytesRef("foo"));
|
||||
postings = MultiTerms.getTermPostingsEnum(r, "field", new BytesRef("foo"), (int) PostingsEnum.FREQS);
|
||||
assertNotNull(postings);
|
||||
assertEquals(0, postings.nextDoc());
|
||||
assertEquals(59, postings.freq());
|
||||
|
@ -239,7 +239,7 @@ public class TestCustomTermFreq extends LuceneTestCase {
|
|||
|
||||
IndexReader r = DirectoryReader.open(w);
|
||||
|
||||
TermsEnum termsEnum = MultiFields.getTerms(r, "field").iterator();
|
||||
TermsEnum termsEnum = MultiTerms.getTerms(r, "field").iterator();
|
||||
assertTrue(termsEnum.seekExact(new BytesRef("foo")));
|
||||
assertEquals(179, termsEnum.totalTermFreq());
|
||||
assertTrue(termsEnum.seekExact(new BytesRef("bar")));
|
||||
|
|
|
@@ -93,7 +93,7 @@ public class TestDirectoryReader extends LuceneTestCase {
MultiReader mr3 = new MultiReader(readers2);

// test mixing up TermDocs and TermEnums from different readers.
- TermsEnum te2 = MultiFields.getTerms(mr2, "body").iterator();
+ TermsEnum te2 = MultiTerms.getTerms(mr2, "body").iterator();
te2.seekCeil(new BytesRef("wow"));
PostingsEnum td = TestUtil.docs(random(), mr2,
"body",

@@ -101,7 +101,7 @@ public class TestDirectoryReader extends LuceneTestCase {
null,
0);

- TermsEnum te3 = MultiFields.getTerms(mr3, "body").iterator();
+ TermsEnum te3 = MultiTerms.getTerms(mr3, "body").iterator();
te3.seekCeil(new BytesRef("wow"));
td = TestUtil.docs(random(), te3,
td,

@@ -185,7 +185,7 @@ public class TestDirectoryReader extends LuceneTestCase {
writer.close();
// set up reader
DirectoryReader reader = DirectoryReader.open(d);
- FieldInfos fieldInfos = MultiFields.getMergedFieldInfos(reader);
+ FieldInfos fieldInfos = FieldInfos.getMergedFieldInfos(reader);
assertNotNull(fieldInfos.fieldInfo("keyword"));
assertNotNull(fieldInfos.fieldInfo("text"));
assertNotNull(fieldInfos.fieldInfo("unindexed"));

@@ -246,7 +246,7 @@ public class TestDirectoryReader extends LuceneTestCase {

// verify fields again
reader = DirectoryReader.open(d);
- fieldInfos = MultiFields.getMergedFieldInfos(reader);
+ fieldInfos = FieldInfos.getMergedFieldInfos(reader);

Collection<String> allFieldNames = new HashSet<>();
Collection<String> indexedFieldNames = new HashSet<>();

@@ -559,8 +559,8 @@ public class TestDirectoryReader extends LuceneTestCase {
assertEquals("Single segment test differs.", index1.leaves().size() == 1, index2.leaves().size() == 1);

// check field names
- FieldInfos fieldInfos1 = MultiFields.getMergedFieldInfos(index1);
- FieldInfos fieldInfos2 = MultiFields.getMergedFieldInfos(index2);
+ FieldInfos fieldInfos1 = FieldInfos.getMergedFieldInfos(index1);
+ FieldInfos fieldInfos2 = FieldInfos.getMergedFieldInfos(index2);
assertEquals("IndexReaders have different numbers of fields.", fieldInfos1.size(), fieldInfos2.size());
final int numFields = fieldInfos1.size();
for(int fieldID=0;fieldID<numFields;fieldID++) {

@@ -591,8 +591,8 @@ public class TestDirectoryReader extends LuceneTestCase {
}

// check deletions
- final Bits liveDocs1 = MultiFields.getLiveDocs(index1);
- final Bits liveDocs2 = MultiFields.getLiveDocs(index2);
+ final Bits liveDocs1 = MultiBits.getLiveDocs(index1);
+ final Bits liveDocs2 = MultiBits.getLiveDocs(index2);
for (int i = 0; i < index1.maxDoc(); i++) {
assertEquals("Doc " + i + " only deleted in one index.",
liveDocs1 == null || !liveDocs1.get(i),

@@ -619,19 +619,19 @@ public class TestDirectoryReader extends LuceneTestCase {
}

// check dictionary and posting lists
- TreeSet<String> fields1 = new TreeSet<>(MultiFields.getIndexedFields(index1));
- TreeSet<String> fields2 = new TreeSet<>(MultiFields.getIndexedFields(index2));
+ TreeSet<String> fields1 = new TreeSet<>(FieldInfos.getIndexedFields(index1));
+ TreeSet<String> fields2 = new TreeSet<>(FieldInfos.getIndexedFields(index2));
Iterator<String> fenum2 = fields2.iterator();
for (String field1 : fields1) {
assertEquals("Different fields", field1, fenum2.next());
- Terms terms1 = MultiFields.getTerms(index1, field1);
+ Terms terms1 = MultiTerms.getTerms(index1, field1);
if (terms1 == null) {
- assertNull(MultiFields.getTerms(index2, field1));
+ assertNull(MultiTerms.getTerms(index2, field1));
continue;
}
TermsEnum enum1 = terms1.iterator();

- Terms terms2 = MultiFields.getTerms(index2, field1);
+ Terms terms2 = MultiTerms.getTerms(index2, field1);
assertNotNull(terms2);
TermsEnum enum2 = terms2.iterator();
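A minimal sketch of the two FieldInfos helpers these hunks switch to; the method name and the printing are illustrative only, not part of the patch:

    import java.util.Collection;
    import org.apache.lucene.index.FieldInfo;
    import org.apache.lucene.index.FieldInfos;
    import org.apache.lucene.index.IndexOptions;
    import org.apache.lucene.index.IndexReader;

    // Merge per-segment FieldInfos into one view, then list the indexed fields.
    static void listFields(IndexReader reader) {
      FieldInfos infos = FieldInfos.getMergedFieldInfos(reader);
      for (FieldInfo fi : infos) {
        System.out.println(fi.name + " indexed=" + (fi.getIndexOptions() != IndexOptions.NONE));
      }
      Collection<String> indexedFields = FieldInfos.getIndexedFields(reader);
      System.out.println("indexed fields: " + indexedFields);
    }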
@@ -59,9 +59,9 @@ public class TestDocCount extends LuceneTestCase {
}

private void verifyCount(IndexReader ir) throws Exception {
- final Collection<String> fields = MultiFields.getIndexedFields(ir);
+ final Collection<String> fields = FieldInfos.getIndexedFields(ir);
for (String field : fields) {
- Terms terms = MultiFields.getTerms(ir, field);
+ Terms terms = MultiTerms.getTerms(ir, field);
if (terms == null) {
continue;
}

@@ -205,7 +205,7 @@ public class TestDocValuesIndexing extends LuceneTestCase {
writer.addDocument(doc);
}
DirectoryReader r = writer.getReader();
- FieldInfos fi = MultiFields.getMergedFieldInfos(r);
+ FieldInfos fi = FieldInfos.getMergedFieldInfos(r);
FieldInfo dvInfo = fi.fieldInfo("dv");
assertTrue(dvInfo.getDocValuesType() != DocValuesType.NONE);
NumericDocValues dv = MultiDocValues.getNumericValues(r, "dv");
@@ -126,7 +126,7 @@ public class TestDocumentWriter extends LuceneTestCase {
writer.close();
SegmentReader reader = new SegmentReader(info, Version.LATEST.major, newIOContext(random()));

- PostingsEnum termPositions = MultiFields.getTermPositionsEnum(reader, "repeated", new BytesRef("repeated"));
+ PostingsEnum termPositions = MultiTerms.getTermPostingsEnum(reader, "repeated", new BytesRef("repeated"));
assertTrue(termPositions.nextDoc() != DocIdSetIterator.NO_MORE_DOCS);
int freq = termPositions.freq();
assertEquals(2, freq);

@@ -197,7 +197,7 @@ public class TestDocumentWriter extends LuceneTestCase {
writer.close();
SegmentReader reader = new SegmentReader(info, Version.LATEST.major, newIOContext(random()));

- PostingsEnum termPositions = MultiFields.getTermPositionsEnum(reader, "f1", new BytesRef("a"));
+ PostingsEnum termPositions = MultiTerms.getTermPostingsEnum(reader, "f1", new BytesRef("a"));
assertTrue(termPositions.nextDoc() != DocIdSetIterator.NO_MORE_DOCS);
int freq = termPositions.freq();
assertEquals(3, freq);

@@ -164,7 +164,7 @@ public class TestFilterLeafReader extends LuceneTestCase {
writer.close();
IndexReader reader = DirectoryReader.open(target);

- TermsEnum terms = MultiFields.getTerms(reader, "default").iterator();
+ TermsEnum terms = MultiTerms.getTerms(reader, "default").iterator();
while (terms.next() != null) {
assertTrue(terms.term().utf8ToString().indexOf('e') != -1);
}

@@ -55,7 +55,7 @@ public class TestFlex extends LuceneTestCase {

IndexReader r = w.getReader();

- TermsEnum terms = MultiFields.getTerms(r, "field3").iterator();
+ TermsEnum terms = MultiTerms.getTerms(r, "field3").iterator();
assertEquals(TermsEnum.SeekStatus.END, terms.seekCeil(new BytesRef("abc")));
r.close();
}
@@ -1215,7 +1215,7 @@ public class TestIndexWriterDelete extends LuceneTestCase {

r = DirectoryReader.open(d);
assertEquals(2, r.numDeletedDocs());
- assertNotNull(MultiFields.getLiveDocs(r));
+ assertNotNull(MultiBits.getLiveDocs(r));
r.close();
d.close();
}

@@ -34,7 +34,7 @@ public class TestIndexWriterDeleteByQuery extends LuceneTestCase {
doc.add(newStringField("field", "foo", Field.Store.NO));
w.addDocument(doc);
DirectoryReader r = DirectoryReader.open(w);
- FieldInfo fi = MultiFields.getMergedFieldInfos(r).fieldInfo("field");
+ FieldInfo fi = FieldInfos.getMergedFieldInfos(r).fieldInfo("field");
assertNotNull(fi);
assertFalse(fi.hasNorms());
assertEquals(1, r.numDocs());

@@ -61,7 +61,7 @@ public class TestIndexWriterDeleteByQuery extends LuceneTestCase {
assertEquals(1, r3.maxDoc());

// Make sure norms can come back to life for a field after deleting by MatchAllDocsQuery:
- fi = MultiFields.getMergedFieldInfos(r3).fieldInfo("field");
+ fi = FieldInfos.getMergedFieldInfos(r3).fieldInfo("field");
assertNotNull(fi);
assertTrue(fi.hasNorms());
r3.close();
@@ -536,7 +536,7 @@ public class TestIndexWriterExceptions extends LuceneTestCase {
null,
0);

- final Bits liveDocs = MultiFields.getLiveDocs(reader);
+ final Bits liveDocs = MultiBits.getLiveDocs(reader);
int count = 0;
while(tdocs.nextDoc() != DocIdSetIterator.NO_MORE_DOCS) {
if (liveDocs == null || liveDocs.get(tdocs.docID())) {

@@ -671,7 +671,7 @@ public class TestIndexWriterExceptions extends LuceneTestCase {
assertEquals(expected, reader.docFreq(new Term("contents", "here")));
assertEquals(expected, reader.maxDoc());
int numDel = 0;
- final Bits liveDocs = MultiFields.getLiveDocs(reader);
+ final Bits liveDocs = MultiBits.getLiveDocs(reader);
assertNotNull(liveDocs);
for(int j=0;j<reader.maxDoc();j++) {
if (!liveDocs.get(j))

@@ -699,7 +699,7 @@ public class TestIndexWriterExceptions extends LuceneTestCase {
assertEquals(expected, reader.docFreq(new Term("contents", "here")));
assertEquals(expected, reader.maxDoc());
int numDel = 0;
- assertNull(MultiFields.getLiveDocs(reader));
+ assertNull(MultiBits.getLiveDocs(reader));
for(int j=0;j<reader.maxDoc();j++) {
reader.document(j);
reader.getTermVectors(j);

@@ -827,7 +827,7 @@ public class TestIndexWriterExceptions extends LuceneTestCase {
assertEquals("i=" + i, expected, reader.docFreq(new Term("contents", "here")));
assertEquals(expected, reader.maxDoc());
int numDel = 0;
- final Bits liveDocs = MultiFields.getLiveDocs(reader);
+ final Bits liveDocs = MultiBits.getLiveDocs(reader);
assertNotNull(liveDocs);
for(int j=0;j<reader.maxDoc();j++) {
if (!liveDocs.get(j))

@@ -854,7 +854,7 @@ public class TestIndexWriterExceptions extends LuceneTestCase {
expected += 17-NUM_THREAD*NUM_ITER;
assertEquals(expected, reader.docFreq(new Term("contents", "here")));
assertEquals(expected, reader.maxDoc());
- assertNull(MultiFields.getLiveDocs(reader));
+ assertNull(MultiBits.getLiveDocs(reader));
for(int j=0;j<reader.maxDoc();j++) {
reader.document(j);
reader.getTermVectors(j);
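A minimal sketch of the MultiBits.getLiveDocs pattern these hunks switch to; counting live documents is an illustrative use, not part of the patch:

    import org.apache.lucene.index.IndexReader;
    import org.apache.lucene.index.MultiBits;
    import org.apache.lucene.util.Bits;

    // Count documents that are still live; getLiveDocs returns null when nothing is deleted.
    static int countLiveDocs(IndexReader reader) {
      Bits liveDocs = MultiBits.getLiveDocs(reader);
      int count = 0;
      for (int i = 0; i < reader.maxDoc(); i++) {
        if (liveDocs == null || liveDocs.get(i)) {
          count++;
        }
      }
      return count;
    }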
@@ -59,7 +59,7 @@ public class TestIndexWriterReader extends LuceneTestCase {
0);

if (td != null) {
- final Bits liveDocs = MultiFields.getLiveDocs(r);
+ final Bits liveDocs = MultiBits.getLiveDocs(r);
while (td.nextDoc() != DocIdSetIterator.NO_MORE_DOCS) {
td.docID();
if (liveDocs == null || liveDocs.get(td.docID())) {

@@ -136,7 +136,7 @@ public class TestIndexWriterUnicode extends LuceneTestCase {
}

private void checkTermsOrder(IndexReader r, Set<String> allTerms, boolean isTop) throws IOException {
- TermsEnum terms = MultiFields.getTerms(r, "f").iterator();
+ TermsEnum terms = MultiTerms.getTerms(r, "f").iterator();

BytesRefBuilder last = new BytesRefBuilder();

@@ -328,7 +328,7 @@ public class TestIndexWriterWithThreads extends LuceneTestCase {

if (success) {
IndexReader reader = DirectoryReader.open(dir);
- final Bits delDocs = MultiFields.getLiveDocs(reader);
+ final Bits delDocs = MultiBits.getLiveDocs(reader);
for(int j=0;j<reader.maxDoc();j++) {
if (delDocs == null || !delDocs.get(j)) {
reader.document(j);
@@ -151,7 +151,7 @@ public class TestLazyProxSkipping extends LuceneTestCase {
writer.close();
IndexReader reader = DirectoryReader.open(directory);

- PostingsEnum tp = MultiFields.getTermPositionsEnum(reader,
+ PostingsEnum tp = MultiTerms.getTermPostingsEnum(reader,
this.field,
new BytesRef("b"));

@@ -161,7 +161,7 @@ public class TestLazyProxSkipping extends LuceneTestCase {
assertEquals(tp.nextPosition(), 1);
}

- tp = MultiFields.getTermPositionsEnum(reader,
+ tp = MultiTerms.getTermPostingsEnum(reader,
this.field,
new BytesRef("a"));

@@ -166,7 +166,7 @@ public class TestLongPostings extends LuceneTestCase {
System.out.println("\nTEST: iter=" + iter + " doS1=" + doS1);
}

- final PostingsEnum postings = MultiFields.getTermPositionsEnum(r, "field", new BytesRef(term));
+ final PostingsEnum postings = MultiTerms.getTermPostingsEnum(r, "field", new BytesRef(term));

int docID = -1;
while(docID < DocIdSetIterator.NO_MORE_DOCS) {

@@ -78,7 +78,7 @@ public class TestMaxPosition extends LuceneTestCase {
// Document should be visible:
IndexReader r = DirectoryReader.open(iw);
assertEquals(1, r.numDocs());
- PostingsEnum postings = MultiFields.getTermPositionsEnum(r, "foo", new BytesRef("foo"));
+ PostingsEnum postings = MultiTerms.getTermPostingsEnum(r, "foo", new BytesRef("foo"));

// "foo" appears in docID=0
assertEquals(0, postings.nextDoc());
@@ -128,7 +128,7 @@ public class TestMultiFields extends LuceneTestCase {
System.out.println("TEST: reader=" + reader);
}

- Bits liveDocs = MultiFields.getLiveDocs(reader);
+ Bits liveDocs = MultiBits.getLiveDocs(reader);
for(int delDoc : deleted) {
assertFalse(liveDocs.get(delDoc));
}

@@ -158,7 +158,7 @@ public class TestMultiFields extends LuceneTestCase {
DocsEnum docs = _TestUtil.docs(random, r,
"field",
new BytesRef(term),
- MultiFields.getLiveDocs(r),
+ MultiLeafReader.getLiveDocs(r),
null,
false);
for(int docID : expected) {

@@ -196,7 +196,7 @@ public class TestMultiFields extends LuceneTestCase {
w.addDocument(d);
IndexReader r = w.getReader();
w.close();
- PostingsEnum de = MultiFields.getTermDocsEnum(r, "f", new BytesRef("j"));
+ PostingsEnum de = MultiTerms.getTermPostingsEnum(r, "f", new BytesRef("j"), (int) PostingsEnum.FREQS);
assertEquals(0, de.nextDoc());
assertEquals(1, de.nextDoc());
assertEquals(DocIdSetIterator.NO_MORE_DOCS, de.nextDoc());
@@ -354,7 +354,7 @@ public class TestNumericDocValuesUpdates extends LuceneTestCase {
writer.close();
}

- Bits liveDocs = MultiFields.getLiveDocs(reader);
+ Bits liveDocs = MultiBits.getLiveDocs(reader);
boolean[] expectedLiveDocs = new boolean[] { true, false, false, true, true, true };
for (int i = 0; i < expectedLiveDocs.length; i++) {
assertEquals(expectedLiveDocs[i], liveDocs.get(i));

@@ -50,7 +50,7 @@ public class TestOmitPositions extends LuceneTestCase {
IndexReader reader = w.getReader();
w.close();

- assertNotNull(MultiFields.getTermPositionsEnum(reader, "foo", new BytesRef("test")));
+ assertNotNull(MultiTerms.getTermPostingsEnum(reader, "foo", new BytesRef("test")));

PostingsEnum de = TestUtil.docs(random(), reader, "foo", new BytesRef("test"), null, PostingsEnum.FREQS);
while (de.nextDoc() != DocIdSetIterator.NO_MORE_DOCS) {
@@ -280,10 +280,10 @@ public class TestParallelCompositeReader extends LuceneTestCase {
assertNull(pr.document(0).get("f3"));
assertNull(pr.document(0).get("f4"));
// check that fields are there
- assertNotNull(MultiFields.getTerms(pr, "f1"));
- assertNotNull(MultiFields.getTerms(pr, "f2"));
- assertNotNull(MultiFields.getTerms(pr, "f3"));
- assertNotNull(MultiFields.getTerms(pr, "f4"));
+ assertNotNull(MultiTerms.getTerms(pr, "f1"));
+ assertNotNull(MultiTerms.getTerms(pr, "f2"));
+ assertNotNull(MultiTerms.getTerms(pr, "f3"));
+ assertNotNull(MultiTerms.getTerms(pr, "f4"));
pr.close();

// no stored fields at all

@@ -295,10 +295,10 @@ public class TestParallelCompositeReader extends LuceneTestCase {
assertNull(pr.document(0).get("f3"));
assertNull(pr.document(0).get("f4"));
// check that fields are there
- assertNull(MultiFields.getTerms(pr, "f1"));
- assertNull(MultiFields.getTerms(pr, "f2"));
- assertNotNull(MultiFields.getTerms(pr, "f3"));
- assertNotNull(MultiFields.getTerms(pr, "f4"));
+ assertNull(MultiTerms.getTerms(pr, "f1"));
+ assertNull(MultiTerms.getTerms(pr, "f2"));
+ assertNotNull(MultiTerms.getTerms(pr, "f3"));
+ assertNotNull(MultiTerms.getTerms(pr, "f4"));
pr.close();

// without overlapping

@@ -310,10 +310,10 @@ public class TestParallelCompositeReader extends LuceneTestCase {
assertNull(pr.document(0).get("f3"));
assertNull(pr.document(0).get("f4"));
// check that fields are there
- assertNull(MultiFields.getTerms(pr, "f1"));
- assertNull(MultiFields.getTerms(pr, "f2"));
- assertNotNull(MultiFields.getTerms(pr, "f3"));
- assertNotNull(MultiFields.getTerms(pr, "f4"));
+ assertNull(MultiTerms.getTerms(pr, "f1"));
+ assertNull(MultiTerms.getTerms(pr, "f2"));
+ assertNotNull(MultiTerms.getTerms(pr, "f3"));
+ assertNotNull(MultiTerms.getTerms(pr, "f4"));
pr.close();

// no main readers
@@ -191,7 +191,7 @@ public class TestPayloads extends LuceneTestCase {
offset = 0;
PostingsEnum[] tps = new PostingsEnum[numTerms];
for (int i = 0; i < numTerms; i++) {
- tps[i] = MultiFields.getTermPositionsEnum(reader,
+ tps[i] = MultiTerms.getTermPostingsEnum(reader,
terms[i].field(),
new BytesRef(terms[i].text()));
}

@@ -219,7 +219,7 @@ public class TestPayloads extends LuceneTestCase {
/*
* test lazy skipping
*/
- PostingsEnum tp = MultiFields.getTermPositionsEnum(reader,
+ PostingsEnum tp = MultiTerms.getTermPostingsEnum(reader,
terms[0].field(),
new BytesRef(terms[0].text()));
tp.nextDoc();

@@ -245,7 +245,7 @@ public class TestPayloads extends LuceneTestCase {
/*
* Test different lengths at skip points
*/
- tp = MultiFields.getTermPositionsEnum(reader,
+ tp = MultiTerms.getTermPostingsEnum(reader,
terms[1].field(),
new BytesRef(terms[1].text()));
tp.nextDoc();

@@ -282,7 +282,7 @@ public class TestPayloads extends LuceneTestCase {
writer.close();

reader = DirectoryReader.open(dir);
- tp = MultiFields.getTermPositionsEnum(reader,
+ tp = MultiTerms.getTermPostingsEnum(reader,
fieldName,
new BytesRef(singleTerm));
tp.nextDoc();

@@ -479,7 +479,7 @@ public class TestPayloads extends LuceneTestCase {
}
writer.close();
IndexReader reader = DirectoryReader.open(dir);
- TermsEnum terms = MultiFields.getTerms(reader, field).iterator();
+ TermsEnum terms = MultiTerms.getTerms(reader, field).iterator();
PostingsEnum tp = null;
while (terms.next() != null) {
String termText = terms.term().utf8ToString();

@@ -602,7 +602,7 @@ public class TestPayloads extends LuceneTestCase {
field.setTokenStream(ts);
writer.addDocument(doc);
DirectoryReader reader = writer.getReader();
- TermsEnum te = MultiFields.getTerms(reader, "field").iterator();
+ TermsEnum te = MultiTerms.getTerms(reader, "field").iterator();
assertTrue(te.seekExact(new BytesRef("withPayload")));
PostingsEnum de = te.postings(null, PostingsEnum.PAYLOADS);
de.nextDoc();

@@ -221,7 +221,7 @@ public class TestPerSegmentDeletes extends LuceneTestCase {

public int[] toDocsArray(Term term, Bits bits, IndexReader reader)
throws IOException {
- TermsEnum ctermsEnum = MultiFields.getTerms(reader, term.field).iterator();
+ TermsEnum ctermsEnum = MultiTerms.getTerms(reader, term.field).iterator();
if (ctermsEnum.seekExact(new BytesRef(term.text()))) {
PostingsEnum postingsEnum = TestUtil.docs(random(), ctermsEnum, null, PostingsEnum.NONE);
return toArray(postingsEnum);
@@ -81,7 +81,7 @@ public class TestPostingsOffsets extends LuceneTestCase {
IndexReader r = w.getReader();
w.close();

- PostingsEnum dp = MultiFields.getTermPositionsEnum(r, "content", new BytesRef("a"));
+ PostingsEnum dp = MultiTerms.getTermPostingsEnum(r, "content", new BytesRef("a"));
assertNotNull(dp);
assertEquals(0, dp.nextDoc());
assertEquals(2, dp.freq());

@@ -93,7 +93,7 @@ public class TestPostingsOffsets extends LuceneTestCase {
assertEquals(17, dp.endOffset());
assertEquals(DocIdSetIterator.NO_MORE_DOCS, dp.nextDoc());

- dp = MultiFields.getTermPositionsEnum(r, "content", new BytesRef("b"));
+ dp = MultiTerms.getTermPostingsEnum(r, "content", new BytesRef("b"));
assertNotNull(dp);
assertEquals(0, dp.nextDoc());
assertEquals(1, dp.freq());

@@ -102,7 +102,7 @@ public class TestPostingsOffsets extends LuceneTestCase {
assertEquals(9, dp.endOffset());
assertEquals(DocIdSetIterator.NO_MORE_DOCS, dp.nextDoc());

- dp = MultiFields.getTermPositionsEnum(r, "content", new BytesRef("c"));
+ dp = MultiTerms.getTermPostingsEnum(r, "content", new BytesRef("c"));
assertNotNull(dp);
assertEquals(0, dp.nextDoc());
assertEquals(1, dp.freq());

@@ -153,7 +153,7 @@ public class TestPostingsOffsets extends LuceneTestCase {
String terms[] = { "one", "two", "three", "four", "five", "six", "seven", "eight", "nine", "ten", "hundred" };

for (String term : terms) {
- PostingsEnum dp = MultiFields.getTermPositionsEnum(reader, "numbers", new BytesRef(term));
+ PostingsEnum dp = MultiTerms.getTermPostingsEnum(reader, "numbers", new BytesRef(term));
int doc;
while((doc = dp.nextDoc()) != DocIdSetIterator.NO_MORE_DOCS) {
String storedNumbers = reader.document(doc).get("numbers");

@@ -181,7 +181,7 @@ public class TestPostingsOffsets extends LuceneTestCase {

for (int j = 0; j < numSkippingTests; j++) {
int num = TestUtil.nextInt(random(), 100, Math.min(numDocs - 1, 999));
- PostingsEnum dp = MultiFields.getTermPositionsEnum(reader, "numbers", new BytesRef("hundred"));
+ PostingsEnum dp = MultiTerms.getTermPostingsEnum(reader, "numbers", new BytesRef("hundred"));
int doc = dp.advance(num);
assertEquals(num, doc);
int freq = dp.freq();

@@ -206,7 +206,7 @@ public class TestPostingsOffsets extends LuceneTestCase {
// check that other fields (without offsets) work correctly

for (int i = 0; i < numDocs; i++) {
- PostingsEnum dp = MultiFields.getTermDocsEnum(reader, "id", new BytesRef("" + i), 0);
+ PostingsEnum dp = MultiTerms.getTermPostingsEnum(reader, "id", new BytesRef("" + i), 0);
assertEquals(i, dp.nextDoc());
assertEquals(DocIdSetIterator.NO_MORE_DOCS, dp.nextDoc());
}

@@ -381,7 +381,7 @@ public class TestPostingsOffsets extends LuceneTestCase {
riw.addDocument(doc);
}
CompositeReader ir = riw.getReader();
- FieldInfos fis = MultiFields.getMergedFieldInfos(ir);
+ FieldInfos fis = FieldInfos.getMergedFieldInfos(ir);
assertEquals(IndexOptions.DOCS_AND_FREQS_AND_POSITIONS_AND_OFFSETS, fis.fieldInfo("foo").getIndexOptions());
ir.close();
ir.close();
@@ -115,9 +115,9 @@ public class TestSegmentReader extends LuceneTestCase {
}

public void testTerms() throws IOException {
- final Collection<String> fields = MultiFields.getIndexedFields(reader);
+ final Collection<String> fields = FieldInfos.getIndexedFields(reader);
for (String field : fields) {
- Terms terms = MultiFields.getTerms(reader, field);
+ Terms terms = MultiTerms.getTerms(reader, field);
assertNotNull(terms);
TermsEnum termsEnum = terms.iterator();
while(termsEnum.next() != null) {

@@ -144,7 +144,7 @@ public class TestSegmentReader extends LuceneTestCase {
assertTrue(termDocs.nextDoc() != DocIdSetIterator.NO_MORE_DOCS);


- PostingsEnum positions = MultiFields.getTermPositionsEnum(reader,
+ PostingsEnum positions = MultiTerms.getTermPostingsEnum(reader,
DocHelper.TEXT_FIELD_1_KEY,
new BytesRef("field"));
// NOTE: prior rev of this test was failing to first

@@ -104,7 +104,7 @@ public class TestSegmentTermEnum extends LuceneTestCase {
throws IOException
{
IndexReader reader = DirectoryReader.open(dir);
- TermsEnum termEnum = MultiFields.getTerms(reader, "content").iterator();
+ TermsEnum termEnum = MultiTerms.getTerms(reader, "content").iterator();

// create enumeration of all terms
// go to the first term (aaa)
@@ -310,25 +310,25 @@ public class TestStressIndexing2 extends LuceneTestCase {
int[] r2r1 = new int[r2.maxDoc()]; // r2 id to r1 id mapping

// create mapping from id2 space to id2 based on idField
- if (MultiFields.getIndexedFields(r1).isEmpty()) {
- assertTrue(MultiFields.getIndexedFields(r2).isEmpty());
+ if (FieldInfos.getIndexedFields(r1).isEmpty()) {
+ assertTrue(FieldInfos.getIndexedFields(r2).isEmpty());
return;
}
- final Terms terms1 = MultiFields.getTerms(r1, idField);
+ final Terms terms1 = MultiTerms.getTerms(r1, idField);
if (terms1 == null) {
- assertTrue(MultiFields.getTerms(r2, idField) == null);
+ assertTrue(MultiTerms.getTerms(r2, idField) == null);
return;
}
final TermsEnum termsEnum = terms1.iterator();

- final Bits liveDocs1 = MultiFields.getLiveDocs(r1);
- final Bits liveDocs2 = MultiFields.getLiveDocs(r2);
+ final Bits liveDocs1 = MultiBits.getLiveDocs(r1);
+ final Bits liveDocs2 = MultiBits.getLiveDocs(r2);

- Terms terms2 = MultiFields.getTerms(r2, idField);
+ Terms terms2 = MultiTerms.getTerms(r2, idField);
if (terms2 == null) {
// make sure r1 is in fact empty (eg has only all
// deleted docs):
- Bits liveDocs = MultiFields.getLiveDocs(r1);
+ Bits liveDocs = MultiBits.getLiveDocs(r1);
PostingsEnum docs = null;
while(termsEnum.next() != null) {
docs = TestUtil.docs(random(), termsEnum, docs, PostingsEnum.NONE);

@@ -459,8 +459,8 @@ public class TestStressIndexing2 extends LuceneTestCase {

// Verify postings
//System.out.println("TEST: create te1");
- final Iterator<String> fields1Enum = MultiFields.getIndexedFields(r1).stream().sorted().iterator();
- final Iterator<String> fields2Enum = MultiFields.getIndexedFields(r2).stream().sorted().iterator();
+ final Iterator<String> fields1Enum = FieldInfos.getIndexedFields(r1).stream().sorted().iterator();
+ final Iterator<String> fields2Enum = FieldInfos.getIndexedFields(r2).stream().sorted().iterator();


String field1=null, field2=null;

@@ -484,7 +484,7 @@ public class TestStressIndexing2 extends LuceneTestCase {
break;
}
field1 = fields1Enum.next();
- Terms terms = MultiFields.getTerms(r1, field1);
+ Terms terms = MultiTerms.getTerms(r1, field1);
if (terms == null) {
continue;
}

@@ -520,7 +520,7 @@ public class TestStressIndexing2 extends LuceneTestCase {
break;
}
field2 = fields2Enum.next();
- Terms terms = MultiFields.getTerms(r2, field2);
+ Terms terms = MultiTerms.getTerms(r2, field2);
if (terms == null) {
continue;
}
@@ -75,9 +75,9 @@ public class TestSumDocFreq extends LuceneTestCase {

private void assertSumDocFreq(IndexReader ir) throws Exception {
// compute sumDocFreq across all fields
- final Collection<String> fields = MultiFields.getIndexedFields(ir);
+ final Collection<String> fields = FieldInfos.getIndexedFields(ir);
for (String f : fields) {
- Terms terms = MultiFields.getTerms(ir, f);
+ Terms terms = MultiTerms.getTerms(ir, f);
long sumDocFreq = terms.getSumDocFreq();
if (sumDocFreq == -1) {
if (VERBOSE) {

@@ -113,7 +113,7 @@ public class TestTermdocPerf extends LuceneTestCase {

IndexReader reader = DirectoryReader.open(dir);

- TermsEnum tenum = MultiFields.getTerms(reader, "foo").iterator();
+ TermsEnum tenum = MultiTerms.getTerms(reader, "foo").iterator();

start = System.currentTimeMillis();
@@ -34,7 +34,7 @@ public class TestTerms extends LuceneTestCase {
doc.add(newTextField("field", "a b c cc ddd", Field.Store.NO));
w.addDocument(doc);
IndexReader r = w.getReader();
- Terms terms = MultiFields.getTerms(r, "field");
+ Terms terms = MultiTerms.getTerms(r, "field");
assertEquals(new BytesRef("a"), terms.getMin());
assertEquals(new BytesRef("ddd"), terms.getMax());
r.close();

@@ -74,7 +74,7 @@ public class TestTerms extends LuceneTestCase {
}

IndexReader r = w.getReader();
- Terms terms = MultiFields.getTerms(r, "field");
+ Terms terms = MultiTerms.getTerms(r, "field");
assertEquals(minTerm, terms.getMin());
assertEquals(maxTerm, terms.getMax());
@@ -64,7 +64,7 @@ public class TestTermsEnum extends LuceneTestCase {
w.close();

final List<BytesRef> terms = new ArrayList<>();
- final TermsEnum termsEnum = MultiFields.getTerms(r, "body").iterator();
+ final TermsEnum termsEnum = MultiTerms.getTerms(r, "body").iterator();
BytesRef term;
while((term = termsEnum.next()) != null) {
terms.add(BytesRef.deepCopyOf(term));

@@ -312,7 +312,7 @@ public class TestTermsEnum extends LuceneTestCase {
}
}

- final TermsEnum te = MultiFields.getTerms(r, "f").intersect(c, startTerm);
+ final TermsEnum te = MultiTerms.getTerms(r, "f").intersect(c, startTerm);

int loc;
if (startTerm == null) {

@@ -491,7 +491,7 @@ public class TestTermsEnum extends LuceneTestCase {
assertEquals(1, docFreq(r, "xx"));
assertEquals(1, docFreq(r, "aa4"));

- final TermsEnum te = MultiFields.getTerms(r, FIELD).iterator();
+ final TermsEnum te = MultiTerms.getTerms(r, FIELD).iterator();
while(te.next() != null) {
//System.out.println("TEST: next term=" + te.term().utf8ToString());
}

@@ -521,7 +521,7 @@ public class TestTermsEnum extends LuceneTestCase {
w.close();
assertEquals(1, r.numDocs());
assertEquals(1, r.maxDoc());
- Terms terms = MultiFields.getTerms(r, "field");
+ Terms terms = MultiTerms.getTerms(r, "field");
if (terms != null) {
assertNull(terms.iterator().next());
}

@@ -623,7 +623,7 @@ public class TestTermsEnum extends LuceneTestCase {
System.out.println(" " + t.utf8ToString() + " " + t);
}
}
- final TermsEnum te = MultiFields.getTerms(r, FIELD).iterator();
+ final TermsEnum te = MultiTerms.getTerms(r, FIELD).iterator();

final int END_LOC = -validTerms.length-1;

@@ -915,7 +915,7 @@ public class TestTermsEnum extends LuceneTestCase {
System.out.println("\nTEST: reader=" + r);
}

- TermsEnum termsEnum = MultiFields.getTerms(r, "id").iterator();
+ TermsEnum termsEnum = MultiTerms.getTerms(r, "id").iterator();
PostingsEnum postingsEnum = null;
PerThreadPKLookup pkLookup = new PerThreadPKLookup(r, "id");

@@ -1017,7 +1017,7 @@ public class TestTermsEnum extends LuceneTestCase {
doc.add(newStringField("field", "foobar", Field.Store.NO));
w.addDocument(doc);
IndexReader r = w.getReader();
- Terms terms = MultiFields.getTerms(r, "field");
+ Terms terms = MultiTerms.getTerms(r, "field");
CompiledAutomaton automaton = new CompiledAutomaton(new RegExp("do_not_match_anything").toAutomaton());
String message = expectThrows(IllegalArgumentException.class, () -> {terms.intersect(automaton, null);}).getMessage();
assertEquals("please use CompiledAutomaton.getTermsEnum instead", message);
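A minimal sketch of the term-iteration pattern the hunks above migrate to MultiTerms.getTerms; the field name and the returned list are illustrative placeholders:

    import java.io.IOException;
    import java.util.ArrayList;
    import java.util.List;
    import org.apache.lucene.index.IndexReader;
    import org.apache.lucene.index.MultiTerms;
    import org.apache.lucene.index.Terms;
    import org.apache.lucene.index.TermsEnum;
    import org.apache.lucene.util.BytesRef;

    // Collect every term of one field across all segments.
    static List<BytesRef> collectTerms(IndexReader reader, String field) throws IOException {
      List<BytesRef> out = new ArrayList<>();
      Terms terms = MultiTerms.getTerms(reader, field); // null when the field is absent
      if (terms != null) {
        TermsEnum te = terms.iterator();
        BytesRef term;
        while ((term = te.next()) != null) {
          out.add(BytesRef.deepCopyOf(term)); // the enum reuses its BytesRef instance
        }
      }
      return out;
    }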
@@ -116,7 +116,7 @@ public class TestTermsEnum2 extends LuceneTestCase {
String reg = AutomatonTestUtil.randomRegexp(random());
Automaton automaton = Operations.determinize(new RegExp(reg, RegExp.NONE).toAutomaton(),
DEFAULT_MAX_DETERMINIZED_STATES);
- TermsEnum te = MultiFields.getTerms(reader, "field").iterator();
+ TermsEnum te = MultiTerms.getTerms(reader, "field").iterator();
ArrayList<BytesRef> unsortedTerms = new ArrayList<>(terms);
Collections.shuffle(unsortedTerms, random());

@@ -139,7 +139,7 @@ public class TestTermsEnum2 extends LuceneTestCase {
/** mixes up seek and next for all terms */
public void testSeekingAndNexting() throws Exception {
for (int i = 0; i < numIterations; i++) {
- TermsEnum te = MultiFields.getTerms(reader, "field").iterator();
+ TermsEnum te = MultiTerms.getTerms(reader, "field").iterator();

for (BytesRef term : terms) {
int c = random().nextInt(3);

@@ -161,7 +161,7 @@ public class TestTermsEnum2 extends LuceneTestCase {
String reg = AutomatonTestUtil.randomRegexp(random());
Automaton automaton = new RegExp(reg, RegExp.NONE).toAutomaton();
CompiledAutomaton ca = new CompiledAutomaton(automaton, Operations.isFinite(automaton), false);
- TermsEnum te = MultiFields.getTerms(reader, "field").intersect(ca, null);
+ TermsEnum te = MultiTerms.getTerms(reader, "field").intersect(ca, null);
Automaton expected = Operations.determinize(Operations.intersection(termsAutomaton, automaton),
DEFAULT_MAX_DETERMINIZED_STATES);
TreeSet<BytesRef> found = new TreeSet<>();

@@ -93,7 +93,7 @@ public class TestTransactionRollback extends LuceneTestCase {

//Perhaps not the most efficient approach but meets our
//needs here.
- final Bits liveDocs = MultiFields.getLiveDocs(r);
+ final Bits liveDocs = MultiBits.getLiveDocs(r);
for (int i = 0; i < r.maxDoc(); i++) {
if (liveDocs == null || liveDocs.get(i)) {
String sval=r.document(i).get(FIELD_RECORD_ID);
@@ -26,7 +26,7 @@ import java.util.concurrent.CountDownLatch;
import org.apache.lucene.document.Document;
import org.apache.lucene.document.Field;
import org.apache.lucene.index.IndexReader;
- import org.apache.lucene.index.MultiFields;
+ import org.apache.lucene.index.MultiTerms;
import org.apache.lucene.index.RandomIndexWriter;
import org.apache.lucene.index.SingleTermsEnum;
import org.apache.lucene.index.Term;

@@ -181,7 +181,7 @@ public class TestAutomatonQuery extends LuceneTestCase {
*/
public void testRewriteSingleTerm() throws IOException {
AutomatonQuery aq = new AutomatonQuery(newTerm("bogus"), Automata.makeString("piece"));
- Terms terms = MultiFields.getTerms(searcher.getIndexReader(), FN);
+ Terms terms = MultiTerms.getTerms(searcher.getIndexReader(), FN);
assertTrue(aq.getTermsEnum(terms) instanceof SingleTermsEnum);
assertEquals(1, automatonQueryNrHits(aq));
}

@@ -204,7 +204,7 @@ public class TestAutomatonQuery extends LuceneTestCase {
AutomatonQuery aq = new AutomatonQuery(newTerm("bogus"), Automata.makeEmpty());
// not yet available: assertTrue(aq.getEnum(searcher.getIndexReader())
// instanceof EmptyTermEnum);
- Terms terms = MultiFields.getTerms(searcher.getIndexReader(), FN);
+ Terms terms = MultiTerms.getTerms(searcher.getIndexReader(), FN);
assertSame(TermsEnum.EMPTY, aq.getTermsEnum(terms));
assertEquals(0, automatonQueryNrHits(aq));
}
@@ -29,7 +29,7 @@ import org.apache.lucene.index.DirectoryReader;
import org.apache.lucene.index.IndexReader;
import org.apache.lucene.index.IndexWriter;
import org.apache.lucene.index.IndexWriterConfig;
- import org.apache.lucene.index.MultiFields;
+ import org.apache.lucene.index.MultiTerms;
import org.apache.lucene.index.RandomIndexWriter;
import org.apache.lucene.index.Term;
import org.apache.lucene.index.TermsEnum;

@@ -71,7 +71,7 @@ public class TestMultiPhraseQuery extends LuceneTestCase {

// this TermEnum gives "piccadilly", "pie" and "pizza".
String prefix = "pi";
- TermsEnum te = MultiFields.getTerms(reader,"body").iterator();
+ TermsEnum te = MultiTerms.getTerms(reader,"body").iterator();
te.seekCeil(new BytesRef(prefix));
do {
String s = te.term().utf8ToString();

@@ -23,7 +23,7 @@ import java.util.LinkedList;
import org.apache.lucene.document.Document;
import org.apache.lucene.document.Field;
import org.apache.lucene.index.IndexReader;
- import org.apache.lucene.index.MultiFields;
+ import org.apache.lucene.index.MultiTerms;
import org.apache.lucene.index.RandomIndexWriter;
import org.apache.lucene.index.Term;
import org.apache.lucene.index.TermsEnum;

@@ -73,7 +73,7 @@ public class TestPhrasePrefixQuery extends LuceneTestCase {

// this TermEnum gives "piccadilly", "pie" and "pizza".
String prefix = "pi";
- TermsEnum te = MultiFields.getTerms(reader, "body").iterator();
+ TermsEnum te = MultiTerms.getTerms(reader, "body").iterator();
te.seekCeil(new BytesRef(prefix));
do {
String s = te.term().utf8ToString();
@@ -33,7 +33,7 @@ import org.apache.lucene.document.Field;
import org.apache.lucene.document.TextField;
import org.apache.lucene.index.IndexReader;
import org.apache.lucene.index.LeafReader;
- import org.apache.lucene.index.MultiFields;
+ import org.apache.lucene.index.MultiTerms;
import org.apache.lucene.index.PostingsEnum;
import org.apache.lucene.index.RandomIndexWriter;
import org.apache.lucene.index.Term;

@@ -101,14 +101,14 @@ public class TestPositionIncrement extends LuceneTestCase {

IndexSearcher searcher = newSearcher(reader);

- PostingsEnum pos = MultiFields.getTermPositionsEnum(searcher.getIndexReader(),
+ PostingsEnum pos = MultiTerms.getTermPostingsEnum(searcher.getIndexReader(),
"field",
new BytesRef("1"));
pos.nextDoc();
// first token should be at position 0
assertEquals(0, pos.nextPosition());

- pos = MultiFields.getTermPositionsEnum(searcher.getIndexReader(),
+ pos = MultiTerms.getTermPostingsEnum(searcher.getIndexReader(),
"field",
new BytesRef("2"));
pos.nextDoc();
@@ -28,7 +28,7 @@ import org.apache.lucene.analysis.MockAnalyzer;
import org.apache.lucene.document.Document;
import org.apache.lucene.index.IndexReader;
import org.apache.lucene.index.IndexWriter;
- import org.apache.lucene.index.MultiFields;
+ import org.apache.lucene.index.MultiTerms;
import org.apache.lucene.index.RandomIndexWriter;
import org.apache.lucene.index.Term;
import org.apache.lucene.index.Terms;

@@ -61,7 +61,7 @@ public class TestSameScoresWithThreads extends LuceneTestCase {
w.close();

final IndexSearcher s = newSearcher(r);
- Terms terms = MultiFields.getTerms(r, "body");
+ Terms terms = MultiTerms.getTerms(r, "body");
int termCount = 0;
TermsEnum termsEnum = terms.iterator();
while(termsEnum.next() != null) {

@@ -24,8 +24,8 @@ import java.util.List;

import org.apache.lucene.index.IndexReader;
import org.apache.lucene.index.IndexReaderContext;
- import org.apache.lucene.index.MultiFields;
import org.apache.lucene.index.MultiReader;
+ import org.apache.lucene.index.MultiTerms;
import org.apache.lucene.index.Term;
import org.apache.lucene.index.TermsEnum;
import org.apache.lucene.util.BytesRef;

@@ -174,7 +174,7 @@ public class TestShardSearching extends ShardSearchingTestBase {
if (terms == null && docCount > minDocsToMakeTerms) {
// TODO: try to "focus" on high freq terms sometimes too
// TODO: maybe also periodically reset the terms...?
- final TermsEnum termsEnum = MultiFields.getTerms(mockReader, "body").iterator();
+ final TermsEnum termsEnum = MultiTerms.getTerms(mockReader, "body").iterator();
terms = new ArrayList<>();
while(termsEnum.next() != null) {
terms.add(BytesRef.deepCopyOf(termsEnum.term()));
@@ -18,13 +18,13 @@ package org.apache.lucene.search;


import org.apache.lucene.document.Field;
+ import org.apache.lucene.index.MultiTerms;
import org.apache.lucene.store.Directory;
import org.apache.lucene.util.LuceneTestCase;
import org.apache.lucene.analysis.MockAnalyzer;
import org.apache.lucene.document.Document;
import org.apache.lucene.index.DirectoryReader;
import org.apache.lucene.index.IndexReader;
- import org.apache.lucene.index.MultiFields;
import org.apache.lucene.index.RandomIndexWriter;
import org.apache.lucene.index.Term;
import org.apache.lucene.index.Terms;

@@ -116,7 +116,7 @@ public class TestWildcard extends LuceneTestCase {

wq = new WildcardQuery(new Term("field", "*"));
assertMatches(searcher, wq, 2);
- Terms terms = MultiFields.getTerms(searcher.getIndexReader(), "field");
+ Terms terms = MultiTerms.getTerms(searcher.getIndexReader(), "field");
assertFalse(wq.getTermsEnum(terms).getClass().getSimpleName().contains("AutomatonTermsEnum"));
reader.close();
indexStore.close();
@@ -46,7 +46,7 @@ import org.apache.lucene.index.DirectoryReader;
import org.apache.lucene.index.IndexReader;
import org.apache.lucene.index.IndexWriter;
import org.apache.lucene.index.IndexWriterConfig;
- import org.apache.lucene.index.MultiFields;
+ import org.apache.lucene.index.MultiTerms;
import org.apache.lucene.index.RandomIndexWriter;
import org.apache.lucene.index.Term;
import org.apache.lucene.index.Terms;

@@ -337,7 +337,7 @@ public class TestFSTs extends LuceneTestCase {
System.out.println("FST stores docFreq");
}
}
- Terms terms = MultiFields.getTerms(r, "body");
+ Terms terms = MultiTerms.getTerms(r, "body");
if (terms != null) {
final IntsRefBuilder scratchIntsRef = new IntsRefBuilder();
final TermsEnum termsEnum = terms.iterator();

@@ -917,7 +917,7 @@ public class TestFSTs extends LuceneTestCase {
}

// Verify w/ MultiTermsEnum
- final TermsEnum termsEnum = MultiFields.getTerms(r, "id").iterator();
+ final TermsEnum termsEnum = MultiTerms.getTerms(r, "id").iterator();
for(int iter=0;iter<2*NUM_IDS;iter++) {
final String id;
final String nextID;
@@ -35,7 +35,7 @@ import org.apache.lucene.index.CorruptIndexException; // javadocs
import org.apache.lucene.index.DirectoryReader;
import org.apache.lucene.index.IndexWriter;
import org.apache.lucene.index.LeafReaderContext;
- import org.apache.lucene.index.MultiFields;
+ import org.apache.lucene.index.MultiTerms;
import org.apache.lucene.index.PostingsEnum;
import org.apache.lucene.index.SegmentReader;
import org.apache.lucene.search.DocIdSetIterator;

@@ -284,7 +284,7 @@ public class DirectoryTaxonomyReader extends TaxonomyReader implements Accountab
// If we're still here, we have a cache miss. We need to fetch the
// value from disk, and then also put it in the cache:
int ret = TaxonomyReader.INVALID_ORDINAL;
- PostingsEnum docs = MultiFields.getTermDocsEnum(indexReader, Consts.FULL, new BytesRef(FacetsConfig.pathToString(cp.components, cp.length)), 0);
+ PostingsEnum docs = MultiTerms.getTermPostingsEnum(indexReader, Consts.FULL, new BytesRef(FacetsConfig.pathToString(cp.components, cp.length)), 0);
if (docs != null && docs.nextDoc() != DocIdSetIterator.NO_MORE_DOCS) {
ret = docs.docID();

@@ -26,7 +26,7 @@ import org.apache.lucene.facet.taxonomy.ParallelTaxonomyArrays;
import org.apache.lucene.facet.taxonomy.TaxonomyReader;
import org.apache.lucene.index.CorruptIndexException;
import org.apache.lucene.index.IndexReader;
- import org.apache.lucene.index.MultiFields;
+ import org.apache.lucene.index.MultiTerms;
import org.apache.lucene.index.PostingsEnum;
import org.apache.lucene.search.DocIdSetIterator;
import org.apache.lucene.util.Accountable;

@@ -132,10 +132,10 @@ class TaxonomyIndexArrays extends ParallelTaxonomyArrays implements Accountable
return;
}

- // it's ok to use MultiFields because we only iterate on one posting list.
+ // it's ok to use MultiTerms because we only iterate on one posting list.
// breaking it to loop over the leaves() only complicates code for no
// apparent gain.
- PostingsEnum positions = MultiFields.getTermPositionsEnum(reader,
+ PostingsEnum positions = MultiTerms.getTermPostingsEnum(reader,
Consts.FIELD_PAYLOADS, Consts.PAYLOAD_PARENT_BYTES_REF,
PostingsEnum.PAYLOADS);
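A minimal sketch of the payload-reading variant used by the taxonomy code above; the reader, field and term are caller-supplied, and the null handling is illustrative rather than copied from the patch:

    import java.io.IOException;
    import org.apache.lucene.index.IndexReader;
    import org.apache.lucene.index.MultiTerms;
    import org.apache.lucene.index.PostingsEnum;
    import org.apache.lucene.search.DocIdSetIterator;
    import org.apache.lucene.util.BytesRef;

    // Read the payload attached to the first position of the first matching document.
    static BytesRef firstPayload(IndexReader reader, String field, BytesRef term) throws IOException {
      PostingsEnum positions =
          MultiTerms.getTermPostingsEnum(reader, field, term, PostingsEnum.PAYLOADS);
      if (positions == null || positions.nextDoc() == DocIdSetIterator.NO_MORE_DOCS) {
        return null; // term not present
      }
      positions.nextPosition(); // a position must be consumed before the payload is readable
      return positions.getPayload();
    }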
@@ -46,7 +46,6 @@ import org.apache.lucene.index.IndexOptions;
import org.apache.lucene.index.IndexReader;
import org.apache.lucene.index.LeafReader;
import org.apache.lucene.index.LeafReaderContext;
- import org.apache.lucene.index.MultiFields;
import org.apache.lucene.index.MultiReader;
import org.apache.lucene.index.ReaderUtil;
import org.apache.lucene.index.StoredFieldVisitor;

@@ -370,7 +369,7 @@ public class UnifiedHighlighter {
synchronized (this) {
fieldInfos = this.fieldInfos;
if (fieldInfos == null) {
- fieldInfos = MultiFields.getMergedFieldInfos(searcher.getIndexReader());
+ fieldInfos = FieldInfos.getMergedFieldInfos(searcher.getIndexReader());
this.fieldInfos = fieldInfos;
}
@@ -44,7 +44,8 @@ import org.apache.lucene.index.IndexWriter;
import org.apache.lucene.index.IndexWriterConfig;
import org.apache.lucene.index.LeafReaderContext;
import org.apache.lucene.index.LogDocMergePolicy;
- import org.apache.lucene.index.MultiFields;
+ import org.apache.lucene.index.MultiBits;
+ import org.apache.lucene.index.MultiTerms;
import org.apache.lucene.index.NoMergePolicy;
import org.apache.lucene.index.PostingsEnum;
import org.apache.lucene.index.RandomIndexWriter;

@@ -651,11 +652,11 @@ public class TestBlockJoin extends LuceneTestCase {
System.out.println("TEST: reader=" + r);
System.out.println("TEST: joinReader=" + joinR);

- Bits liveDocs = MultiFields.getLiveDocs(joinR);
+ Bits liveDocs = MultiBits.getLiveDocs(joinR);
for(int docIDX=0;docIDX<joinR.maxDoc();docIDX++) {
System.out.println(" docID=" + docIDX + " doc=" + joinR.document(docIDX) + " deleted?=" + (liveDocs != null && liveDocs.get(docIDX) == false));
}
- PostingsEnum parents = MultiFields.getTermDocsEnum(joinR, "isParent", new BytesRef("x"));
+ PostingsEnum parents = MultiTerms.getTermPostingsEnum(joinR, "isParent", new BytesRef("x"), (int) PostingsEnum.FREQS);
System.out.println("parent docIDs:");
while (parents.nextDoc() != PostingsEnum.NO_MORE_DOCS) {
System.out.println(" " + parents.docID());
@@ -55,7 +55,7 @@ import org.apache.lucene.index.IndexReader;
import org.apache.lucene.index.IndexWriter;
import org.apache.lucene.index.LeafReader;
import org.apache.lucene.index.LeafReaderContext;
- import org.apache.lucene.index.MultiFields;
+ import org.apache.lucene.index.MultiTerms;
import org.apache.lucene.index.NoMergePolicy;
import org.apache.lucene.index.NumericDocValues;
import org.apache.lucene.index.OrdinalMap;

@@ -1507,7 +1507,7 @@ public class TestJoinUtil extends LuceneTestCase {

final Map<Integer, JoinScore> docToJoinScore = new HashMap<>();
if (multipleValuesPerDocument) {
- Terms terms = MultiFields.getTerms(topLevelReader, toField);
+ Terms terms = MultiTerms.getTerms(topLevelReader, toField);
if (terms != null) {
PostingsEnum postingsEnum = null;
SortedSet<BytesRef> joinValues = new TreeSet<>();

@@ -1674,7 +1674,7 @@ public class TestJoinUtil extends LuceneTestCase {
}

for (RandomDoc otherSideDoc : otherMatchingDocs) {
- PostingsEnum postingsEnum = MultiFields.getTermDocsEnum(topLevelReader, "id", new BytesRef(otherSideDoc.id), 0);
+ PostingsEnum postingsEnum = MultiTerms.getTermPostingsEnum(topLevelReader, "id", new BytesRef(otherSideDoc.id), 0);
assert postingsEnum != null;
int doc = postingsEnum.nextDoc();
expectedResult.set(doc);

@@ -171,7 +171,7 @@ public class TestMemoryIndexAgainstRAMDir extends BaseTokenStreamTestCase {
private void duellReaders(CompositeReader other, LeafReader memIndexReader)
throws IOException {
Fields memFields = memIndexReader.getTermVectors(0);
- for (String field : MultiFields.getIndexedFields(other)) {
+ for (String field : FieldInfos.getIndexedFields(other)) {
Terms memTerms = memFields.terms(field);
Terms iwTerms = memIndexReader.terms(field);
if (iwTerms == null) {
@@ -23,8 +23,9 @@ import java.util.Comparator;
import java.util.Locale;

import org.apache.lucene.index.DirectoryReader;
+ import org.apache.lucene.index.FieldInfos;
import org.apache.lucene.index.IndexReader;
- import org.apache.lucene.index.MultiFields;
+ import org.apache.lucene.index.MultiTerms;
import org.apache.lucene.index.Terms;
import org.apache.lucene.index.TermsEnum;
import org.apache.lucene.store.Directory;

@@ -98,7 +99,7 @@ public class HighFreqTerms {
TermStatsQueue tiq = null;

if (field != null) {
- Terms terms = MultiFields.getTerms(reader, field);
+ Terms terms = MultiTerms.getTerms(reader, field);
if (terms == null) {
throw new RuntimeException("field " + field + " not found");
}

@@ -107,13 +108,13 @@ public class HighFreqTerms {
tiq = new TermStatsQueue(numTerms, comparator);
tiq.fill(field, termsEnum);
} else {
- Collection<String> fields = MultiFields.getIndexedFields(reader);
+ Collection<String> fields = FieldInfos.getIndexedFields(reader);
if (fields.size() == 0) {
throw new RuntimeException("no fields found for this index");
}
tiq = new TermStatsQueue(numTerms, comparator);
for (String fieldName : fields) {
- Terms terms = MultiFields.getTerms(reader, fieldName);
+ Terms terms = MultiTerms.getTerms(reader, fieldName);
if (terms != null) {
tiq.fill(fieldName, terms.iterator());
}
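A minimal sketch of the all-fields branch above, combining FieldInfos.getIndexedFields with MultiTerms.getTerms; the printing is illustrative only:

    import java.io.IOException;
    import java.util.Collection;
    import org.apache.lucene.index.FieldInfos;
    import org.apache.lucene.index.IndexReader;
    import org.apache.lucene.index.MultiTerms;
    import org.apache.lucene.index.Terms;

    // Visit the terms of every indexed field in the reader.
    static void visitAllFields(IndexReader reader) throws IOException {
      Collection<String> fields = FieldInfos.getIndexedFields(reader);
      for (String fieldName : fields) {
        Terms terms = MultiTerms.getTerms(reader, fieldName);
        if (terms != null) {
          System.out.println(fieldName + ": sumDocFreq=" + terms.getSumDocFreq());
        }
      }
    }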
@@ -70,7 +70,7 @@ public class TestMultiPassIndexSplitter extends LuceneTestCase {
assertTrue(ir.numDocs() - NUM_DOCS / 3 <= 1); // rounding error
Document doc = ir.document(0);
assertEquals("0", doc.get("id"));
- TermsEnum te = MultiFields.getTerms(ir, "id").iterator();
+ TermsEnum te = MultiTerms.getTerms(ir, "id").iterator();
assertEquals(TermsEnum.SeekStatus.NOT_FOUND, te.seekCeil(new BytesRef("1")));
assertNotSame("1", te.term().utf8ToString());
ir.close();

@@ -78,7 +78,7 @@ public class TestMultiPassIndexSplitter extends LuceneTestCase {
assertTrue(ir.numDocs() - NUM_DOCS / 3 <= 1);
doc = ir.document(0);
assertEquals("1", doc.get("id"));
- te = MultiFields.getTerms(ir, "id").iterator();
+ te = MultiTerms.getTerms(ir, "id").iterator();
assertEquals(TermsEnum.SeekStatus.NOT_FOUND, te.seekCeil(new BytesRef("0")));

assertNotSame("0", te.term().utf8ToString());

@@ -88,7 +88,7 @@ public class TestMultiPassIndexSplitter extends LuceneTestCase {
doc = ir.document(0);
assertEquals("2", doc.get("id"));

- te = MultiFields.getTerms(ir, "id").iterator();
+ te = MultiTerms.getTerms(ir, "id").iterator();
assertEquals(TermsEnum.SeekStatus.NOT_FOUND, te.seekCeil(new BytesRef("1")));
assertNotSame("1", te.term());

@@ -128,7 +128,7 @@ public class TestMultiPassIndexSplitter extends LuceneTestCase {
doc = ir.document(0);
assertEquals(start + "", doc.get("id"));
// make sure the deleted doc is not here
- TermsEnum te = MultiFields.getTerms(ir, "id").iterator();
+ TermsEnum te = MultiTerms.getTerms(ir, "id").iterator();
Term t = new Term("id", (NUM_DOCS - 1) + "");
assertEquals(TermsEnum.SeekStatus.NOT_FOUND, te.seekCeil(new BytesRef(t.text())));
assertNotSame(t.text(), te.term().utf8ToString());

@@ -89,7 +89,7 @@ public class TestPKIndexSplitter extends LuceneTestCase {
}

private void checkContents(IndexReader ir, String indexname) throws Exception {
- final Bits liveDocs = MultiFields.getLiveDocs(ir);
+ final Bits liveDocs = MultiBits.getLiveDocs(ir);
for (int i = 0; i < ir.maxDoc(); i++) {
if (liveDocs == null || liveDocs.get(i)) {
assertEquals(indexname, ir.document(i).get("indexname"));
@@ -23,7 +23,7 @@ import org.apache.lucene.index.BinaryDocValues;
import org.apache.lucene.index.DocValues;
import org.apache.lucene.index.IndexReader;
import org.apache.lucene.index.LeafReaderContext;
- import org.apache.lucene.index.MultiFields;
+ import org.apache.lucene.index.MultiTerms;
import org.apache.lucene.index.ReaderUtil;
import org.apache.lucene.index.Terms;
import org.apache.lucene.index.TermsEnum;

@@ -57,7 +57,7 @@ public class JoinDocFreqValueSource extends FieldCacheSource {
{
final BinaryDocValues terms = DocValues.getBinary(readerContext.reader(), field);
final IndexReader top = ReaderUtil.getTopLevelContext(readerContext).reader();
- Terms t = MultiFields.getTerms(top, qfield);
+ Terms t = MultiTerms.getTerms(top, qfield);
final TermsEnum termsEnum = t == null ? TermsEnum.EMPTY : t.iterator();

return new IntDocValues(this) {
@@ -29,10 +29,10 @@ import org.apache.lucene.analysis.Analyzer;
import org.apache.lucene.analysis.TokenStream;
import org.apache.lucene.analysis.tokenattributes.CharTermAttribute;
import org.apache.lucene.document.Document;
+ import org.apache.lucene.index.FieldInfos;
import org.apache.lucene.index.Fields;
import org.apache.lucene.index.IndexReader;
import org.apache.lucene.index.IndexableField;
- import org.apache.lucene.index.MultiFields;
import org.apache.lucene.index.Term;
import org.apache.lucene.index.Terms;
import org.apache.lucene.index.TermsEnum;

@@ -577,7 +577,7 @@ public final class MoreLikeThis {
public Query like(int docNum) throws IOException {
if (fieldNames == null) {
// gather list of valid fields from lucene
- Collection<String> fields = MultiFields.getIndexedFields(ir);
+ Collection<String> fields = FieldInfos.getIndexedFields(ir);
fieldNames = fields.toArray(new String[fields.size()]);
}

@@ -592,7 +592,7 @@ public final class MoreLikeThis {
public Query like(Map<String, Collection<Object>> filteredDocument) throws IOException {
if (fieldNames == null) {
// gather list of valid fields from lucene
- Collection<String> fields = MultiFields.getIndexedFields(ir);
+ Collection<String> fields = FieldInfos.getIndexedFields(ir);
fieldNames = fields.toArray(new String[fields.size()]);
}
return createQuery(retrieveTerms(filteredDocument));
@@ -15,13 +15,13 @@
* limitations under the License.
*/
package org.apache.lucene.queryparser.surround.query;
+import org.apache.lucene.index.MultiTerms;
import org.apache.lucene.index.Term;
import org.apache.lucene.index.Terms;
import org.apache.lucene.util.BytesRef;
import org.apache.lucene.util.StringHelper;
import org.apache.lucene.index.TermsEnum;
import org.apache.lucene.index.IndexReader;
-import org.apache.lucene.index.MultiFields;

import java.io.IOException;
@@ -61,7 +61,7 @@ public class SrndPrefixQuery extends SimpleTerm {
MatchingTermVisitor mtv) throws IOException
{
/* inspired by PrefixQuery.rewrite(): */
-Terms terms = MultiFields.getTerms(reader, fieldName);
+Terms terms = MultiTerms.getTerms(reader, fieldName);
if (terms != null) {
TermsEnum termsEnum = terms.iterator();

@@ -18,10 +18,10 @@ package org.apache.lucene.queryparser.surround.query;
import java.io.IOException;

import org.apache.lucene.index.IndexReader;
+import org.apache.lucene.index.MultiTerms;
import org.apache.lucene.index.Term;
import org.apache.lucene.index.TermsEnum;
import org.apache.lucene.index.Terms;
-import org.apache.lucene.index.MultiFields;
import org.apache.lucene.util.BytesRef;
@@ -51,7 +51,7 @@ public class SrndTermQuery extends SimpleTerm {
MatchingTermVisitor mtv) throws IOException
{
/* check term presence in index here for symmetry with other SimpleTerm's */
-Terms terms = MultiFields.getTerms(reader, fieldName);
+Terms terms = MultiTerms.getTerms(reader, fieldName);
if (terms != null) {
TermsEnum termsEnum = terms.iterator();

@@ -15,13 +15,13 @@
* limitations under the License.
*/
package org.apache.lucene.queryparser.surround.query;
+import org.apache.lucene.index.MultiTerms;
import org.apache.lucene.index.Term;
import org.apache.lucene.index.TermsEnum;
import org.apache.lucene.index.Terms;
import org.apache.lucene.util.BytesRef;
import org.apache.lucene.util.StringHelper;
import org.apache.lucene.index.IndexReader;
-import org.apache.lucene.index.MultiFields;

import java.io.IOException;
@@ -91,7 +91,7 @@ public class SrndTruncQuery extends SimpleTerm {
MatchingTermVisitor mtv) throws IOException
{
int prefixLength = prefix.length();
-Terms terms = MultiFields.getTerms(reader, fieldName);
+Terms terms = MultiTerms.getTerms(reader, fieldName);
if (terms != null) {
Matcher matcher = pattern.matcher("");
try {

@@ -28,7 +28,7 @@ import org.apache.lucene.analysis.TokenStream;
import org.apache.lucene.analysis.tokenattributes.CharTermAttribute;
import org.apache.lucene.index.IndexReader;
import org.apache.lucene.index.LeafReaderContext;
-import org.apache.lucene.index.MultiFields;
+import org.apache.lucene.index.MultiTerms;
import org.apache.lucene.index.Term;
import org.apache.lucene.index.TermStates;
import org.apache.lucene.index.Terms;
@@ -188,7 +188,7 @@ public class FuzzyLikeThisQuery extends Query

private void addTerms(IndexReader reader, FieldVals f, ScoreTermQueue q) throws IOException {
if (f.queryString == null) return;
-final Terms terms = MultiFields.getTerms(reader, f.fieldName);
+final Terms terms = MultiTerms.getTerms(reader, f.fieldName);
if (terms == null) {
return;
}

@@ -33,8 +33,8 @@ import org.apache.lucene.index.IndexReader;
import org.apache.lucene.index.IndexWriter;
import org.apache.lucene.index.IndexWriterConfig;
import org.apache.lucene.index.LeafReaderContext;
+import org.apache.lucene.index.MultiBits;
import org.apache.lucene.index.MultiDocValues;
-import org.apache.lucene.index.MultiFields;
import org.apache.lucene.index.NumericDocValues;
import org.apache.lucene.index.SerialMergeScheduler;
import org.apache.lucene.index.Term;
@@ -232,7 +232,7 @@ public abstract class BaseLatLonShapeTestCase extends LuceneTestCase {

final int iters = atLeast(75);

-Bits liveDocs = MultiFields.getLiveDocs(s.getIndexReader());
+Bits liveDocs = MultiBits.getLiveDocs(s.getIndexReader());
int maxDoc = s.getIndexReader().maxDoc();

for (int iter = 0; iter < iters; ++iter) {
@@ -328,7 +328,7 @@ public abstract class BaseLatLonShapeTestCase extends LuceneTestCase {

final int iters = atLeast(75);

-Bits liveDocs = MultiFields.getLiveDocs(s.getIndexReader());
+Bits liveDocs = MultiBits.getLiveDocs(s.getIndexReader());
int maxDoc = s.getIndexReader().maxDoc();

for (int iter = 0; iter < iters; ++iter) {

@@ -17,7 +17,7 @@
package org.apache.lucene.search.spell;

import org.apache.lucene.index.IndexReader;
-import org.apache.lucene.index.MultiFields;
+import org.apache.lucene.index.MultiTerms;
import org.apache.lucene.index.Term;
import org.apache.lucene.index.Terms;
import org.apache.lucene.search.BoostAttribute;
@@ -402,7 +402,7 @@ public class DirectSpellChecker {
AttributeSource atts = new AttributeSource();
MaxNonCompetitiveBoostAttribute maxBoostAtt =
atts.addAttribute(MaxNonCompetitiveBoostAttribute.class);
-Terms terms = MultiFields.getTerms(ir, term.field());
+Terms terms = MultiTerms.getTerms(ir, term.field());
if (terms == null) {
return Collections.emptyList();
}

@@ -20,9 +20,9 @@ import java.io.IOException;
import java.util.Set;

import org.apache.lucene.index.IndexReader;
+import org.apache.lucene.index.MultiTerms;
import org.apache.lucene.index.TermsEnum;
import org.apache.lucene.index.Terms;
-import org.apache.lucene.index.MultiFields;
import org.apache.lucene.search.suggest.InputIterator;
import org.apache.lucene.util.BytesRef;
import org.apache.lucene.util.BytesRefBuilder;
@@ -67,7 +67,7 @@ public class HighFrequencyDictionary implements Dictionary {
private long freq;

HighFrequencyIterator() throws IOException {
-Terms terms = MultiFields.getTerms(reader, field);
+Terms terms = MultiTerms.getTerms(reader, field);
if (terms != null) {
termsEnum = terms.iterator();
} else {

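Editorial note: as a usage sketch for the dictionary-style callers above, here is a short example (class and method names are hypothetical, not from this commit) that walks every term of a field in order through the merged TermsEnum, which is what these iterators wrap.

    import java.io.IOException;
    import org.apache.lucene.index.IndexReader;
    import org.apache.lucene.index.MultiTerms;
    import org.apache.lucene.index.Terms;
    import org.apache.lucene.index.TermsEnum;
    import org.apache.lucene.util.BytesRef;

    final class TermWalker {
      // Prints each term of the field together with its document frequency.
      static void dump(IndexReader reader, String field) throws IOException {
        Terms terms = MultiTerms.getTerms(reader, field);
        if (terms == null) {
          return;                      // nothing indexed under this field
        }
        TermsEnum te = terms.iterator();
        for (BytesRef term = te.next(); term != null; term = te.next()) {
          System.out.println(term.utf8ToString() + " df=" + te.docFreq());
        }
      }
    }
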
@@ -17,9 +17,9 @@
package org.apache.lucene.search.spell;

import org.apache.lucene.index.IndexReader;
+import org.apache.lucene.index.MultiTerms;
import org.apache.lucene.search.suggest.InputIterator;
import org.apache.lucene.index.Terms;
-import org.apache.lucene.index.MultiFields;

import java.io.*;
@@ -42,7 +42,7 @@ public class LuceneDictionary implements Dictionary {

@Override
public final InputIterator getEntryIterator() throws IOException {
-final Terms terms = MultiFields.getTerms(reader, field);
+final Terms terms = MultiTerms.getTerms(reader, field);
if (terms != null) {
return new InputIterator.InputIteratorWrapper(terms.iterator());
} else {

@@ -24,8 +24,8 @@ import java.util.Set;
import org.apache.lucene.document.Document;
import org.apache.lucene.index.IndexReader;
import org.apache.lucene.index.IndexableField;
+import org.apache.lucene.index.MultiBits;
import org.apache.lucene.index.MultiDocValues;
-import org.apache.lucene.index.MultiFields;
import org.apache.lucene.index.NumericDocValues;
import org.apache.lucene.search.spell.Dictionary;
import org.apache.lucene.util.Bits;
@@ -128,7 +128,7 @@ public class DocumentDictionary implements Dictionary {
this.hasContexts = hasContexts;
docCount = reader.maxDoc() - 1;
weightValues = (weightField != null) ? MultiDocValues.getNumericValues(reader, weightField) : null;
-liveDocs = (reader.leaves().size() > 0) ? MultiFields.getLiveDocs(reader) : null;
+liveDocs = (reader.leaves().size() > 0) ? MultiBits.getLiveDocs(reader) : null;
relevantFields = getRelevantFields(new String [] {field, weightField, payloadField, contextsField});
}

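Editorial note: a sketch of the pattern the constructor above relies on, pairing the merged live-docs bitset with a merged numeric doc-values view. The class, method, and field names are hypothetical; the advanceExact/longValue calls are the standard NumericDocValues iteration.

    import java.io.IOException;
    import org.apache.lucene.index.IndexReader;
    import org.apache.lucene.index.MultiBits;
    import org.apache.lucene.index.MultiDocValues;
    import org.apache.lucene.index.NumericDocValues;
    import org.apache.lucene.util.Bits;

    final class WeightDump {
      // Prints the numeric "weight" value of every live document that has one.
      static void dump(IndexReader reader, String weightField) throws IOException {
        Bits liveDocs = MultiBits.getLiveDocs(reader);          // null when nothing is deleted
        NumericDocValues weights = MultiDocValues.getNumericValues(reader, weightField);
        if (weights == null) {
          return;                                               // field has no numeric doc values
        }
        for (int doc = 0; doc < reader.maxDoc(); doc++) {
          if ((liveDocs == null || liveDocs.get(doc)) && weights.advanceExact(doc)) {
            System.out.println("doc=" + doc + " weight=" + weights.longValue());
          }
        }
      }
    }
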
@@ -49,7 +49,7 @@ import org.apache.lucene.index.IndexOptions;
import org.apache.lucene.index.IndexReader;
import org.apache.lucene.index.IndexWriter;
import org.apache.lucene.index.IndexWriterConfig;
-import org.apache.lucene.index.MultiFields;
+import org.apache.lucene.index.MultiTerms;
import org.apache.lucene.index.Terms;
import org.apache.lucene.index.TermsEnum;
import org.apache.lucene.search.suggest.InputIterator;
@@ -295,7 +295,7 @@ public class FreeTextSuggester extends Lookup implements Accountable {
}
reader = DirectoryReader.open(writer);

-Terms terms = MultiFields.getTerms(reader, "body");
+Terms terms = MultiTerms.getTerms(reader, "body");
if (terms == null) {
throw new IllegalArgumentException("need at least one suggestion");
}

@@ -42,8 +42,8 @@ import org.apache.lucene.index.IndexReader;
import org.apache.lucene.index.IndexWriter;
import org.apache.lucene.index.IndexWriterConfig;
import org.apache.lucene.index.LeafReaderContext;
+import org.apache.lucene.index.MultiBits;
import org.apache.lucene.index.MultiDocValues;
-import org.apache.lucene.index.MultiFields;
import org.apache.lucene.index.NumericDocValues;
import org.apache.lucene.index.RandomIndexWriter;
import org.apache.lucene.index.SegmentReadState;
@@ -801,7 +801,7 @@ public abstract class BaseGeoPointTestCase extends LuceneTestCase {

int iters = atLeast(25);

-Bits liveDocs = MultiFields.getLiveDocs(s.getIndexReader());
+Bits liveDocs = MultiBits.getLiveDocs(s.getIndexReader());
int maxDoc = s.getIndexReader().maxDoc();

for (int iter=0;iter<iters;iter++) {
@@ -930,7 +930,7 @@ public abstract class BaseGeoPointTestCase extends LuceneTestCase {

int iters = atLeast(25);

-Bits liveDocs = MultiFields.getLiveDocs(s.getIndexReader());
+Bits liveDocs = MultiBits.getLiveDocs(s.getIndexReader());
int maxDoc = s.getIndexReader().maxDoc();

for (int iter=0;iter<iters;iter++) {
@@ -1072,7 +1072,7 @@ public abstract class BaseGeoPointTestCase extends LuceneTestCase {

final int iters = atLeast(75);

-Bits liveDocs = MultiFields.getLiveDocs(s.getIndexReader());
+Bits liveDocs = MultiBits.getLiveDocs(s.getIndexReader());
int maxDoc = s.getIndexReader().maxDoc();

for (int iter=0;iter<iters;iter++) {

@@ -360,7 +360,7 @@ abstract class BaseIndexFileFormatTestCase extends LuceneTestCase {
};
try (FieldsConsumer consumer = codec.postingsFormat().fieldsConsumer(writeState)) {
final Fields fields = new Fields() {
-TreeSet<String> indexedFields = new TreeSet<>(MultiFields.getIndexedFields(oneDocReader));
+TreeSet<String> indexedFields = new TreeSet<>(FieldInfos.getIndexedFields(oneDocReader));

@Override
public Iterator<String> iterator() {

@@ -167,7 +167,7 @@ public abstract class BasePointsFormatTestCase extends BaseIndexFileFormatTestCase {
w.close();
DirectoryReader r = DirectoryReader.open(dir);
assertEquals(1, r.numDocs());
-Bits liveDocs = MultiFields.getLiveDocs(r);
+Bits liveDocs = MultiBits.getLiveDocs(r);

for(LeafReaderContext ctx : r.leaves()) {
PointValues values = ctx.reader().getPointValues("dim");
@@ -756,7 +756,7 @@ public abstract class BasePointsFormatTestCase extends BaseIndexFileFormatTestCase {
}
}

-Bits liveDocs = MultiFields.getLiveDocs(r);
+Bits liveDocs = MultiBits.getLiveDocs(r);

// Verify min/max values are correct:
byte[] minValues = new byte[numIndexDims*numBytesPerDim];

@@ -551,7 +551,7 @@ public abstract class BasePostingsFormatTestCase extends BaseIndexFileFormatTestCase {
IndexReader r = w.getReader();
w.close();

-Terms terms = MultiFields.getTerms(r, "body");
+Terms terms = MultiTerms.getTerms(r, "body");
assertEquals(sumDocFreq.get(), terms.getSumDocFreq());
assertEquals(sumTotalTermFreq.get(), terms.getSumTotalTermFreq());

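Editorial note: the merged Terms object also exposes the collection statistics asserted above. A small sketch (hypothetical class and method names) that reads them outside a test.

    import java.io.IOException;
    import org.apache.lucene.index.IndexReader;
    import org.apache.lucene.index.MultiTerms;
    import org.apache.lucene.index.Terms;

    final class FieldStats {
      // Prints aggregate statistics of one indexed field across all segments.
      static void print(IndexReader reader, String field) throws IOException {
        Terms terms = MultiTerms.getTerms(reader, field);
        if (terms == null) {
          return;
        }
        System.out.println("sumDocFreq=" + terms.getSumDocFreq()
            + " sumTotalTermFreq=" + terms.getSumTotalTermFreq()
            + " docCount=" + terms.getDocCount());
      }
    }
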
@@ -356,7 +356,7 @@ public abstract class ThreadedIndexingAndSearchingTestCase extends LuceneTestCase {
}
if (s.getIndexReader().numDocs() > 0) {
smokeTestSearcher(s);
-Terms terms = MultiFields.getTerms(s.getIndexReader(), "body");
+Terms terms = MultiTerms.getTerms(s.getIndexReader(), "body");
if (terms == null) {
continue;
}

@@ -28,8 +28,8 @@ import org.apache.lucene.index.IndexReader;
import org.apache.lucene.index.IndexWriter;
import org.apache.lucene.index.IndexWriterConfig;
import org.apache.lucene.index.LeafReaderContext;
+import org.apache.lucene.index.MultiBits;
import org.apache.lucene.index.MultiDocValues;
-import org.apache.lucene.index.MultiFields;
import org.apache.lucene.index.NumericDocValues;
import org.apache.lucene.index.SerialMergeScheduler;
import org.apache.lucene.index.Term;
@@ -207,7 +207,7 @@ public abstract class BaseRangeFieldQueryTestCase extends LuceneTestCase {

int dimensions = ranges[0][0].numDimensions();
int iters = atLeast(25);
-Bits liveDocs = MultiFields.getLiveDocs(s.getIndexReader());
+Bits liveDocs = MultiBits.getLiveDocs(s.getIndexReader());
int maxDoc = s.getIndexReader().maxDoc();

for (int iter=0; iter<iters; ++iter) {

@@ -1975,12 +1975,12 @@ public abstract class LuceneTestCase extends Assert {
* Fields api equivalency
*/
public void assertTermsEquals(String info, IndexReader leftReader, IndexReader rightReader, boolean deep) throws IOException {
-Set<String> leftFields = new HashSet<>(MultiFields.getIndexedFields(leftReader));
-Set<String> rightFields = new HashSet<>(MultiFields.getIndexedFields(rightReader));
+Set<String> leftFields = new HashSet<>(FieldInfos.getIndexedFields(leftReader));
+Set<String> rightFields = new HashSet<>(FieldInfos.getIndexedFields(rightReader));
assertEquals(info, leftFields, rightFields);

for (String field : leftFields) {
-assertTermsEquals(info, leftReader, MultiFields.getTerms(leftReader, field), MultiFields.getTerms(rightReader, field), deep);
+assertTermsEquals(info, leftReader, MultiTerms.getTerms(leftReader, field), MultiTerms.getTerms(rightReader, field), deep);
}
}

@@ -2311,8 +2311,8 @@ public abstract class LuceneTestCase extends Assert {
* checks that norms are the same across all fields
*/
public void assertNormsEquals(String info, IndexReader leftReader, IndexReader rightReader) throws IOException {
-Set<String> leftFields = new HashSet<>(MultiFields.getIndexedFields(leftReader));
-Set<String> rightFields = new HashSet<>(MultiFields.getIndexedFields(rightReader));
+Set<String> leftFields = new HashSet<>(FieldInfos.getIndexedFields(leftReader));
+Set<String> rightFields = new HashSet<>(FieldInfos.getIndexedFields(rightReader));
assertEquals(info, leftFields, rightFields);

for (String field : leftFields) {
@@ -2406,7 +2406,7 @@ public abstract class LuceneTestCase extends Assert {

private static Set<String> getDVFields(IndexReader reader) {
Set<String> fields = new HashSet<>();
-for(FieldInfo fi : MultiFields.getMergedFieldInfos(reader)) {
+for(FieldInfo fi : FieldInfos.getMergedFieldInfos(reader)) {
if (fi.getDocValuesType() != DocValuesType.NONE) {
fields.add(fi.name);
}
@@ -2551,8 +2551,8 @@ public abstract class LuceneTestCase extends Assert {
// TODO: this is kinda stupid, we don't delete documents in the test.
public void assertDeletedDocsEquals(String info, IndexReader leftReader, IndexReader rightReader) throws IOException {
assert leftReader.numDeletedDocs() == rightReader.numDeletedDocs();
-Bits leftBits = MultiFields.getLiveDocs(leftReader);
-Bits rightBits = MultiFields.getLiveDocs(rightReader);
+Bits leftBits = MultiBits.getLiveDocs(leftReader);
+Bits rightBits = MultiBits.getLiveDocs(rightReader);

if (leftBits == null || rightBits == null) {
assertNull(info, leftBits);
@@ -2568,8 +2568,8 @@ public abstract class LuceneTestCase extends Assert {
}

public void assertFieldInfosEquals(String info, IndexReader leftReader, IndexReader rightReader) throws IOException {
-FieldInfos leftInfos = MultiFields.getMergedFieldInfos(leftReader);
-FieldInfos rightInfos = MultiFields.getMergedFieldInfos(rightReader);
+FieldInfos leftInfos = FieldInfos.getMergedFieldInfos(leftReader);
+FieldInfos rightInfos = FieldInfos.getMergedFieldInfos(rightReader);

// TODO: would be great to verify more than just the names of the fields!
TreeSet<String> left = new TreeSet<>();
@@ -2624,8 +2624,8 @@ public abstract class LuceneTestCase extends Assert {
}

public void assertPointsEquals(String info, IndexReader leftReader, IndexReader rightReader) throws IOException {
-FieldInfos fieldInfos1 = MultiFields.getMergedFieldInfos(leftReader);
-FieldInfos fieldInfos2 = MultiFields.getMergedFieldInfos(rightReader);
+FieldInfos fieldInfos1 = FieldInfos.getMergedFieldInfos(leftReader);
+FieldInfos fieldInfos2 = FieldInfos.getMergedFieldInfos(rightReader);
for(FieldInfo fieldInfo1 : fieldInfos1) {
if (fieldInfo1.getPointDataDimensionCount() != 0) {
FieldInfo fieldInfo2 = fieldInfos2.fieldInfo(fieldInfo1.name);

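Editorial note: a caller-side sketch of the getMergedFieldInfos relocation, mirroring the getDVFields helper above; the class and method names here are illustrative assumptions.

    import java.util.HashSet;
    import java.util.Set;
    import org.apache.lucene.index.DocValuesType;
    import org.apache.lucene.index.FieldInfo;
    import org.apache.lucene.index.FieldInfos;
    import org.apache.lucene.index.IndexReader;

    final class DocValuesFields {
      // Names of all fields that have some doc values type in the merged field infos.
      static Set<String> collect(IndexReader reader) {
        Set<String> fields = new HashSet<>();
        for (FieldInfo fi : FieldInfos.getMergedFieldInfos(reader)) {
          if (fi.getDocValuesType() != DocValuesType.NONE) {
            fields.add(fi.name);
          }
        }
        return fields;
      }
    }
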
@@ -63,29 +63,7 @@ import org.apache.lucene.document.Document;
import org.apache.lucene.document.Field;
import org.apache.lucene.document.NumericDocValuesField;
import org.apache.lucene.document.SortedDocValuesField;
-import org.apache.lucene.index.CheckIndex;
-import org.apache.lucene.index.CodecReader;
-import org.apache.lucene.index.ConcurrentMergeScheduler;
-import org.apache.lucene.index.DirectoryReader;
-import org.apache.lucene.index.DocValuesType;
-import org.apache.lucene.index.FieldInfo;
-import org.apache.lucene.index.FilterLeafReader;
-import org.apache.lucene.index.IndexFileNames;
-import org.apache.lucene.index.IndexReader;
-import org.apache.lucene.index.IndexWriter;
-import org.apache.lucene.index.IndexableField;
-import org.apache.lucene.index.LeafReader;
-import org.apache.lucene.index.LeafReaderContext;
-import org.apache.lucene.index.LogMergePolicy;
-import org.apache.lucene.index.MergePolicy;
-import org.apache.lucene.index.MergeScheduler;
-import org.apache.lucene.index.MultiFields;
-import org.apache.lucene.index.PostingsEnum;
-import org.apache.lucene.index.SegmentReader;
-import org.apache.lucene.index.SlowCodecReaderWrapper;
-import org.apache.lucene.index.Terms;
-import org.apache.lucene.index.TermsEnum;
-import org.apache.lucene.index.TieredMergePolicy;
+import org.apache.lucene.index.*;
import org.apache.lucene.mockfile.FilterFileSystem;
import org.apache.lucene.mockfile.VirusCheckingFS;
import org.apache.lucene.mockfile.WindowsFS;
@@ -1115,7 +1093,7 @@ public final class TestUtil {
// DocsAndFreqsEnum, DocsAndPositionsEnum. Returns null
// if field/term doesn't exist:
public static PostingsEnum docs(Random random, IndexReader r, String field, BytesRef term, PostingsEnum reuse, int flags) throws IOException {
-final Terms terms = MultiFields.getTerms(r, field);
+final Terms terms = MultiTerms.getTerms(r, field);
if (terms == null) {
return null;
}

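Editorial note: to close the TestUtil hunk above with a caller-side view, here is a sketch (class and method names are illustrative, not from this commit) that resolves a term through MultiTerms.getTerms and then walks its postings; the null checks cover a missing field or term, just as the helper above returns null.

    import java.io.IOException;
    import org.apache.lucene.index.IndexReader;
    import org.apache.lucene.index.MultiTerms;
    import org.apache.lucene.index.PostingsEnum;
    import org.apache.lucene.index.Terms;
    import org.apache.lucene.index.TermsEnum;
    import org.apache.lucene.search.DocIdSetIterator;
    import org.apache.lucene.util.BytesRef;

    final class PostingsWalker {
      // Prints every (doc, freq) pair for the given field/term, if present.
      static void walk(IndexReader reader, String field, BytesRef term) throws IOException {
        Terms terms = MultiTerms.getTerms(reader, field);
        if (terms == null) {
          return;                                  // field not indexed
        }
        TermsEnum te = terms.iterator();
        if (!te.seekExact(term)) {
          return;                                  // term not present
        }
        PostingsEnum postings = te.postings(null, PostingsEnum.FREQS);
        for (int doc = postings.nextDoc(); doc != DocIdSetIterator.NO_MORE_DOCS; doc = postings.nextDoc()) {
          System.out.println("doc=" + doc + " freq=" + postings.freq());
        }
      }
    }
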
Some files were not shown because too many files have changed in this diff.