mirror of https://github.com/apache/lucene.git
Migrate away from per-segment-per-threadlocals on SegmentReader (#11998)
Add new stored fields and termvectors interfaces: IndexReader.storedFields() and IndexReader.termVectors(). Deprecate IndexReader.document() and IndexReader.getTermVector(). The new APIs do not rely upon ThreadLocal storage for each index segment, which can greatly reduce RAM requirements when there are many threads and/or segments.

Co-authored-by: Adrien Grand <jpountz@gmail.com>
This commit is contained in:
parent ef5766aa81
commit 47f8c1baa2
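For orientation, the before/after shape of this change, as an illustrative sketch rather than an excerpt from the patch; it assumes an open DirectoryReader `reader`, an IndexSearcher `searcher` built on it, and some `Query query`:

```java
TopDocs hits = searcher.search(query, 10);

// Before (now deprecated): per-call lookups backed by per-segment ThreadLocals.
for (ScoreDoc hit : hits.scoreDocs) {
  Document doc = reader.document(hit.doc);
  Fields vectors = reader.getTermVectors(hit.doc);
}

// After: acquire one instance per thread and reuse it across documents; the
// instances are ordinary objects that are garbage-collected as usual.
StoredFields storedFields = reader.storedFields();
TermVectors termVectors = reader.termVectors();
for (ScoreDoc hit : hits.scoreDocs) {
  Document doc = storedFields.document(hit.doc);
  Fields vectors = termVectors.get(hit.doc);
}
```

Acquiring the reader object once per thread keeps per-segment decoding state alive across lookups without pinning it into ThreadLocals, which is where the RAM savings with many threads and/or segments come from.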
@@ -96,6 +96,12 @@ Other

 API Changes
 ---------------------
+* GITHUB#11998: Add new stored fields and termvectors interfaces: IndexReader.storedFields()
+  and IndexReader.termVectors(). Deprecate IndexReader.document() and IndexReader.getTermVector().
+  The new APIs do not rely upon ThreadLocal storage for each index segment, which can greatly
+  reduce RAM requirements when there are many threads and/or segments.
+  (Adrien Grand, Robert Muir)
+
 * GITHUB#11742: MatchingFacetSetsCounts#getTopChildren now properly returns "top" children instead
   of all children. (Greg Miller)
@@ -129,7 +135,6 @@ API Changes
 * GITHUB#11984: Improved TimeLimitBulkScorer to check the timeout at exponential rate.
   (Costin Leau)
-

 New Features
 ---------------------
 * GITHUB#11795: Add ByteWritesTrackingDirectoryWrapper to expose metrics for bytes merged, flushed, and overall

@@ -19,6 +19,25 @@

 ## Migration from Lucene 9.x to Lucene 10.0

+### Removed deprecated IndexSearcher.doc, IndexReader.document, IndexReader.getTermVectors (GITHUB#11998)
+
+The deprecated Stored Fields and Term Vectors APIs relied upon ThreadLocal storage and have been removed.
+
+Instead, call storedFields()/termVectors() to return an instance which can fetch data for multiple documents,
+and will be garbage-collected as usual.
+
+For example:
+```java
+TopDocs hits = searcher.search(query, 10);
+StoredFields storedFields = reader.storedFields();
+for (ScoreDoc hit : hits.scoreDocs) {
+  Document doc = storedFields.document(hit.doc);
+}
+```
+
+Note that these StoredFields and TermVectors instances should only be consumed in the thread where
+they were acquired. In particular, it is illegal to share them across threads.
+
 ### PersianStemFilter is added to PersianAnalyzer (LUCENE-10312)

 PersianAnalyzer now includes PersianStemFilter, which changes analysis results. If you need exactly the same analysis

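Editorial aside, not part of the commit: term vectors migrate the same way as stored fields. A minimal sketch, assuming the index actually stored term vectors for a field named "body":

```java
TopDocs hits = searcher.search(query, 10);
TermVectors termVectors = reader.termVectors();
for (ScoreDoc hit : hits.scoreDocs) {
  // get() returns null when no term vectors were indexed for this document/field
  Terms terms = termVectors.get(hit.doc, "body");
}
```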
@@ -95,7 +95,7 @@ public class TestTeeSinkTokenFilter extends BaseTokenStreamTestCase {
     w.close();

     IndexReader r = DirectoryReader.open(dir);
-    Terms vector = r.getTermVectors(0).terms("field");
+    Terms vector = r.termVectors().get(0).terms("field");
     assertEquals(1, vector.size());
     TermsEnum termsEnum = vector.iterator();
     termsEnum.next();

@@ -24,6 +24,7 @@ import org.apache.lucene.document.StringField;
 import org.apache.lucene.index.IndexReader;
 import org.apache.lucene.index.MultiDocValues;
 import org.apache.lucene.index.SortedDocValues;
+import org.apache.lucene.index.StoredFields;
 import org.apache.lucene.search.IndexSearcher;
 import org.apache.lucene.search.MatchAllDocsQuery;
 import org.apache.lucene.search.Sort;
@@ -64,8 +65,9 @@ public class TestCollationDocValuesField extends LuceneTestCase {
     SortField sortField = new SortField("collated", SortField.Type.STRING);

     TopDocs td = is.search(new MatchAllDocsQuery(), 5, new Sort(sortField));
-    assertEquals("abc", ir.document(td.scoreDocs[0].doc).get("field"));
-    assertEquals("ABC", ir.document(td.scoreDocs[1].doc).get("field"));
+    StoredFields storedFields = ir.storedFields();
+    assertEquals("abc", storedFields.document(td.scoreDocs[0].doc).get("field"));
+    assertEquals("ABC", storedFields.document(td.scoreDocs[1].doc).get("field"));
     ir.close();
     dir.close();
   }
@@ -120,8 +122,9 @@ public class TestCollationDocValuesField extends LuceneTestCase {
       Collator collator)
       throws Exception {
     SortedDocValues dvs = MultiDocValues.getSortedValues(is.getIndexReader(), "collated");
+    StoredFields storedFields = is.storedFields();
     for (int docID = 0; docID < is.getIndexReader().maxDoc(); docID++) {
-      Document doc = is.doc(docID);
+      Document doc = storedFields.document(docID);
       String s = doc.getField("field").stringValue();
       boolean collatorAccepts =
           collate(collator, s, startPoint) >= 0 && collate(collator, s, endPoint) <= 0;

@@ -24,6 +24,7 @@ import org.apache.lucene.document.StringField;
 import org.apache.lucene.index.IndexReader;
 import org.apache.lucene.index.MultiDocValues;
 import org.apache.lucene.index.SortedDocValues;
+import org.apache.lucene.index.StoredFields;
 import org.apache.lucene.search.IndexSearcher;
 import org.apache.lucene.search.MatchAllDocsQuery;
 import org.apache.lucene.search.Sort;
@@ -64,8 +65,9 @@ public class TestICUCollationDocValuesField extends LuceneTestCase {
     SortField sortField = new SortField("collated", SortField.Type.STRING);

     TopDocs td = is.search(new MatchAllDocsQuery(), 5, new Sort(sortField));
-    assertEquals("abc", ir.document(td.scoreDocs[0].doc).get("field"));
-    assertEquals("ABC", ir.document(td.scoreDocs[1].doc).get("field"));
+    StoredFields storedFields = ir.storedFields();
+    assertEquals("abc", storedFields.document(td.scoreDocs[0].doc).get("field"));
+    assertEquals("ABC", storedFields.document(td.scoreDocs[1].doc).get("field"));
     ir.close();
     dir.close();
   }
@@ -118,8 +120,9 @@ public class TestICUCollationDocValuesField extends LuceneTestCase {
       Collator collator)
       throws Exception {
     SortedDocValues dvs = MultiDocValues.getSortedValues(is.getIndexReader(), "collated");
+    StoredFields storedFields = is.storedFields();
     for (int docID = 0; docID < is.getIndexReader().maxDoc(); docID++) {
-      Document doc = is.doc(docID);
+      Document doc = storedFields.document(docID);
       String s = doc.getField("field").stringValue();
       boolean collatorAccepts =
           collator.compare(s, startPoint) >= 0 && collator.compare(s, endPoint) <= 0;

@@ -692,7 +692,7 @@ public final class Lucene50CompressingStoredFieldsReader extends StoredFieldsReader {
     }
   }

-  SerializedDocument document(int docID) throws IOException {
+  SerializedDocument serializedDocument(int docID) throws IOException {
     if (state.contains(docID) == false) {
       fieldsStream.seek(indexReader.getStartPointer(docID));
       state.reset(docID);
@@ -702,9 +702,9 @@ public final class Lucene50CompressingStoredFieldsReader extends StoredFieldsReader {
   }

   @Override
-  public void visitDocument(int docID, StoredFieldVisitor visitor) throws IOException {
+  public void document(int docID, StoredFieldVisitor visitor) throws IOException {

-    final SerializedDocument doc = document(docID);
+    final SerializedDocument doc = serializedDocument(docID);

     for (int fieldIDX = 0; fieldIDX < doc.numStoredFields; fieldIDX++) {
       final long infoAndBits = doc.in.readVLong();

@@ -25,6 +25,7 @@ import org.apache.lucene.document.StoredField;
 import org.apache.lucene.index.DirectoryReader;
 import org.apache.lucene.index.IndexWriter;
 import org.apache.lucene.index.IndexWriterConfig;
+import org.apache.lucene.index.StoredFields;
 import org.apache.lucene.store.Directory;
 import org.apache.lucene.tests.index.BaseStoredFieldsFormatTestCase;
 import org.apache.lucene.tests.util.LuceneTestCase.Nightly;
@@ -58,8 +59,9 @@ public class TestLucene50StoredFieldsFormatHighCompression extends BaseStoredFieldsFormatTestCase {

     DirectoryReader ir = DirectoryReader.open(dir);
     assertEquals(10, ir.numDocs());
+    StoredFields storedFields = ir.storedFields();
     for (int i = 0; i < 10; i++) {
-      Document doc = ir.document(i);
+      Document doc = storedFields.document(i);
       assertEquals("value1", doc.get("field1"));
       assertEquals("value2", doc.get("field2"));
     }

@@ -52,6 +52,7 @@ import org.apache.lucene.index.SerialMergeScheduler;
 import org.apache.lucene.index.SortedDocValues;
 import org.apache.lucene.index.SortedNumericDocValues;
 import org.apache.lucene.index.SortedSetDocValues;
+import org.apache.lucene.index.StoredFields;
 import org.apache.lucene.index.Term;
 import org.apache.lucene.index.Terms;
 import org.apache.lucene.index.TermsEnum;
@@ -225,8 +226,9 @@ public abstract class BaseLucene80DocValuesFormatTestCase

     final SortedSetDocValues sortedSet = DocValues.getSortedSet(reader, "sorted_set");

+    StoredFields storedFields = reader.storedFields();
     for (int i = 0; i < reader.maxDoc(); ++i) {
-      final Document doc = reader.document(i);
+      final Document doc = storedFields.document(i);
       final IndexableField valueField = doc.getField("value");
       final Long value = valueField == null ? null : valueField.numericValue().longValue();

@@ -664,11 +666,12 @@ public abstract class BaseLucene80DocValuesFormatTestCase
     for (LeafReaderContext context : ir.leaves()) {
       LeafReader r = context.reader();
       SortedNumericDocValues docValues = DocValues.getSortedNumeric(r, "dv");
+      StoredFields storedFields = r.storedFields();
       for (int i = 0; i < r.maxDoc(); i++) {
         if (i > docValues.docID()) {
           docValues.nextDoc();
         }
-        String[] expectedStored = r.document(i).getValues("stored");
+        String[] expectedStored = storedFields.document(i).getValues("stored");
         if (i < docValues.docID()) {
           assertEquals(0, expectedStored.length);
         } else {
@@ -736,6 +739,7 @@ public abstract class BaseLucene80DocValuesFormatTestCase
     TestUtil.checkReader(ir);
     for (LeafReaderContext context : ir.leaves()) {
       LeafReader r = context.reader();
+      StoredFields storedFields = r.storedFields();

       for (int jump = jumpStep; jump < r.maxDoc(); jump += jumpStep) {
         // Create a new instance each time to ensure jumps from the beginning
@@ -750,7 +754,7 @@ public abstract class BaseLucene80DocValuesFormatTestCase
                 + jump
                 + " from #"
                 + (docID - jump);
-        String storedValue = r.document(docID).get("stored");
+        String storedValue = storedFields.document(docID).get("stored");
         if (storedValue == null) {
           assertFalse("There should be no DocValue for " + base, docValues.advanceExact(docID));
         } else {

@@ -23,6 +23,7 @@ import org.apache.lucene.document.StoredField;
 import org.apache.lucene.index.DirectoryReader;
 import org.apache.lucene.index.IndexWriter;
 import org.apache.lucene.index.IndexWriterConfig;
+import org.apache.lucene.index.StoredFields;
 import org.apache.lucene.store.Directory;
 import org.apache.lucene.tests.index.BaseStoredFieldsFormatTestCase;
 import org.apache.lucene.tests.util.LuceneTestCase.Nightly;
@@ -57,8 +58,9 @@ public class TestLucene87StoredFieldsFormatHighCompression extends BaseStoredFieldsFormatTestCase {

     DirectoryReader ir = DirectoryReader.open(dir);
     assertEquals(10, ir.numDocs());
+    StoredFields storedFields = ir.storedFields();
     for (int i = 0; i < 10; i++) {
-      Document doc = ir.document(i);
+      Document doc = storedFields.document(i);
       assertEquals("value1", doc.get("field1"));
       assertEquals("value2", doc.get("field2"));
     }

@@ -91,7 +91,9 @@ import org.apache.lucene.index.SortedDocValues;
 import org.apache.lucene.index.SortedNumericDocValues;
 import org.apache.lucene.index.SortedSetDocValues;
 import org.apache.lucene.index.StandardDirectoryReader;
+import org.apache.lucene.index.StoredFields;
 import org.apache.lucene.index.Term;
+import org.apache.lucene.index.TermVectors;
 import org.apache.lucene.index.Terms;
 import org.apache.lucene.index.TermsEnum;
 import org.apache.lucene.index.VectorSimilarityFunction;
@@ -1099,9 +1101,11 @@ public class TestBackwardsCompatibility extends LuceneTestCase {
       throws IOException {
     final int hitCount = hits.length;
     assertEquals("wrong number of hits", expectedCount, hitCount);
+    StoredFields storedFields = reader.storedFields();
+    TermVectors termVectors = reader.termVectors();
     for (int i = 0; i < hitCount; i++) {
-      reader.document(hits[i].doc);
-      reader.getTermVectors(hits[i].doc);
+      storedFields.document(hits[i].doc);
+      termVectors.get(hits[i].doc);
     }
   }

@@ -1118,10 +1122,12 @@ public class TestBackwardsCompatibility extends LuceneTestCase {

     final Bits liveDocs = MultiBits.getLiveDocs(reader);
     assertNotNull(liveDocs);
+    StoredFields storedFields = reader.storedFields();
+    TermVectors termVectors = reader.termVectors();

     for (int i = 0; i < DOCS_COUNT; i++) {
       if (liveDocs.get(i)) {
-        Document d = reader.document(i);
+        Document d = storedFields.document(i);
         List<IndexableField> fields = d.getFields();
         boolean isProxDoc = d.getField("content3") == null;
         if (isProxDoc) {
@@ -1144,7 +1150,7 @@ public class TestBackwardsCompatibility extends LuceneTestCase {
         assertEquals("field with non-ascii name", f.stringValue());
       }

-      Fields tfvFields = reader.getTermVectors(i);
+      Fields tfvFields = termVectors.get(i);
       assertNotNull("i=" + i, tfvFields);
       Terms tfv = tfvFields.terms("utf8");
       assertNotNull("docID=" + i + " index=" + oldName, tfv);
@@ -1176,7 +1182,7 @@ public class TestBackwardsCompatibility extends LuceneTestCase {
         MultiDocValues.getSortedNumericValues(reader, "dvSortedNumeric");

     for (int i = 0; i < DOCS_COUNT; i++) {
-      int id = Integer.parseInt(reader.document(i).get("id"));
+      int id = Integer.parseInt(storedFields.document(i).get("id"));
       assertEquals(i, dvByte.nextDoc());
       assertEquals(id, dvByte.longValue());

@@ -1231,7 +1237,7 @@ public class TestBackwardsCompatibility extends LuceneTestCase {
         searcher.search(new TermQuery(new Term(new String("content"), "aaa")), 1000).scoreDocs;

     // First document should be #0
-    Document d = searcher.getIndexReader().document(hits[0].doc);
+    Document d = storedFields.document(hits[0].doc);
     assertEquals("didn't get the right document first", "0", d.get("id"));

     doTestHits(hits, 34, searcher.getIndexReader());
@@ -1337,7 +1343,7 @@ public class TestBackwardsCompatibility extends LuceneTestCase {
     // test KNN search
     ScoreDoc[] scoreDocs = assertKNNSearch(searcher, KNN_VECTOR, 10, 10, "0");
     for (int i = 0; i < scoreDocs.length; i++) {
-      int id = Integer.parseInt(reader.document(scoreDocs[i].doc).get("id"));
+      int id = Integer.parseInt(storedFields.document(scoreDocs[i].doc).get("id"));
       int expectedId = i < DELETED_ID ? i : i + 1;
       assertEquals(expectedId, id);
     }
@@ -1356,7 +1362,7 @@ public class TestBackwardsCompatibility extends LuceneTestCase {
     ScoreDoc[] hits =
         searcher.search(new KnnVectorQuery(KNN_VECTOR_FIELD, queryVector, k), k).scoreDocs;
     assertEquals("wrong number of hits", expectedHitsCount, hits.length);
-    Document d = searcher.doc(hits[0].doc);
+    Document d = searcher.storedFields().document(hits[0].doc);
     assertEquals("wrong first document", expectedFirstDocId, d.get("id"));
     return hits;
   }
@@ -1388,7 +1394,7 @@ public class TestBackwardsCompatibility extends LuceneTestCase {
     IndexReader reader = DirectoryReader.open(dir);
     IndexSearcher searcher = newSearcher(reader);
     ScoreDoc[] hits = searcher.search(new TermQuery(new Term("content", "aaa")), 1000).scoreDocs;
-    Document d = searcher.getIndexReader().document(hits[0].doc);
+    Document d = searcher.getIndexReader().storedFields().document(hits[0].doc);
     assertEquals("wrong first document", "0", d.get("id"));
     doTestHits(hits, 44, searcher.getIndexReader());

@@ -1420,7 +1426,7 @@ public class TestBackwardsCompatibility extends LuceneTestCase {
     // make sure searching sees right # hits for term search
     hits = searcher.search(new TermQuery(new Term("content", "aaa")), 1000).scoreDocs;
     assertEquals("wrong number of hits", 44, hits.length);
-    d = searcher.doc(hits[0].doc);
+    d = searcher.storedFields().document(hits[0].doc);
     doTestHits(hits, 44, searcher.getIndexReader());
     assertEquals("wrong first document", "0", d.get("id"));

@@ -1445,7 +1451,7 @@ public class TestBackwardsCompatibility extends LuceneTestCase {
     IndexSearcher searcher = newSearcher(reader);
     ScoreDoc[] hits = searcher.search(new TermQuery(new Term("content", "aaa")), 1000).scoreDocs;
     assertEquals("wrong number of hits", 34, hits.length);
-    Document d = searcher.doc(hits[0].doc);
+    Document d = searcher.storedFields().document(hits[0].doc);
     assertEquals("wrong first document", "0", d.get("id"));

     if (nameVersion.major >= KNN_VECTOR_MIN_SUPPORTED_VERSION) {

@@ -23,6 +23,7 @@ import org.apache.lucene.document.Document;
 import org.apache.lucene.index.DirectoryReader;
 import org.apache.lucene.index.IndexReader;
 import org.apache.lucene.index.MultiBits;
+import org.apache.lucene.index.StoredFields;
 import org.apache.lucene.search.Collector;
 import org.apache.lucene.search.IndexSearcher;
 import org.apache.lucene.search.Query;
@@ -82,13 +83,14 @@ public abstract class ReadTask extends PerfTask {
       closeSearcher = false;
     }

+    StoredFields storedFields = reader.storedFields();
     // optionally warm and add num docs traversed to count
     if (withWarm()) {
       Document doc = null;
       Bits liveDocs = MultiBits.getLiveDocs(reader);
       for (int m = 0; m < reader.maxDoc(); m++) {
         if (null == liveDocs || liveDocs.get(m)) {
-          doc = reader.document(m);
+          doc = storedFields.document(m);
           res += (doc == null ? 0 : 1);
         }
       }
@@ -129,7 +131,7 @@ public abstract class ReadTask extends PerfTask {
       System.out.println("numDocs() = " + reader.numDocs());
       for (int i = 0; i < hits.scoreDocs.length; i++) {
         final int docID = hits.scoreDocs[i].doc;
-        final Document doc = reader.document(docID);
+        final Document doc = storedFields.document(docID);
         System.out.println(
             "  "
                 + i
@@ -159,19 +161,19 @@ public abstract class ReadTask extends PerfTask {
   }

   protected int withTopDocs(IndexSearcher searcher, Query q, TopDocs hits) throws Exception {
-    IndexReader reader = searcher.getIndexReader();
     int res = 0;
     if (withTraverse()) {
       final ScoreDoc[] scoreDocs = hits.scoreDocs;
       int traversalSize = Math.min(scoreDocs.length, traversalSize());

+      StoredFields storedFields = searcher.storedFields();
       if (traversalSize > 0) {
         boolean retrieve = withRetrieve();
         for (int m = 0; m < traversalSize; m++) {
           int id = scoreDocs[m].doc;
           res++;
           if (retrieve) {
-            Document document = retrieveDoc(reader, id);
+            Document document = retrieveDoc(storedFields, id);
             res += document != null ? 1 : 0;
           }
         }
@@ -184,8 +186,8 @@ public abstract class ReadTask extends PerfTask {
     return TopScoreDocCollector.create(numHits(), withTotalHits() ? Integer.MAX_VALUE : 1);
   }

-  protected Document retrieveDoc(IndexReader ir, int id) throws IOException {
-    return ir.document(id);
+  protected Document retrieveDoc(StoredFields storedFields, int id) throws IOException {
+    return storedFields.document(id);
   }

   /** Return query maker used for this task. */

@@ -31,6 +31,8 @@ import org.apache.lucene.document.Document;
 import org.apache.lucene.index.Fields;
 import org.apache.lucene.index.IndexReader;
 import org.apache.lucene.index.IndexableField;
+import org.apache.lucene.index.StoredFields;
+import org.apache.lucene.index.TermVectors;
 import org.apache.lucene.search.IndexSearcher;
 import org.apache.lucene.search.Query;
 import org.apache.lucene.search.ScoreDoc;
@@ -196,12 +198,14 @@ public class SearchTravRetHighlightTask extends SearchTravTask {
   @Override
   public void withTopDocs(IndexSearcher searcher, Query q, TopDocs hits) throws Exception {
     IndexReader reader = searcher.getIndexReader();
+    StoredFields storedFields = reader.storedFields();
+    TermVectors termVectors = reader.termVectors();
     highlighter.setFragmentScorer(new QueryScorer(q));
     // highlighter.setTextFragmenter();  unfortunately no sentence mechanism, not even regex.
     // Default here is trivial
     for (ScoreDoc scoreDoc : docIdOrder(hits.scoreDocs)) {
-      Document document = reader.document(scoreDoc.doc, hlFields);
-      Fields tvFields = termVecs ? reader.getTermVectors(scoreDoc.doc) : null;
+      Document document = storedFields.document(scoreDoc.doc, hlFields);
+      Fields tvFields = termVecs ? termVectors.get(scoreDoc.doc) : null;
       for (IndexableField indexableField : document) {
         TokenStream tokenStream;
         if (termVecs) {
@@ -316,8 +320,10 @@ public class SearchTravRetHighlightTask extends SearchTravTask {
   @Override
   public void withTopDocs(IndexSearcher searcher, Query q, TopDocs hits) throws Exception {
     // just retrieve the HL fields
+    StoredFields storedFields = searcher.storedFields();
     for (ScoreDoc scoreDoc : docIdOrder(hits.scoreDocs)) {
-      preventOptimizeAway += searcher.doc(scoreDoc.doc, hlFields).iterator().hasNext() ? 2 : 1;
+      preventOptimizeAway +=
+          storedFields.document(scoreDoc.doc, hlFields).iterator().hasNext() ? 2 : 1;
     }
   }
 }

@@ -23,7 +23,7 @@ import java.util.StringTokenizer;
 import org.apache.lucene.benchmark.byTask.PerfRunData;
 import org.apache.lucene.document.Document;
 import org.apache.lucene.document.DocumentStoredFieldVisitor;
-import org.apache.lucene.index.IndexReader;
+import org.apache.lucene.index.StoredFields;

 /**
  * Search and Traverse and Retrieve docs task using a FieldVisitor loading only the requested
@@ -51,12 +51,12 @@ public class SearchTravRetLoadFieldSelectorTask extends SearchTravTask {
   }

   @Override
-  protected Document retrieveDoc(IndexReader ir, int id) throws IOException {
+  protected Document retrieveDoc(StoredFields storedFields, int id) throws IOException {
     if (fieldsToLoad == null) {
-      return ir.document(id);
+      return storedFields.document(id);
     } else {
       DocumentStoredFieldVisitor visitor = new DocumentStoredFieldVisitor(fieldsToLoad);
-      ir.document(id, visitor);
+      storedFields.document(id, visitor);
       return visitor.getDocument();
     }
   }

@@ -20,6 +20,7 @@ import java.io.IOException;
 import java.io.PrintWriter;
 import org.apache.lucene.benchmark.quality.utils.DocNameExtractor;
 import org.apache.lucene.benchmark.quality.utils.SubmissionReport;
+import org.apache.lucene.index.StoredFields;
 import org.apache.lucene.search.IndexSearcher;
 import org.apache.lucene.search.Query;
 import org.apache.lucene.search.ScoreDoc;
@@ -128,8 +129,9 @@ public class QualityBenchmark {
         System.currentTimeMillis(); // extraction of first doc name we measure also construction of
     // doc name extractor, just in case.
     DocNameExtractor xt = new DocNameExtractor(docNameField);
+    StoredFields storedFields = searcher.storedFields();
     for (int i = 0; i < sd.length; i++) {
-      String docName = xt.docName(searcher, sd[i].doc);
+      String docName = xt.docName(storedFields, sd[i].doc);
       long docNameExtractTime = System.currentTimeMillis() - t1;
       t1 = System.currentTimeMillis();
       boolean isRelevant = judge.isRelevant(docName, qq);

@@ -22,7 +22,7 @@ import java.util.List;
 import java.util.Objects;
 import org.apache.lucene.index.FieldInfo;
 import org.apache.lucene.index.StoredFieldVisitor;
-import org.apache.lucene.search.IndexSearcher;
+import org.apache.lucene.index.StoredFields;

 /** Utility: extract doc names from an index */
 public class DocNameExtractor {
@@ -41,16 +41,14 @@ public class DocNameExtractor {
   /**
    * Extract the name of the input doc from the index.
    *
-   * @param searcher access to the index.
+   * @param storedFields access to the index.
    * @param docid ID of doc whose name is needed.
    * @return the name of the input doc as extracted from the index.
    * @throws IOException if cannot extract the doc name from the index.
    */
-  public String docName(IndexSearcher searcher, int docid) throws IOException {
+  public String docName(StoredFields storedFields, int docid) throws IOException {
     final List<String> name = new ArrayList<>();
-    searcher
-        .getIndexReader()
-        .document(
+    storedFields.document(
         docid,
         new StoredFieldVisitor() {
           @Override

@@ -21,6 +21,7 @@ import java.io.PrintWriter;
 import java.text.NumberFormat;
 import java.util.Locale;
 import org.apache.lucene.benchmark.quality.QualityQuery;
+import org.apache.lucene.index.StoredFields;
 import org.apache.lucene.search.IndexSearcher;
 import org.apache.lucene.search.ScoreDoc;
 import org.apache.lucene.search.TopDocs;
@@ -66,8 +67,9 @@ public class SubmissionReport {
     ScoreDoc[] sd = td.scoreDocs;
     String sep = " \t ";
     DocNameExtractor xt = new DocNameExtractor(docNameField);
+    StoredFields storedFields = searcher.storedFields();
     for (int i = 0; i < sd.length; i++) {
-      String docName = xt.docName(searcher, sd[i].doc);
+      String docName = xt.docName(storedFields, sd[i].doc);
       logger.println(
           qq.getQueryID()
               + sep

@@ -48,6 +48,7 @@ import org.apache.lucene.index.MultiTerms;
 import org.apache.lucene.index.PostingsEnum;
 import org.apache.lucene.index.SegmentInfos;
 import org.apache.lucene.index.SerialMergeScheduler;
+import org.apache.lucene.index.StoredFields;
 import org.apache.lucene.index.Terms;
 import org.apache.lucene.index.TermsEnum;
 import org.apache.lucene.search.DocIdSetIterator;
@@ -240,8 +241,9 @@ public class TestPerfTasksLogic extends BenchmarkTestCase {

     final int maxDoc = r.maxDoc();
     assertEquals(1000, maxDoc);
+    StoredFields storedFields = r.storedFields();
     for (int i = 0; i < 1000; i++) {
-      assertNotNull("doc " + i + " has null country", r.document(i).getField("country"));
+      assertNotNull("doc " + i + " has null country", storedFields.document(i).getField("country"));
     }
     r.close();
   }
@@ -644,7 +646,7 @@ public class TestPerfTasksLogic extends BenchmarkTestCase {
     writer.close();
     Directory dir = benchmark.getRunData().getDirectory();
     IndexReader reader = DirectoryReader.open(dir);
-    Fields tfv = reader.getTermVectors(0);
+    Fields tfv = reader.termVectors().get(0);
     assertNotNull(tfv);
     assertTrue(tfv.size() > 0);
     reader.close();

@@ -166,7 +166,8 @@ public class TestLineDocSource extends BenchmarkTestCase {
       if (storedField == null) {
         storedField = DocMaker.BODY_FIELD; // added to all docs and satisfies field-name == value
       }
-      assertEquals("Wrong field value", storedField, searcher.doc(0).get(storedField));
+      assertEquals(
+          "Wrong field value", storedField, searcher.storedFields().document(0).get(storedField));
     } finally {
       IOUtils.close(reader, runData);
     }

@@ -28,7 +28,9 @@ import org.apache.lucene.document.Document;
 import org.apache.lucene.index.IndexReader;
 import org.apache.lucene.index.IndexableField;
 import org.apache.lucene.index.MultiTerms;
+import org.apache.lucene.index.StoredFields;
 import org.apache.lucene.index.Term;
+import org.apache.lucene.index.TermVectors;
 import org.apache.lucene.index.Terms;
 import org.apache.lucene.index.TermsEnum;
 import org.apache.lucene.search.BooleanClause;
@@ -132,9 +134,11 @@ public class BooleanPerceptronClassifier implements Classifier<Boolean> {
     if (query != null) {
       q.add(new BooleanClause(query, BooleanClause.Occur.MUST));
     }
+    TermVectors termVectors = indexReader.termVectors();
+    StoredFields storedFields = indexReader.storedFields();
     // run the search and use stored field values
     for (ScoreDoc scoreDoc : indexSearcher.search(q.build(), Integer.MAX_VALUE).scoreDocs) {
-      Document doc = indexSearcher.doc(scoreDoc.doc);
+      Document doc = storedFields.document(scoreDoc.doc);

       IndexableField textField = doc.getField(textFieldName);

@@ -150,7 +154,7 @@ public class BooleanPerceptronClassifier implements Classifier<Boolean> {
       double modifier = Math.signum(correctClass.compareTo(assignedClass));
       if (modifier != 0D) {
         updateWeights(
-            indexReader,
+            termVectors,
             scoreDoc.doc,
             assignedClass,
             weights,
@@ -164,7 +168,7 @@ public class BooleanPerceptronClassifier implements Classifier<Boolean> {
   }

   private void updateWeights(
-      IndexReader indexReader,
+      TermVectors termVectors,
       int docId,
       Boolean assignedClass,
       SortedMap<String, Double> weights,
@@ -174,7 +178,7 @@ public class BooleanPerceptronClassifier implements Classifier<Boolean> {
     TermsEnum cte = textTerms.iterator();

     // get the doc term vectors
-    Terms terms = indexReader.getTermVector(docId, textFieldName);
+    Terms terms = termVectors.get(docId, textFieldName);

     if (terms == null) {
       throw new IOException("term vectors must be stored for field " + textFieldName);

@@ -29,6 +29,7 @@ import org.apache.lucene.classification.utils.NearestFuzzyQuery;
 import org.apache.lucene.index.IndexReader;
 import org.apache.lucene.index.IndexableField;
 import org.apache.lucene.index.LeafReader;
+import org.apache.lucene.index.StoredFields;
 import org.apache.lucene.index.Term;
 import org.apache.lucene.search.BooleanClause;
 import org.apache.lucene.search.BooleanQuery;
@@ -159,8 +160,9 @@ public class KNearestFuzzyClassifier implements Classifier<BytesRef> {
     Map<BytesRef, Double> classBoosts =
         new HashMap<>(); // this is a boost based on class ranking positions in topDocs
     float maxScore = topDocs.totalHits.value == 0 ? Float.NaN : topDocs.scoreDocs[0].score;
+    StoredFields storedFields = indexSearcher.storedFields();
     for (ScoreDoc scoreDoc : topDocs.scoreDocs) {
-      IndexableField storableField = indexSearcher.doc(scoreDoc.doc).getField(classFieldName);
+      IndexableField storableField = storedFields.document(scoreDoc.doc).getField(classFieldName);
       if (storableField != null) {
         BytesRef cl = new BytesRef(storableField.stringValue());
         // update count

@@ -29,6 +29,7 @@ import org.apache.lucene.analysis.Analyzer;
 import org.apache.lucene.index.IndexReader;
 import org.apache.lucene.index.IndexableField;
 import org.apache.lucene.index.LeafReader;
+import org.apache.lucene.index.StoredFields;
 import org.apache.lucene.index.Term;
 import org.apache.lucene.queries.mlt.MoreLikeThis;
 import org.apache.lucene.search.BooleanClause;
@@ -96,7 +97,8 @@ public class KNearestNeighborClassifier implements Classifier<BytesRef> {
       int minDocsFreq,
       int minTermFreq,
       String classFieldName,
-      String... textFieldNames) {
+      String... textFieldNames)
+      throws IOException {
     this.textFieldNames = textFieldNames;
     this.classFieldName = classFieldName;
     this.mlt = new MoreLikeThis(indexReader);
@@ -191,8 +193,10 @@ public class KNearestNeighborClassifier implements Classifier<BytesRef> {
     Map<BytesRef, Double> classBoosts =
         new HashMap<>(); // this is a boost based on class ranking positions in topDocs
     float maxScore = topDocs.totalHits.value == 0 ? Float.NaN : topDocs.scoreDocs[0].score;
+    StoredFields storedFields = indexSearcher.storedFields();
     for (ScoreDoc scoreDoc : topDocs.scoreDocs) {
-      IndexableField[] storableFields = indexSearcher.doc(scoreDoc.doc).getFields(classFieldName);
+      IndexableField[] storableFields =
+          storedFields.document(scoreDoc.doc).getFields(classFieldName);
       for (IndexableField singleStorableField : storableFields) {
         if (singleStorableField != null) {
           BytesRef cl = new BytesRef(singleStorableField.stringValue());

@@ -75,7 +75,8 @@ public class KNearestNeighborDocumentClassifier extends KNearestNeighborClassifier {
       int minTermFreq,
       String classFieldName,
       Map<String, Analyzer> field2analyzer,
-      String... textFieldNames) {
+      String... textFieldNames)
+      throws IOException {
     super(
         indexReader,
         similarity,

@@ -30,6 +30,7 @@ import org.apache.lucene.classification.ClassificationResult;
 import org.apache.lucene.classification.Classifier;
 import org.apache.lucene.document.Document;
 import org.apache.lucene.index.IndexReader;
+import org.apache.lucene.index.StoredFields;
 import org.apache.lucene.search.IndexSearcher;
 import org.apache.lucene.search.ScoreDoc;
 import org.apache.lucene.search.TermRangeQuery;
@@ -80,13 +81,14 @@ public class ConfusionMatrixGenerator {
       double time = 0d;

       int counter = 0;
+      StoredFields storedFields = indexSearcher.storedFields();
       for (ScoreDoc scoreDoc : topDocs.scoreDocs) {

         if (timeoutMilliseconds > 0 && time >= timeoutMilliseconds) {
           break;
         }

-        Document doc = reader.document(scoreDoc.doc);
+        Document doc = storedFields.document(scoreDoc.doc);
         String[] correctAnswers = doc.getValues(classFieldName);

         if (correctAnswers != null && correctAnswers.length > 0) {

@@ -29,6 +29,7 @@ import org.apache.lucene.index.IndexableField;
 import org.apache.lucene.index.LeafReaderContext;
 import org.apache.lucene.index.SortedDocValues;
 import org.apache.lucene.index.SortedSetDocValues;
+import org.apache.lucene.index.StoredFields;
 import org.apache.lucene.search.IndexSearcher;
 import org.apache.lucene.search.MatchAllDocsQuery;
 import org.apache.lucene.search.ScoreDoc;
@@ -134,6 +135,7 @@ public class DatasetSplitter {
     int b = 0;

     // iterate over existing documents
+    StoredFields storedFields = originalIndex.storedFields();
     for (GroupDocs<Object> group : topGroups.groups) {
       assert group.totalHits.relation == TotalHits.Relation.EQUAL_TO;
       long totalHits = group.totalHits.value;
@@ -144,7 +146,7 @@ public class DatasetSplitter {
       for (ScoreDoc scoreDoc : group.scoreDocs) {

         // create a new document for indexing
-        Document doc = createNewDoc(originalIndex, ft, scoreDoc, fieldNames);
+        Document doc = createNewDoc(storedFields, ft, scoreDoc, fieldNames);

         // add it to one of the IDXs
         if (b % 2 == 0 && tc < testSize) {
@@ -180,10 +182,10 @@ public class DatasetSplitter {
   }

   private Document createNewDoc(
-      IndexReader originalIndex, FieldType ft, ScoreDoc scoreDoc, String[] fieldNames)
+      StoredFields originalFields, FieldType ft, ScoreDoc scoreDoc, String[] fieldNames)
       throws IOException {
     Document doc = new Document();
-    Document document = originalIndex.document(scoreDoc.doc);
+    Document document = originalFields.document(scoreDoc.doc);
     if (fieldNames != null && fieldNames.length > 0) {
       for (String fieldName : fieldNames) {
         IndexableField field = document.getField(fieldName);

@@ -22,6 +22,7 @@ import org.apache.lucene.document.FieldType;
 import org.apache.lucene.document.TextField;
 import org.apache.lucene.index.IndexReader;
 import org.apache.lucene.index.MultiTerms;
+import org.apache.lucene.index.TermVectors;
 import org.apache.lucene.index.Terms;
 import org.apache.lucene.search.IndexSearcher;
 import org.apache.lucene.search.MatchAllDocsQuery;
@@ -81,9 +82,10 @@ public class TestDocToDoubleVectorUtils extends LuceneTestCase {
   @Test
   public void testDenseFreqDoubleArrayConversion() throws Exception {
     IndexSearcher indexSearcher = new IndexSearcher(index);
+    TermVectors termVectors = index.termVectors();
     for (ScoreDoc scoreDoc :
         indexSearcher.search(new MatchAllDocsQuery(), Integer.MAX_VALUE).scoreDocs) {
-      Terms docTerms = index.getTermVector(scoreDoc.doc, "text");
+      Terms docTerms = termVectors.get(scoreDoc.doc, "text");
       Double[] vector = DocToDoubleVectorUtils.toDenseLocalFreqDoubleArray(docTerms);
       assertNotNull(vector);
       assertTrue(vector.length > 0);
@@ -95,9 +97,10 @@ public class TestDocToDoubleVectorUtils extends LuceneTestCase {
     Terms fieldTerms = MultiTerms.getTerms(index, "text");
     if (fieldTerms != null && fieldTerms.size() != -1) {
       IndexSearcher indexSearcher = new IndexSearcher(index);
+      TermVectors termVectors = index.termVectors();
       for (ScoreDoc scoreDoc :
           indexSearcher.search(new MatchAllDocsQuery(), Integer.MAX_VALUE).scoreDocs) {
-        Terms docTerms = index.getTermVector(scoreDoc.doc, "text");
+        Terms docTerms = termVectors.get(scoreDoc.doc, "text");
         Double[] vector = DocToDoubleVectorUtils.toSparseLocalFreqDoubleArray(docTerms, fieldTerms);
         assertNotNull(vector);
         assertTrue(vector.length > 0);

@@ -116,7 +116,7 @@ public class SimpleTextStoredFieldsReader extends StoredFieldsReader {
   }

   @Override
-  public void visitDocument(int n, StoredFieldVisitor visitor) throws IOException {
+  public void document(int n, StoredFieldVisitor visitor) throws IOException {
     in.seek(offsets[n]);

     while (true) {

@@ -19,23 +19,21 @@ package org.apache.lucene.codecs;
 import java.io.Closeable;
 import java.io.IOException;
 import org.apache.lucene.index.StoredFieldVisitor;
+import org.apache.lucene.index.StoredFields;

 /**
  * Codec API for reading stored fields.
  *
- * <p>You need to implement {@link #visitDocument(int, StoredFieldVisitor)} to read the stored
- * fields for a document, implement {@link #clone()} (creating clones of any IndexInputs used, etc),
- * and {@link #close()}
+ * <p>You need to implement {@link #document(int, StoredFieldVisitor)} to read the stored fields for
+ * a document, implement {@link #clone()} (creating clones of any IndexInputs used, etc), and {@link
+ * #close()}
  *
  * @lucene.experimental
  */
-public abstract class StoredFieldsReader implements Cloneable, Closeable {
+public abstract class StoredFieldsReader extends StoredFields implements Cloneable, Closeable {
   /** Sole constructor. (For invocation by subclass constructors, typically implicit.) */
   protected StoredFieldsReader() {}

-  /** Visit the stored fields for document <code>docID</code> */
-  public abstract void visitDocument(int docID, StoredFieldVisitor visitor) throws IOException;
-
   @Override
   public abstract StoredFieldsReader clone();

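Editorial aside, not part of the commit: after this change a codec's stored-fields reader implements `document` instead of `visitDocument`, inheriting the rest of the `StoredFields` surface from its new base class. A hypothetical skeleton (the class name and all format-specific decoding are invented for illustration):

```java
final class MyStoredFieldsReader extends StoredFieldsReader {
  @Override
  public void document(int docID, StoredFieldVisitor visitor) throws IOException {
    // decode the stored fields of docID from the codec's files
    // and feed each field to the visitor
  }

  @Override
  public StoredFieldsReader clone() {
    return new MyStoredFieldsReader(); // a real codec would clone its open IndexInputs here
  }

  @Override
  public void checkIntegrity() throws IOException {}

  @Override
  public void close() throws IOException {}
}
```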
@@ -132,7 +132,7 @@ public abstract class StoredFieldsWriter implements Closeable, Accountable {
       }
       assert sub.mappedDocID == docCount;
       startDocument();
-      sub.reader.visitDocument(sub.docID, sub.visitor);
+      sub.reader.document(sub.docID, sub.visitor);
       finishDocument();
       docCount++;
     }
@@ -149,7 +149,7 @@ public abstract class StoredFieldsWriter implements Closeable, Accountable {
    * MergeVisitor visitor = new MergeVisitor(mergeState, readerIndex);
    * for (...) {
    *   startDocument();
-   *   storedFieldsReader.visitDocument(docID, visitor);
+   *   storedFieldsReader.document(docID, visitor);
    *   finishDocument();
    * }
    * </pre>

@@ -18,26 +18,18 @@ package org.apache.lucene.codecs;

 import java.io.Closeable;
 import java.io.IOException;
-import org.apache.lucene.analysis.tokenattributes.OffsetAttribute; // javadocs
-import org.apache.lucene.index.Fields;
+import org.apache.lucene.index.TermVectors;

 /**
  * Codec API for reading term vectors:
  *
  * @lucene.experimental
  */
-public abstract class TermVectorsReader implements Cloneable, Closeable {
+public abstract class TermVectorsReader extends TermVectors implements Cloneable, Closeable {

   /** Sole constructor. (For invocation by subclass constructors, typically implicit.) */
   protected TermVectorsReader() {}

-  /**
-   * Returns term vectors for this document, or null if term vectors were not indexed. If offsets
-   * are available they are in an {@link OffsetAttribute} available from the {@link
-   * org.apache.lucene.index.PostingsEnum}.
-   */
-  public abstract Fields get(int doc) throws IOException;
-
   /**
    * Checks consistency of this reader.
    *

@@ -603,7 +603,7 @@ public final class Lucene90CompressingStoredFieldsReader extends StoredFieldsReader {
     }
   }

-  SerializedDocument document(int docID) throws IOException {
+  SerializedDocument serializedDocument(int docID) throws IOException {
     if (state.contains(docID) == false) {
       fieldsStream.seek(indexReader.getStartPointer(docID));
       state.reset(docID);
@@ -625,9 +625,9 @@ public final class Lucene90CompressingStoredFieldsReader extends StoredFieldsReader {
   }

   @Override
-  public void visitDocument(int docID, StoredFieldVisitor visitor) throws IOException {
+  public void document(int docID, StoredFieldVisitor visitor) throws IOException {

-    final SerializedDocument doc = document(docID);
+    final SerializedDocument doc = serializedDocument(docID);

     for (int fieldIDX = 0; fieldIDX < doc.numStoredFields; fieldIDX++) {
       final long infoAndBits = doc.in.readVLong();

@@ -513,7 +513,7 @@ public final class Lucene90CompressingStoredFieldsWriter extends StoredFieldsWriter {
   private void copyOneDoc(Lucene90CompressingStoredFieldsReader reader, int docID)
       throws IOException {
     assert reader.getVersion() == VERSION_CURRENT;
-    SerializedDocument doc = reader.document(docID);
+    SerializedDocument doc = reader.serializedDocument(docID);
     startDocument();
     bufferedDocs.copyBytes(doc.in, doc.length);
     numStoredFieldsInDoc = doc.numStoredFields;
@@ -641,7 +641,7 @@ public final class Lucene90CompressingStoredFieldsWriter extends StoredFieldsWriter {
       } else if (sub.mergeStrategy == MergeStrategy.VISITOR) {
         assert visitors[sub.readerIndex] != null;
         startDocument();
-        reader.visitDocument(sub.docID, visitors[sub.readerIndex]);
+        reader.document(sub.docID, visitors[sub.readerIndex]);
         finishDocument();
         ++docCount;
         sub = docIDMerger.next();

@@ -20,9 +20,8 @@ import java.util.ArrayList;
 import java.util.Collections;
 import java.util.Iterator;
 import java.util.List;
-import org.apache.lucene.index.IndexReader; // for javadoc
 import org.apache.lucene.index.IndexableField;
-import org.apache.lucene.search.IndexSearcher; // for javadoc
+import org.apache.lucene.index.StoredFields; // for javadoc
 import org.apache.lucene.search.ScoreDoc; // for javadoc
 import org.apache.lucene.util.BytesRef;

@@ -36,7 +35,7 @@ import org.apache.lucene.util.BytesRef;
  *
  * <p>Note that fields which are <i>not</i> {@link
  * org.apache.lucene.index.IndexableFieldType#stored() stored} are <i>not</i> available in documents
- * retrieved from the index, e.g. with {@link ScoreDoc#doc} or {@link IndexReader#document(int)}.
+ * retrieved from the index, e.g. with {@link ScoreDoc#doc} or {@link StoredFields#document(int)}.
  */
 public final class Document implements Iterable<IndexableField> {

@@ -180,7 +179,7 @@ public final class Document implements Iterable<IndexableField> {
    * Returns a List of all the fields in a document.
    *
    * <p>Note that fields which are <i>not</i> stored are <i>not</i> available in documents retrieved
-   * from the index, e.g. {@link IndexSearcher#doc(int)} or {@link IndexReader#document(int)}.
+   * from the index, e.g. {@link StoredFields#document(int)}.
    *
    * @return an immutable <code>List<Field></code>
    */

@@ -22,8 +22,8 @@ import java.util.HashSet;
 import java.util.Objects;
 import java.util.Set;
 import org.apache.lucene.index.FieldInfo;
-import org.apache.lucene.index.IndexReader;
 import org.apache.lucene.index.StoredFieldVisitor;
+import org.apache.lucene.index.StoredFields;

 /**
  * A {@link StoredFieldVisitor} that creates a {@link Document} from stored fields.
@@ -31,7 +31,7 @@ import org.apache.lucene.index.StoredFieldVisitor;
  * <p>This visitor supports loading all stored fields, or only specific requested fields provided
  * from a {@link Set}.
  *
- * <p>This is used by {@link IndexReader#document(int)} to load a document.
+ * <p>This is used by {@link StoredFields#document(int)} to load a document.
  *
  * @lucene.experimental
  */

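Editorial aside, not part of the commit: the visitor-based entry point pairs with the new StoredFields API in the same way. A sketch that loads only the stored "id" field of one document (assumes an open `reader` and a valid `docID`):

```java
StoredFields storedFields = reader.storedFields();
DocumentStoredFieldVisitor visitor = new DocumentStoredFieldVisitor(Collections.singleton("id"));
storedFields.document(docID, visitor);
Document doc = visitor.getDocument(); // contains only the stored "id" field
```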
@@ -21,8 +21,8 @@ import org.apache.lucene.search.IndexSearcher; // javadocs
 import org.apache.lucene.util.BytesRef;

 /**
- * A field whose value is stored so that {@link IndexSearcher#doc} and {@link IndexReader#document
- * IndexReader.document()} will return the field and its value.
+ * A field whose value is stored so that {@link IndexSearcher#storedFields} and {@link
+ * IndexReader#storedFields} will return the field and its value.
  */
 public class StoredField extends Field {

@@ -118,6 +118,23 @@ public abstract class BaseCompositeReader<R extends IndexReader> extends CompositeReader {
     return subReaders[i].getTermVectors(docID - starts[i]); // dispatch to subreader
   }

+  @Override
+  public final TermVectors termVectors() throws IOException {
+    ensureOpen();
+    TermVectors[] subVectors = new TermVectors[subReaders.length];
+    return new TermVectors() {
+      @Override
+      public Fields get(int docID) throws IOException {
+        final int i = readerIndex(docID); // find subreader num
+        // dispatch to subreader, reusing if possible
+        if (subVectors[i] == null) {
+          subVectors[i] = subReaders[i].termVectors();
+        }
+        return subVectors[i].get(docID - starts[i]);
+      }
+    };
+  }
+
   @Override
   public final int numDocs() {
     // Don't call ensureOpen() here (it could affect performance)
@@ -154,6 +171,23 @@ public abstract class BaseCompositeReader<R extends IndexReader> extends CompositeReader {
     subReaders[i].document(docID - starts[i], visitor); // dispatch to subreader
   }

+  @Override
+  public final StoredFields storedFields() throws IOException {
+    ensureOpen();
+    StoredFields[] subFields = new StoredFields[subReaders.length];
+    return new StoredFields() {
+      @Override
+      public void document(int docID, StoredFieldVisitor visitor) throws IOException {
+        final int i = readerIndex(docID); // find subreader num
+        // dispatch to subreader, reusing if possible
+        if (subFields[i] == null) {
+          subFields[i] = subReaders[i].storedFields();
+        }
+        subFields[i].document(docID - starts[i], visitor);
+      }
+    };
+  }
+
   @Override
   public final int docFreq(Term term) throws IOException {
     ensureOpen();

@@ -3001,7 +3001,7 @@ public final class CheckIndex implements Closeable {
         // Intentionally pull even deleted documents to
         // make sure they too are not corrupt:
         DocumentStoredFieldVisitor visitor = new DocumentStoredFieldVisitor();
-        storedFields.visitDocument(j, visitor);
+        storedFields.document(j, visitor);
         Document doc = visitor.getDocument();
         if (liveDocs == null || liveDocs.get(j)) {
           status.docCount++;

@@ -35,14 +35,14 @@ public abstract class CodecReader extends LeafReader {
   protected CodecReader() {}

   /**
-   * Expert: retrieve thread-private StoredFieldsReader
+   * Expert: retrieve underlying StoredFieldsReader
    *
    * @lucene.internal
    */
   public abstract StoredFieldsReader getFieldsReader();

   /**
-   * Expert: retrieve thread-private TermVectorsReader
+   * Expert: retrieve underlying TermVectorsReader
    *
    * @lucene.internal
    */
@@ -83,24 +83,42 @@ public abstract class CodecReader extends LeafReader {
    */
   public abstract KnnVectorsReader getVectorReader();

+  // intentionally throw UOE for deprecated APIs: keep CodecReader clean!
+  // (IndexWriter should not be triggering threadlocals in any way)
+
   @Override
-  public final void document(int docID, StoredFieldVisitor visitor) throws IOException {
-    checkBounds(docID);
-    getFieldsReader().visitDocument(docID, visitor);
+  @Deprecated
+  public void document(int docID, StoredFieldVisitor visitor) throws IOException {
+    throw new UnsupportedOperationException("deprecated document access is not supported");
   }

   @Override
-  public final Fields getTermVectors(int docID) throws IOException {
-    TermVectorsReader termVectorsReader = getTermVectorsReader();
-    if (termVectorsReader == null) {
-      return null;
-    }
-    checkBounds(docID);
-    return termVectorsReader.get(docID);
+  @Deprecated
+  public Fields getTermVectors(int docID) throws IOException {
+    throw new UnsupportedOperationException("deprecated term vector access is not supported");
   }

-  private void checkBounds(int docID) {
+  @Override
+  public final StoredFields storedFields() throws IOException {
+    final StoredFields reader = getFieldsReader();
+    return new StoredFields() {
+      @Override
+      public void document(int docID, StoredFieldVisitor visitor) throws IOException {
+        // Don't trust the codec to do proper checks
+        Objects.checkIndex(docID, maxDoc());
+        reader.document(docID, visitor);
+      }
+    };
+  }
+
+  @Override
+  public final TermVectors termVectors() throws IOException {
+    TermVectorsReader reader = getTermVectorsReader();
+    if (reader == null) {
+      return TermVectors.EMPTY;
+    } else {
+      return reader;
+    }
+  }
+
   @Override

@@ -73,6 +73,11 @@ abstract class DocValuesLeafReader extends LeafReader {
     throw new UnsupportedOperationException();
   }

+  @Override
+  public final TermVectors termVectors() throws IOException {
+    throw new UnsupportedOperationException();
+  }
+
   @Override
   public final int numDocs() {
     throw new UnsupportedOperationException();
@@ -88,6 +93,11 @@ abstract class DocValuesLeafReader extends LeafReader {
     throw new UnsupportedOperationException();
   }

+  @Override
+  public final StoredFields storedFields() throws IOException {
+    throw new UnsupportedOperationException();
+  }
+
   @Override
   protected final void doClose() throws IOException {
     throw new UnsupportedOperationException();

@@ -84,6 +84,18 @@ public abstract class FilterCodecReader extends CodecReader {
     return in.getPostingsReader();
   }

+  @Override
+  @Deprecated
+  public void document(int docID, StoredFieldVisitor visitor) throws IOException {
+    in.document(docID, visitor);
+  }
+
+  @Override
+  @Deprecated
+  public Fields getTermVectors(int docID) throws IOException {
+    return in.getTermVectors(docID);
+  }
+
   @Override
   public Bits getLiveDocs() {
     return in.getLiveDocs();

@@ -363,6 +363,12 @@ public abstract class FilterLeafReader extends LeafReader {
     return in.getTermVectors(docID);
   }

+  @Override
+  public TermVectors termVectors() throws IOException {
+    ensureOpen();
+    return in.termVectors();
+  }
+
   @Override
   public int numDocs() {
     // Don't call ensureOpen() here (it could affect performance)
@@ -375,6 +381,12 @@ public abstract class FilterLeafReader extends LeafReader {
     return in.maxDoc();
   }

+  @Override
+  public StoredFields storedFields() throws IOException {
+    ensureOpen();
+    return in.storedFields();
+  }
+
   @Override
   public void document(int docID, StoredFieldVisitor visitor) throws IOException {
     ensureOpen();

@ -311,13 +311,19 @@ public abstract class IndexReader implements Closeable {
|
|||
/**
|
||||
* Retrieve term vectors for this document, or null if term vectors were not indexed. The returned
|
||||
* Fields instance acts like a single-document inverted index (the docID will be 0).
|
||||
*
|
||||
* @deprecated use {@link #termVectors()} to retrieve one or more documents
|
||||
*/
|
||||
@Deprecated
|
||||
public abstract Fields getTermVectors(int docID) throws IOException;
|
||||
|
||||
/**
|
||||
* Retrieve term vector for this document and field, or null if term vectors were not indexed. The
|
||||
* returned Fields instance acts like a single-document inverted index (the docID will be 0).
|
||||
*
|
||||
* @deprecated use {@link #termVectors()} to retrieve one or more documents
|
||||
*/
|
||||
@Deprecated
|
||||
public final Terms getTermVector(int docID, String field) throws IOException {
|
||||
Fields vectors = getTermVectors(docID);
|
||||
if (vectors == null) {
|
||||
|
@ -326,6 +332,26 @@ public abstract class IndexReader implements Closeable {
|
|||
return vectors.terms(field);
|
||||
}
|
||||
|
||||
/**
|
||||
* Returns a {@link TermVectors} reader for the term vectors of this index.
|
||||
*
|
||||
* <p>This call never returns {@code null}, even if no term vectors were indexed. The returned
|
||||
* instance should only be used by a single thread.
|
||||
*
|
||||
* <p>Example:
|
||||
*
|
||||
* <pre class="prettyprint">
|
||||
* TopDocs hits = searcher.search(query, 10);
|
||||
* TermVectors termVectors = reader.termVectors();
|
||||
* for (ScoreDoc hit : hits.scoreDocs) {
|
||||
* Fields vector = termVectors.get(hit.doc);
|
||||
* }
|
||||
* </pre>
|
||||
*
|
||||
* @throws IOException If there is a low-level IO error
|
||||
*/
|
||||
public abstract TermVectors termVectors() throws IOException;
|
||||
|
||||
/**
|
||||
* Returns the number of documents in this index.
|
||||
*
|
||||
|
@@ -354,7 +380,10 @@ public abstract class IndexReader implements Closeable {
    * Expert: visits the fields of a stored document, for custom processing/loading of each field. If
    * you simply want to load all fields, use {@link #document(int)}. If you want to load a subset,
    * use {@link DocumentStoredFieldVisitor}.
+   *
+   * @deprecated use {@link #storedFields()} to retrieve one or more documents
    */
+  @Deprecated
   public abstract void document(int docID, StoredFieldVisitor visitor) throws IOException;
 
   /**

@@ -371,10 +400,12 @@ public abstract class IndexReader implements Closeable {
    *
    * @throws CorruptIndexException if the index is corrupt
    * @throws IOException if there is a low-level IO error
+   * @deprecated use {@link #storedFields()} to retrieve one or more documents
    */
   // TODO: we need a separate StoredField, so that the
   // Document returned here contains that class not
   // IndexableField
+  @Deprecated
   public final Document document(int docID) throws IOException {
     final DocumentStoredFieldVisitor visitor = new DocumentStoredFieldVisitor();
     document(docID, visitor);

@@ -384,13 +415,36 @@ public abstract class IndexReader implements Closeable {
   /**
    * Like {@link #document(int)} but only loads the specified fields. Note that this is simply sugar
    * for {@link DocumentStoredFieldVisitor#DocumentStoredFieldVisitor(Set)}.
+   *
+   * @deprecated use {@link #storedFields()} to retrieve one or more documents
    */
+  @Deprecated
   public final Document document(int docID, Set<String> fieldsToLoad) throws IOException {
     final DocumentStoredFieldVisitor visitor = new DocumentStoredFieldVisitor(fieldsToLoad);
     document(docID, visitor);
     return visitor.getDocument();
   }
 
+  /**
+   * Returns a {@link StoredFields} reader for the stored fields of this index.
+   *
+   * <p>This call never returns {@code null}, even if no stored fields were indexed. The returned
+   * instance should only be used by a single thread.
+   *
+   * <p>Example:
+   *
+   * <pre class="prettyprint">
+   * TopDocs hits = searcher.search(query, 10);
+   * StoredFields storedFields = reader.storedFields();
+   * for (ScoreDoc hit : hits.scoreDocs) {
+   *   Document doc = storedFields.document(hit.doc);
+   * }
+   * </pre>
+   *
+   * @throws IOException If there is a low-level IO error
+   */
+  public abstract StoredFields storedFields() throws IOException;
+
   /**
    * Returns true if any documents have been deleted. Implementers should consider overriding this
    * method if {@link #maxDoc()} or {@link #numDocs()} are not constant-time operations.
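The same pattern applies to stored fields; a hedged sketch of resolving hits with one reusable instance (the query and the `"title"` field are hypothetical):

```java
import java.io.IOException;
import org.apache.lucene.document.Document;
import org.apache.lucene.index.IndexReader;
import org.apache.lucene.index.StoredFields;
import org.apache.lucene.search.IndexSearcher;
import org.apache.lucene.search.Query;
import org.apache.lucene.search.ScoreDoc;
import org.apache.lucene.search.TopDocs;

class StoredFieldsMigrationSketch {
  static void printTitles(IndexReader reader, Query query) throws IOException {
    IndexSearcher searcher = new IndexSearcher(reader);
    TopDocs hits = searcher.search(query, 10);
    StoredFields storedFields = reader.storedFields(); // one instance serves all hits
    for (ScoreDoc hit : hits.scoreDocs) {
      Document doc = storedFields.document(hit.doc); // replaces reader.document(hit.doc)
      System.out.println(doc.get("title"));
    }
  }
}
```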
@@ -42,7 +42,7 @@ public interface IndexableFieldType {
    * True if this field's indexed form should be also stored into term vectors.
    *
    * <p>This builds a miniature inverted-index for this field which can be accessed in a
-   * document-oriented way from {@link IndexReader#getTermVector(int,String)}.
+   * document-oriented way from {@link TermVectors#get(int,String)}.
    *
    * <p>This option is illegal if {@link #indexOptions()} returns IndexOptions.NONE.
    */

@@ -273,6 +273,23 @@ public class ParallelLeafReader extends LeafReader {
     }
   }
 
+  @Override
+  public StoredFields storedFields() throws IOException {
+    ensureOpen();
+    StoredFields[] fields = new StoredFields[storedFieldsReaders.length];
+    for (int i = 0; i < fields.length; i++) {
+      fields[i] = storedFieldsReaders[i].storedFields();
+    }
+    return new StoredFields() {
+      @Override
+      public void document(int docID, StoredFieldVisitor visitor) throws IOException {
+        for (StoredFields reader : fields) {
+          reader.document(docID, visitor);
+        }
+      }
+    };
+  }
+
   @Override
   public CacheHelper getCoreCacheHelper() {
     // ParallelReader instances can be short-lived, which would make caching trappy

@@ -317,6 +334,30 @@ public class ParallelLeafReader extends LeafReader {
     return fields;
   }
 
+  @Override
+  public TermVectors termVectors() throws IOException {
+    ensureOpen();
+    // TODO: optimize
+    return new TermVectors() {
+      @Override
+      public Fields get(int docID) throws IOException {
+        ParallelFields fields = null;
+        for (Map.Entry<String, LeafReader> ent : tvFieldToReader.entrySet()) {
+          String fieldName = ent.getKey();
+          Terms vector = ent.getValue().termVectors().get(docID, fieldName);
+          if (vector != null) {
+            if (fields == null) {
+              fields = new ParallelFields();
+            }
+            fields.addField(fieldName, vector);
+          }
+        }
+
+        return fields;
+      }
+    };
+  }
+
   @Override
   protected synchronized void doClose() throws IOException {
     IOException ioe = null;
@@ -18,6 +18,7 @@ package org.apache.lucene.index;
 
 import java.io.IOException;
 import java.util.Collections;
+import java.util.Objects;
 import java.util.Set;
 import java.util.concurrent.CopyOnWriteArraySet;
 import org.apache.lucene.codecs.Codec;

@@ -244,16 +245,52 @@ public final class SegmentReader extends CodecReader {
     return si.info.maxDoc();
   }
 
+  /* Support for deprecated threadlocal document/vectors APIs */
+
+  @Override
+  public final void document(int docID, StoredFieldVisitor visitor) throws IOException {
+    Objects.checkIndex(docID, maxDoc());
+    getThreadLocalFieldsReader().document(docID, visitor);
+  }
+
+  @Override
+  public final Fields getTermVectors(int docID) throws IOException {
+    Objects.checkIndex(docID, maxDoc());
+    TermVectorsReader termVectorsReader = getThreadLocalTermVectorsReader();
+    if (termVectorsReader == null) {
+      return null;
+    }
+    return termVectorsReader.get(docID);
+  }
+
+  @Deprecated
+  private TermVectorsReader getThreadLocalTermVectorsReader() {
+    ensureOpen();
+    return core.termVectorsLocal.get();
+  }
+
+  @Deprecated
+  private StoredFieldsReader getThreadLocalFieldsReader() {
+    ensureOpen();
+    return core.fieldsReaderLocal.get();
+  }
+
+  /* end support for deprecated threadlocal document/vectors APIs */
+
   @Override
   public TermVectorsReader getTermVectorsReader() {
     ensureOpen();
-    return core.termVectorsLocal.get();
+    if (core.termVectorsReaderOrig == null) {
+      return null;
+    } else {
+      return core.termVectorsReaderOrig.clone();
+    }
   }
 
   @Override
   public StoredFieldsReader getFieldsReader() {
     ensureOpen();
-    return core.fieldsReaderLocal.get();
+    return core.fieldsReaderOrig.clone();
   }
 
   @Override
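Since SegmentReader no longer parks per-thread clones in ThreadLocals, concurrent consumers are expected to acquire their own short-lived instance. A sketch of that pattern, under assumed executor sizing (not code from this commit):

```java
import java.io.IOException;
import java.io.UncheckedIOException;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import org.apache.lucene.index.IndexReader;
import org.apache.lucene.index.StoredFields;

class PerThreadAccessSketch {
  static void loadInParallel(IndexReader reader, int[] docIDs) {
    ExecutorService pool = Executors.newFixedThreadPool(4); // sizing is arbitrary here
    for (int docID : docIDs) {
      pool.execute(
          () -> {
            try {
              // StoredFields must not be shared across threads: acquire per task.
              StoredFields storedFields = reader.storedFields();
              storedFields.document(docID);
            } catch (IOException e) {
              throw new UncheckedIOException(e);
            }
          });
    }
    pool.shutdown();
  }
}
```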
@@ -75,8 +75,8 @@ public class SimpleMergedSegmentWarmer implements IndexReaderWarmer {
       }
     }
 
-    reader.document(0);
-    reader.getTermVectors(0);
+    reader.storedFields().document(0);
+    reader.termVectors().get(0);
 
     if (infoStream.isEnabled("SMSW")) {
       infoStream.message(

@@ -17,6 +17,7 @@
 package org.apache.lucene.index;
 
 import java.io.IOException;
+import java.io.UncheckedIOException;
 import java.util.ArrayList;
 import java.util.Collections;
 import java.util.Iterator;

@@ -244,10 +245,16 @@ public final class SlowCodecReaderWrapper {
   }
 
   private static StoredFieldsReader readerToStoredFieldsReader(final LeafReader reader) {
+    final StoredFields storedFields;
+    try {
+      storedFields = reader.storedFields();
+    } catch (IOException e) {
+      throw new UncheckedIOException(e);
+    }
     return new StoredFieldsReader() {
       @Override
-      public void visitDocument(int docID, StoredFieldVisitor visitor) throws IOException {
-        reader.document(docID, visitor);
+      public void document(int docID, StoredFieldVisitor visitor) throws IOException {
+        storedFields.document(docID, visitor);
       }
 
       @Override

@@ -266,10 +273,16 @@ public final class SlowCodecReaderWrapper {
   }
 
   private static TermVectorsReader readerToTermVectorsReader(final LeafReader reader) {
+    final TermVectors termVectors;
+    try {
+      termVectors = reader.termVectors();
+    } catch (IOException e) {
+      throw new UncheckedIOException(e);
+    }
     return new TermVectorsReader() {
       @Override
       public Fields get(int docID) throws IOException {
-        return reader.getTermVectors(docID);
+        return termVectors.get(docID);
       }
 
       @Override

@@ -402,8 +402,8 @@ public final class SortingCodecReader extends FilterCodecReader {
   private StoredFieldsReader newStoredFieldsReader(StoredFieldsReader delegate) {
     return new StoredFieldsReader() {
       @Override
-      public void visitDocument(int docID, StoredFieldVisitor visitor) throws IOException {
-        delegate.visitDocument(docMap.newToOld(docID), visitor);
+      public void document(int docID, StoredFieldVisitor visitor) throws IOException {
+        delegate.document(docMap.newToOld(docID), visitor);
       }
 
       @Override

@@ -113,7 +113,7 @@ final class SortingStoredFieldsConsumer extends StoredFieldsConsumer {
     CopyVisitor visitor = new CopyVisitor(sortWriter);
     for (int docID = 0; docID < state.segmentInfo.maxDoc(); docID++) {
       sortWriter.startDocument();
-      reader.visitDocument(sortMap == null ? docID : sortMap.newToOld(docID), visitor);
+      reader.document(sortMap == null ? docID : sortMap.newToOld(docID), visitor);
       sortWriter.finishDocument();
     }
     sortWriter.finish(state.segmentInfo.maxDoc());

@@ -22,7 +22,7 @@ import org.apache.lucene.document.DocumentStoredFieldVisitor;
 
 /**
  * Expert: provides a low-level means of accessing the stored field values in an index. See {@link
- * IndexReader#document(int, StoredFieldVisitor)}.
+ * StoredFields#document(int, StoredFieldVisitor)}.
  *
  * <p><b>NOTE</b>: a {@code StoredFieldVisitor} implementation should not try to load or visit other
 * stored documents in the same reader because the implementation of stored fields for most codecs

@@ -30,7 +30,7 @@ import org.apache.lucene.document.DocumentStoredFieldVisitor;
  *
  * <p>See {@link DocumentStoredFieldVisitor}, which is a <code>StoredFieldVisitor</code> that builds
  * the {@link Document} containing all stored fields. This is used by {@link
- * IndexReader#document(int)}.
+ * StoredFields#document(int)}.
  *
  * @lucene.experimental
  */
@@ -0,0 +1,75 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.lucene.index;
+
+import java.io.IOException;
+import java.util.Set;
+import org.apache.lucene.document.Document;
+import org.apache.lucene.document.DocumentStoredFieldVisitor;
+import org.apache.lucene.util.Bits;
+
+/**
+ * API for reading stored fields.
+ *
+ * <p><b>NOTE</b>: This class is not thread-safe and should only be consumed in the thread where it
+ * was acquired.
+ */
+public abstract class StoredFields {
+  /** Sole constructor. (For invocation by subclass constructors, typically implicit.) */
+  protected StoredFields() {}
+
+  /**
+   * Returns the stored fields of the <code>n</code><sup>th</sup> <code>Document</code> in this
+   * index. This is just sugar for using {@link DocumentStoredFieldVisitor}.
+   *
+   * <p><b>NOTE:</b> for performance reasons, this method does not check if the requested document
+   * is deleted, and therefore asking for a deleted document may yield unspecified results. Usually
+   * this is not required, however you can test if the doc is deleted by checking the {@link Bits}
+   * returned from {@link MultiBits#getLiveDocs}.
+   *
+   * <p><b>NOTE:</b> only the content of a field is returned, if that field was stored during
+   * indexing. Metadata like boost, omitNorm, IndexOptions, tokenized, etc., are not preserved.
+   *
+   * @throws CorruptIndexException if the index is corrupt
+   * @throws IOException if there is a low-level IO error
+   */
+  // TODO: we need a separate StoredField, so that the
+  // Document returned here contains that class not
+  // IndexableField
+  public final Document document(int docID) throws IOException {
+    final DocumentStoredFieldVisitor visitor = new DocumentStoredFieldVisitor();
+    document(docID, visitor);
+    return visitor.getDocument();
+  }
+
+  /**
+   * Expert: visits the fields of a stored document, for custom processing/loading of each field. If
+   * you simply want to load all fields, use {@link #document(int)}. If you want to load a subset,
+   * use {@link DocumentStoredFieldVisitor}.
+   */
+  public abstract void document(int docID, StoredFieldVisitor visitor) throws IOException;
+
+  /**
+   * Like {@link #document(int)} but only loads the specified fields. Note that this is simply sugar
+   * for {@link DocumentStoredFieldVisitor#DocumentStoredFieldVisitor(Set)}.
+   */
+  public final Document document(int docID, Set<String> fieldsToLoad) throws IOException {
+    final DocumentStoredFieldVisitor visitor = new DocumentStoredFieldVisitor(fieldsToLoad);
+    document(docID, visitor);
+    return visitor.getDocument();
+  }
+}
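A small usage sketch for the class above, loading only a subset of stored fields (the `"id"` field name is hypothetical):

```java
import java.io.IOException;
import java.util.Set;
import org.apache.lucene.document.Document;
import org.apache.lucene.index.IndexReader;
import org.apache.lucene.index.StoredFields;

class LoadSubsetSketch {
  static String loadId(IndexReader reader, int docID) throws IOException {
    StoredFields storedFields = reader.storedFields();
    // Visits only "id" and skips decoding the remaining stored fields.
    Document doc = storedFields.document(docID, Set.of("id"));
    return doc.get("id");
  }
}
```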
@@ -0,0 +1,65 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.lucene.index;
+
+import java.io.IOException;
+import org.apache.lucene.analysis.tokenattributes.OffsetAttribute; // javadocs
+
+/**
+ * API for reading term vectors.
+ *
+ * <p><b>NOTE</b>: This class is not thread-safe and should only be consumed in the thread where it
+ * was acquired.
+ */
+public abstract class TermVectors {
+
+  /** Sole constructor. (For invocation by subclass constructors, typically implicit.) */
+  protected TermVectors() {}
+
+  /**
+   * Returns term vectors for this document, or null if term vectors were not indexed.
+   *
+   * <p>The returned Fields instance acts like a single-document inverted index (the docID will be
+   * 0). If offsets are available they are in an {@link OffsetAttribute} available from the {@link
+   * PostingsEnum}.
+   */
+  public abstract Fields get(int doc) throws IOException;
+
+  /**
+   * Retrieve term vector for this document and field, or null if term vectors were not indexed.
+   *
+   * <p>The returned Terms instance acts like a single-document inverted index (the docID will be
+   * 0). If offsets are available they are in an {@link OffsetAttribute} available from the {@link
+   * PostingsEnum}.
+   */
+  public final Terms get(int doc, String field) throws IOException {
+    Fields vectors = get(doc);
+    if (vectors == null) {
+      return null;
+    }
+    return vectors.terms(field);
+  }
+
+  /** Instance that never returns term vectors */
+  public static final TermVectors EMPTY =
+      new TermVectors() {
+        @Override
+        public Fields get(int doc) {
+          return null;
+        }
+      };
+}
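And a sketch of consuming the TermVectors API end to end, enumerating the per-document mini inverted index the javadoc above describes (the `"body"` field name is hypothetical):

```java
import java.io.IOException;
import org.apache.lucene.index.IndexReader;
import org.apache.lucene.index.Terms;
import org.apache.lucene.index.TermsEnum;
import org.apache.lucene.util.BytesRef;

class DumpVectorSketch {
  static void dump(IndexReader reader, int docID) throws IOException {
    Terms terms = reader.termVectors().get(docID, "body");
    if (terms == null) {
      return; // term vectors were not indexed for this doc/field
    }
    TermsEnum termsEnum = terms.iterator();
    for (BytesRef term = termsEnum.next(); term != null; term = termsEnum.next()) {
      System.out.println(term.utf8ToString() + " freq=" + termsEnum.totalTermFreq());
    }
  }
}
```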
@@ -143,7 +143,8 @@
  * // access indexed fields for an index segment
  * Fields fields = reader.fields();
  * // access term vector fields for a specified document
- * Fields fields = reader.getTermVectors(docid);
+ * TermVectors vectors = reader.termVectors();
+ * Fields fields = vectors.get(docid);
  * </pre>
  *
  * Fields implements Java's Iterable interface, so it's easy to enumerate the list of fields:

@@ -40,6 +40,7 @@ import org.apache.lucene.index.LeafReaderContext;
 import org.apache.lucene.index.QueryTimeout;
 import org.apache.lucene.index.ReaderUtil;
 import org.apache.lucene.index.StoredFieldVisitor;
+import org.apache.lucene.index.StoredFields;
 import org.apache.lucene.index.Term;
 import org.apache.lucene.index.Terms;
 import org.apache.lucene.search.similarities.BM25Similarity;

@@ -382,7 +383,9 @@ public class IndexSearcher {
    * Sugar for <code>.getIndexReader().document(docID)</code>
    *
    * @see IndexReader#document(int)
+   * @deprecated Use {@link #storedFields()} to access fields for one or more documents
    */
+  @Deprecated
   public Document doc(int docID) throws IOException {
     return reader.document(docID);
   }

@@ -391,7 +394,9 @@ public class IndexSearcher {
    * Sugar for <code>.getIndexReader().document(docID, fieldVisitor)</code>
    *
    * @see IndexReader#document(int, StoredFieldVisitor)
+   * @deprecated Use {@link #storedFields()} to access fields for one or more documents
    */
+  @Deprecated
   public void doc(int docID, StoredFieldVisitor fieldVisitor) throws IOException {
     reader.document(docID, fieldVisitor);
   }

@@ -400,11 +405,38 @@ public class IndexSearcher {
    * Sugar for <code>.getIndexReader().document(docID, fieldsToLoad)</code>
    *
    * @see IndexReader#document(int, Set)
+   * @deprecated Use {@link #storedFields()} to access fields for one or more documents
    */
+  @Deprecated
   public Document doc(int docID, Set<String> fieldsToLoad) throws IOException {
     return reader.document(docID, fieldsToLoad);
   }
 
+  /**
+   * Returns a {@link StoredFields} reader for the stored fields of this index.
+   *
+   * <p>Sugar for <code>.getIndexReader().storedFields()</code>
+   *
+   * <p>This call never returns {@code null}, even if no stored fields were indexed. The returned
+   * instance should only be used by a single thread.
+   *
+   * <p>Example:
+   *
+   * <pre class="prettyprint">
+   * TopDocs hits = searcher.search(query, 10);
+   * StoredFields storedFields = searcher.storedFields();
+   * for (ScoreDoc hit : hits.scoreDocs) {
+   *   Document doc = storedFields.document(hit.doc);
+   * }
+   * </pre>
+   *
+   * @throws IOException If there is a low-level IO error
+   * @see IndexReader#storedFields()
+   */
+  public StoredFields storedFields() throws IOException {
+    return reader.storedFields();
+  }
+
   /** Expert: Set the Similarity implementation used by this IndexSearcher. */
   public void setSimilarity(Similarity similarity) {
     this.similarity = similarity;
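Per the javadoc above, `searcher.storedFields()` is plain sugar for the reader call; a one-method sketch of the replacement for the deprecated `doc(int)` (illustrative only):

```java
import java.io.IOException;
import org.apache.lucene.document.Document;
import org.apache.lucene.index.StoredFields;
import org.apache.lucene.search.IndexSearcher;

class SearcherSugarSketch {
  static Document load(IndexSearcher searcher, int docID) throws IOException {
    // Before: Document doc = searcher.doc(docID);  // now deprecated
    StoredFields storedFields = searcher.storedFields(); // == getIndexReader().storedFields()
    return storedFields.document(docID);
  }
}
```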
@@ -17,6 +17,7 @@
 package org.apache.lucene.search;
 
 import java.io.IOException;
+import org.apache.lucene.index.StoredFields;
 
 /**
  * Collector decouples the score from the collected doc: the score computation is skipped entirely

@@ -77,9 +78,8 @@ public interface LeafCollector {
    * swallow the exception and continue collection with the next leaf.
    *
    * <p>Note: This is called in an inner search loop. For good search performance, implementations
-   * of this method should not call {@link IndexSearcher#doc(int)} or {@link
-   * org.apache.lucene.index.IndexReader#document(int)} on every hit. Doing so can slow searches by
-   * an order of magnitude or more.
+   * of this method should not call {@link StoredFields#document} on every hit. Doing so can slow
+   * searches by an order of magnitude or more.
    */
   void collect(int doc) throws IOException;
 
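To illustrate the performance note above: collect bare doc IDs in the hot loop, then resolve stored fields once collection has finished. A hedged sketch (the collector and field handling are illustrative, not part of this change):

```java
import java.io.IOException;
import java.util.ArrayList;
import java.util.List;
import org.apache.lucene.document.Document;
import org.apache.lucene.index.LeafReaderContext;
import org.apache.lucene.index.StoredFields;
import org.apache.lucene.search.IndexSearcher;
import org.apache.lucene.search.Query;
import org.apache.lucene.search.ScoreMode;
import org.apache.lucene.search.SimpleCollector;

class CollectThenLoadSketch {
  static List<Document> run(IndexSearcher searcher, Query query) throws IOException {
    List<Integer> docIDs = new ArrayList<>();
    searcher.search(
        query,
        new SimpleCollector() {
          private int docBase;

          @Override
          protected void doSetNextReader(LeafReaderContext context) {
            docBase = context.docBase;
          }

          @Override
          public void collect(int doc) {
            docIDs.add(docBase + doc); // no stored-field access in the inner loop
          }

          @Override
          public ScoreMode scoreMode() {
            return ScoreMode.COMPLETE_NO_SCORES;
          }
        });
    StoredFields storedFields = searcher.storedFields(); // load once, after collection
    List<Document> docs = new ArrayList<>();
    for (int docID : docIDs) {
      docs.add(storedFields.document(docID));
    }
    return docs;
  }
}
```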
@@ -16,6 +16,8 @@
  */
 package org.apache.lucene.search;
 
+import org.apache.lucene.index.StoredFields;
+
 /** Holds one hit in {@link TopDocs}. */
 public class ScoreDoc {
 

@@ -25,7 +27,7 @@ public class ScoreDoc {
   /**
    * A hit document's number.
    *
-   * @see IndexSearcher#doc(int)
+   * @see StoredFields#document(int)
    */
   public int doc;
 

@@ -49,8 +49,9 @@ to check if the results are what we expect):</p>
 ScoreDoc[] hits = isearcher.search(query, 10).scoreDocs;
 assertEquals(1, hits.length);
 // Iterate through the results:
+StoredFields storedFields = isearcher.storedFields();
 for (int i = 0; i < hits.length; i++) {
-  Document hitDoc = isearcher.doc(hits[i].doc);
+  Document hitDoc = storedFields.document(hits[i].doc);
   assertEquals("This is the text to be indexed.", hitDoc.get("fieldname"));
 }
 ireader.close();
@@ -27,6 +27,7 @@ import org.apache.lucene.index.DirectoryReader;
 import org.apache.lucene.index.IndexReader;
 import org.apache.lucene.index.IndexWriter;
 import org.apache.lucene.index.IndexWriterConfig;
+import org.apache.lucene.index.StoredFields;
 import org.apache.lucene.index.Term;
 import org.apache.lucene.search.IndexSearcher;
 import org.apache.lucene.search.PhraseQuery;

@@ -72,8 +73,9 @@ public class TestDemo extends LuceneTestCase {
     assertEquals(1, hits.totalHits.value);
 
     // Iterate through the results.
+    StoredFields storedFields = searcher.storedFields();
     for (int i = 0; i < hits.scoreDocs.length; i++) {
-      Document hitDoc = searcher.doc(hits.scoreDocs[i].doc);
+      Document hitDoc = storedFields.document(hits.scoreDocs[i].doc);
       assertEquals(text, hitDoc.get("fieldname"));
     }
 

@@ -30,6 +30,7 @@ import org.apache.lucene.index.IndexReader;
 import org.apache.lucene.index.IndexWriter;
 import org.apache.lucene.index.IndexWriterConfig;
 import org.apache.lucene.index.MergePolicy;
+import org.apache.lucene.index.StoredFields;
 import org.apache.lucene.index.Term;
 import org.apache.lucene.search.BooleanClause;
 import org.apache.lucene.search.BooleanQuery;

@@ -116,8 +117,9 @@ public class TestSearch extends LuceneTestCase {
     hits = searcher.search(query, 1000, sort).scoreDocs;
 
     out.println(hits.length + " total results");
+    StoredFields storedFields = searcher.storedFields();
     for (int i = 0; i < hits.length && i < 10; i++) {
-      Document d = searcher.doc(hits[i].doc);
+      Document d = storedFields.document(hits[i].doc);
       out.println(i + " " + hits[i].score + " " + d.get("contents"));
     }
   }

@@ -30,6 +30,7 @@ import org.apache.lucene.index.IndexReader;
 import org.apache.lucene.index.IndexWriter;
 import org.apache.lucene.index.IndexWriterConfig;
 import org.apache.lucene.index.MergePolicy;
+import org.apache.lucene.index.StoredFields;
 import org.apache.lucene.index.Term;
 import org.apache.lucene.search.BooleanClause;
 import org.apache.lucene.search.BooleanQuery;

@@ -137,9 +138,10 @@ public class TestSearchForDuplicates extends LuceneTestCase {
   private void printHits(PrintWriter out, ScoreDoc[] hits, IndexSearcher searcher)
       throws IOException {
     out.println(hits.length + " total results\n");
+    StoredFields storedFields = searcher.storedFields();
     for (int i = 0; i < hits.length; i++) {
       if (i < 10 || (i > 94 && i < 105)) {
-        Document d = searcher.doc(hits[i].doc);
+        Document d = storedFields.document(hits[i].doc);
         out.println(i + " " + d.get(ID_FIELD));
       }
     }

@@ -148,9 +150,10 @@ public class TestSearchForDuplicates extends LuceneTestCase {
   private void checkHits(ScoreDoc[] hits, int expectedCount, IndexSearcher searcher)
       throws IOException {
     assertEquals("total results", expectedCount, hits.length);
+    StoredFields storedFields = searcher.storedFields();
     for (int i = 0; i < hits.length; i++) {
       if (i < 10 || (i > 94 && i < 105)) {
-        Document d = searcher.doc(hits[i].doc);
+        Document d = storedFields.document(hits[i].doc);
         assertEquals("check " + i, String.valueOf(i), d.get(ID_FIELD));
       }
     }

@@ -55,6 +55,7 @@ import org.apache.lucene.index.SerialMergeScheduler;
 import org.apache.lucene.index.SortedDocValues;
 import org.apache.lucene.index.SortedNumericDocValues;
 import org.apache.lucene.index.SortedSetDocValues;
+import org.apache.lucene.index.StoredFields;
 import org.apache.lucene.index.Term;
 import org.apache.lucene.index.Terms;
 import org.apache.lucene.index.TermsEnum;

@@ -234,8 +235,9 @@ public class TestLucene90DocValuesFormat extends BaseCompressingDocValuesFormatT
 
     final SortedSetDocValues sortedSet = DocValues.getSortedSet(reader, "sorted_set");
 
+    StoredFields storedFields = reader.storedFields();
     for (int i = 0; i < reader.maxDoc(); ++i) {
-      final Document doc = reader.document(i);
+      final Document doc = storedFields.document(i);
       final IndexableField valueField = doc.getField("value");
       final Long value = valueField == null ? null : valueField.numericValue().longValue();
 

@@ -675,11 +677,12 @@ public class TestLucene90DocValuesFormat extends BaseCompressingDocValuesFormatT
     for (LeafReaderContext context : ir.leaves()) {
       LeafReader r = context.reader();
       SortedNumericDocValues docValues = DocValues.getSortedNumeric(r, "dv");
+      StoredFields storedFields = r.storedFields();
      for (int i = 0; i < r.maxDoc(); i++) {
         if (i > docValues.docID()) {
           docValues.nextDoc();
         }
-        String[] expectedStored = r.document(i).getValues("stored");
+        String[] expectedStored = storedFields.document(i).getValues("stored");
         if (i < docValues.docID()) {
           assertEquals(0, expectedStored.length);
         } else {

@@ -747,6 +750,7 @@ public class TestLucene90DocValuesFormat extends BaseCompressingDocValuesFormatT
     TestUtil.checkReader(ir);
     for (LeafReaderContext context : ir.leaves()) {
       LeafReader r = context.reader();
+      StoredFields storedFields = r.storedFields();
 
       for (int jump = jumpStep; jump < r.maxDoc(); jump += jumpStep) {
         // Create a new instance each time to ensure jumps from the beginning

@@ -761,7 +765,7 @@ public class TestLucene90DocValuesFormat extends BaseCompressingDocValuesFormatT
                 + jump
                 + " from #"
                 + (docID - jump);
-        String storedValue = r.document(docID).get("stored");
+        String storedValue = storedFields.document(docID).get("stored");
         if (storedValue == null) {
           assertFalse("There should be no DocValue for " + base, docValues.advanceExact(docID));
         } else {

@@ -25,6 +25,7 @@ import org.apache.lucene.document.StoredField;
 import org.apache.lucene.index.DirectoryReader;
 import org.apache.lucene.index.IndexWriter;
 import org.apache.lucene.index.IndexWriterConfig;
+import org.apache.lucene.index.StoredFields;
 import org.apache.lucene.store.Directory;
 import org.apache.lucene.tests.index.BaseStoredFieldsFormatTestCase;
 

@@ -57,8 +58,9 @@ public class TestLucene90StoredFieldsFormatHighCompression extends BaseStoredFie
 
     DirectoryReader ir = DirectoryReader.open(dir);
     assertEquals(10, ir.numDocs());
+    StoredFields storedFields = ir.storedFields();
     for (int i = 0; i < 10; i++) {
-      Document doc = ir.document(i);
+      Document doc = storedFields.document(i);
       assertEquals("value1", doc.get("field1"));
       assertEquals("value2", doc.get("field2"));
     }
@@ -60,7 +60,7 @@ public class TestCompressingTermVectorsFormat extends BaseTermVectorsFormatTestC
     doc.add(new Field("foo", "this is a test", ft));
     iw.addDocument(doc);
     LeafReader ir = getOnlyLeafReader(iw.getReader());
-    Terms terms = ir.getTermVector(0, "foo");
+    Terms terms = ir.termVectors().get(0, "foo");
     assertNotNull(terms);
     TermsEnum termsEnum = terms.iterator();
     assertEquals(SeekStatus.FOUND, termsEnum.seekCeil(new BytesRef("this")));

@@ -43,6 +43,7 @@ import org.apache.lucene.index.MergeState;
 import org.apache.lucene.index.NumericDocValues;
 import org.apache.lucene.index.SegmentReadState;
 import org.apache.lucene.index.SegmentWriteState;
+import org.apache.lucene.index.StoredFields;
 import org.apache.lucene.index.Term;
 import org.apache.lucene.search.IndexSearcher;
 import org.apache.lucene.search.Query;

@@ -119,10 +120,11 @@ public class TestPerFieldDocValuesFormat extends BaseDocValuesFormatTestCase {
     Query query = new TermQuery(new Term("fieldname", "text"));
     TopDocs hits = isearcher.search(query, 1);
     assertEquals(1, hits.totalHits.value);
+    StoredFields storedFields = isearcher.storedFields();
     // Iterate through the results:
     for (int i = 0; i < hits.scoreDocs.length; i++) {
       int hitDocID = hits.scoreDocs[i].doc;
-      Document hitDoc = isearcher.doc(hitDocID);
+      Document hitDoc = storedFields.document(hitDocID);
       assertEquals(text, hitDoc.get("fieldname"));
       assert ireader.leaves().size() == 1;
       NumericDocValues dv = ireader.leaves().get(0).reader().getNumericDocValues("dv1");

@@ -53,7 +53,7 @@ public class TestBinaryDocument extends LuceneTestCase {
 
     /** open a reader and fetch the document */
     IndexReader reader = writer.getReader();
-    Document docFromReader = reader.document(0);
+    Document docFromReader = reader.storedFields().document(0);
     assertTrue(docFromReader != null);
 
     /** fetch the binary stored field and compare its content with the original one */

@@ -24,6 +24,7 @@ import org.apache.lucene.index.DirectoryReader;
 import org.apache.lucene.index.IndexOptions;
 import org.apache.lucene.index.IndexReader;
 import org.apache.lucene.index.IndexableField;
+import org.apache.lucene.index.StoredFields;
 import org.apache.lucene.index.Term;
 import org.apache.lucene.search.IndexSearcher;
 import org.apache.lucene.search.PhraseQuery;

@@ -208,7 +209,7 @@ public class TestDocument extends LuceneTestCase {
     ScoreDoc[] hits = searcher.search(query, 1000).scoreDocs;
     assertEquals(1, hits.length);
 
-    doAssert(searcher.doc(hits[0].doc), true);
+    doAssert(searcher.storedFields().document(hits[0].doc), true);
     writer.close();
     reader.close();
     dir.close();

@@ -234,7 +235,7 @@ public class TestDocument extends LuceneTestCase {
     ScoreDoc[] hits = searcher.search(query, 1000).scoreDocs;
     assertEquals(1, hits.length);
 
-    doAssert(searcher.doc(hits[0].doc), true);
+    doAssert(searcher.storedFields().document(hits[0].doc), true);
     writer.close();
     reader.close();
     dir.close();

@@ -313,8 +314,9 @@ public class TestDocument extends LuceneTestCase {
     ScoreDoc[] hits = searcher.search(query, 1000).scoreDocs;
     assertEquals(3, hits.length);
     int result = 0;
+    StoredFields storedFields = searcher.storedFields();
     for (int i = 0; i < 3; i++) {
-      Document doc2 = searcher.doc(hits[i].doc);
+      Document doc2 = storedFields.document(hits[i].doc);
       Field f = (Field) doc2.getField("id");
       if (f.stringValue().equals("id1")) result |= 1;
       else if (f.stringValue().equals("id2")) result |= 2;

@@ -350,7 +352,7 @@ public class TestDocument extends LuceneTestCase {
     RandomIndexWriter iw = new RandomIndexWriter(random(), dir);
     iw.addDocument(doc);
     DirectoryReader ir = iw.getReader();
-    Document sdoc = ir.document(0);
+    Document sdoc = ir.storedFields().document(0);
     assertEquals("5", sdoc.get("int"));
     assertNull(sdoc.get("somethingElse"));
     assertArrayEquals(new String[] {"5", "4"}, sdoc.getValues("int"));

@@ -19,6 +19,7 @@ package org.apache.lucene.document;
 import java.io.IOException;
 import org.apache.lucene.index.IndexReader;
 import org.apache.lucene.index.IndexWriterConfig;
+import org.apache.lucene.index.StoredFields;
 import org.apache.lucene.search.IndexSearcher;
 import org.apache.lucene.search.MatchAllDocsQuery;
 import org.apache.lucene.search.Sort;

@@ -68,14 +69,15 @@ public class TestFeatureSort extends LuceneTestCase {
     writer.close();
 
     IndexSearcher searcher = newSearcher(ir);
+    StoredFields storedFields = searcher.storedFields();
     Sort sort = new Sort(FeatureField.newFeatureSort("field", "name"));
 
     TopDocs td = searcher.search(new MatchAllDocsQuery(), 10, sort);
     assertEquals(3, td.totalHits.value);
     // numeric order
-    assertEquals("30.1", searcher.doc(td.scoreDocs[0].doc).get("value"));
-    assertEquals("4.2", searcher.doc(td.scoreDocs[1].doc).get("value"));
-    assertEquals("1.3", searcher.doc(td.scoreDocs[2].doc).get("value"));
+    assertEquals("30.1", storedFields.document(td.scoreDocs[0].doc).get("value"));
+    assertEquals("4.2", storedFields.document(td.scoreDocs[1].doc).get("value"));
+    assertEquals("1.3", storedFields.document(td.scoreDocs[2].doc).get("value"));
 
     ir.close();
     dir.close();

@@ -101,13 +103,14 @@ public class TestFeatureSort extends LuceneTestCase {
 
     IndexSearcher searcher = newSearcher(ir);
     Sort sort = new Sort(FeatureField.newFeatureSort("field", "name"));
+    StoredFields storedFields = searcher.storedFields();
 
     TopDocs td = searcher.search(new MatchAllDocsQuery(), 10, sort);
     assertEquals(3, td.totalHits.value);
     // null is treated as 0
-    assertEquals("4.2", searcher.doc(td.scoreDocs[0].doc).get("value"));
-    assertEquals("1.3", searcher.doc(td.scoreDocs[1].doc).get("value"));
-    assertNull(searcher.doc(td.scoreDocs[2].doc).get("value"));
+    assertEquals("4.2", storedFields.document(td.scoreDocs[0].doc).get("value"));
+    assertEquals("1.3", storedFields.document(td.scoreDocs[1].doc).get("value"));
+    assertNull(storedFields.document(td.scoreDocs[2].doc).get("value"));
 
     ir.close();
     dir.close();

@@ -134,13 +137,14 @@ public class TestFeatureSort extends LuceneTestCase {
 
     IndexSearcher searcher = newSearcher(ir);
     Sort sort = new Sort(FeatureField.newFeatureSort("field", "name"));
+    StoredFields storedFields = searcher.storedFields();
 
     TopDocs td = searcher.search(new MatchAllDocsQuery(), 10, sort);
     assertEquals(3, td.totalHits.value);
     // null is treated as 0
-    assertEquals("4.2", searcher.doc(td.scoreDocs[0].doc).get("value"));
-    assertEquals("1.3", searcher.doc(td.scoreDocs[1].doc).get("value"));
-    assertNull(searcher.doc(td.scoreDocs[2].doc).get("value"));
+    assertEquals("4.2", storedFields.document(td.scoreDocs[0].doc).get("value"));
+    assertEquals("1.3", storedFields.document(td.scoreDocs[1].doc).get("value"));
+    assertNull(storedFields.document(td.scoreDocs[2].doc).get("value"));
 
     ir.close();
     dir.close();

@@ -168,13 +172,14 @@ public class TestFeatureSort extends LuceneTestCase {
 
     IndexSearcher searcher = newSearcher(ir);
     Sort sort = new Sort(FeatureField.newFeatureSort("field", "name"));
+    StoredFields storedFields = searcher.storedFields();
 
     TopDocs td = searcher.search(new MatchAllDocsQuery(), 10, sort);
     assertEquals(3, td.totalHits.value);
     // null is treated as 0
-    assertEquals("4.2", searcher.doc(td.scoreDocs[0].doc).get("value"));
-    assertEquals("1.3", searcher.doc(td.scoreDocs[1].doc).get("value"));
-    assertNull(searcher.doc(td.scoreDocs[2].doc).get("value"));
+    assertEquals("4.2", storedFields.document(td.scoreDocs[0].doc).get("value"));
+    assertEquals("1.3", storedFields.document(td.scoreDocs[1].doc).get("value"));
+    assertNull(storedFields.document(td.scoreDocs[2].doc).get("value"));
 
     ir.close();
     dir.close();

@@ -208,17 +213,18 @@ public class TestFeatureSort extends LuceneTestCase {
 
     IndexSearcher searcher = newSearcher(ir);
     Sort sort = new Sort(FeatureField.newFeatureSort("field", "name"));
+    StoredFields storedFields = searcher.storedFields();
 
     TopDocs td = searcher.search(new MatchAllDocsQuery(), 10, sort);
     assertEquals(7, td.totalHits.value);
     // null is treated as 0
-    assertEquals("4.2", searcher.doc(td.scoreDocs[0].doc).get("value"));
-    assertEquals("1.3", searcher.doc(td.scoreDocs[1].doc).get("value"));
-    assertNull(searcher.doc(td.scoreDocs[2].doc).get("value"));
-    assertNull(searcher.doc(td.scoreDocs[3].doc).get("value"));
-    assertNull(searcher.doc(td.scoreDocs[4].doc).get("value"));
-    assertNull(searcher.doc(td.scoreDocs[5].doc).get("value"));
-    assertNull(searcher.doc(td.scoreDocs[6].doc).get("value"));
+    assertEquals("4.2", storedFields.document(td.scoreDocs[0].doc).get("value"));
+    assertEquals("1.3", storedFields.document(td.scoreDocs[1].doc).get("value"));
+    assertNull(storedFields.document(td.scoreDocs[2].doc).get("value"));
+    assertNull(storedFields.document(td.scoreDocs[3].doc).get("value"));
+    assertNull(storedFields.document(td.scoreDocs[4].doc).get("value"));
+    assertNull(storedFields.document(td.scoreDocs[5].doc).get("value"));
+    assertNull(storedFields.document(td.scoreDocs[6].doc).get("value"));
 
     ir.close();
     dir.close();
@@ -505,7 +505,7 @@ public class TestField extends LuceneTestCase {
     IndexSearcher s = newSearcher(r);
     TopDocs hits = s.search(new TermQuery(new Term("binary", br)), 1);
     assertEquals(1, hits.totalHits.value);
-    Document storedDoc = s.doc(hits.scoreDocs[0].doc);
+    Document storedDoc = s.storedFields().document(hits.scoreDocs[0].doc);
     assertEquals(br, storedDoc.getField("binary").binaryValue());
 
     r.close();

@@ -26,6 +26,7 @@ import java.util.Arrays;
 import org.apache.lucene.index.IndexReader;
 import org.apache.lucene.index.IndexWriterConfig;
 import org.apache.lucene.index.SerialMergeScheduler;
+import org.apache.lucene.index.StoredFields;
 import org.apache.lucene.search.FieldDoc;
 import org.apache.lucene.search.IndexSearcher;
 import org.apache.lucene.search.MatchAllDocsQuery;

@@ -209,6 +210,7 @@ public class TestLatLonPointDistanceSort extends LuceneTestCase {
     IndexReader reader = writer.getReader();
     IndexSearcher searcher = newSearcher(reader);
 
+    StoredFields storedFields = reader.storedFields();
     for (int i = 0; i < numQueries; i++) {
       double lat = GeoTestUtil.nextLatitude();
       double lon = GeoTestUtil.nextLongitude();

@@ -217,7 +219,7 @@ public class TestLatLonPointDistanceSort extends LuceneTestCase {
       Result[] expected = new Result[reader.maxDoc()];
 
       for (int doc = 0; doc < reader.maxDoc(); doc++) {
-        Document targetDoc = reader.document(doc);
+        Document targetDoc = storedFields.document(doc);
         final double distance;
         if (targetDoc.getField("lat") == null) {
           distance = missingValue; // missing

@@ -106,7 +106,7 @@ public class Test4GBStoredFields extends LuceneTestCase {
     }
 
     DirectoryReader rd = DirectoryReader.open(dir);
-    Document sd = rd.document(numDocs - 1);
+    Document sd = rd.storedFields().document(numDocs - 1);
     assertNotNull(sd);
     assertEquals(1, sd.getFields().size());
     BytesRef valueRef = sd.getBinaryValue("fld");

@@ -1547,8 +1547,9 @@ public class TestAddIndexes extends LuceneTestCase {
     IndexReader r3 = w.getReader();
     w.close();
     assertEquals(2, r3.numDocs());
+    StoredFields storedFields = r3.storedFields();
     for (int docID = 0; docID < 2; docID++) {
-      Document d = r3.document(docID);
+      Document d = storedFields.document(docID);
       if (d.get("id").equals("1")) {
         assertEquals("doc1 field1", d.get("f1"));
       } else {

@@ -758,11 +758,12 @@ public class TestBinaryDocValuesUpdates extends LuceneTestCase {
       BinaryDocValues values = leafReader.getBinaryDocValues("number");
       NumericDocValues sortValues = leafReader.getNumericDocValues("sort");
       Bits liveDocs = leafReader.getLiveDocs();
+      StoredFields storedFields = leafReader.storedFields();
 
       long lastSortValue = Long.MIN_VALUE;
       for (int i = 0; i < leafReader.maxDoc(); i++) {
 
-        Document doc = leafReader.document(i);
+        Document doc = storedFields.document(i);
         OneSortDoc sortDoc = docs.get(Integer.parseInt(doc.get("id")));
 
         assertEquals(i, values.nextDoc());

@@ -58,7 +58,7 @@ public class TestBinaryTerms extends LuceneTestCase {
       bytes.length = 2;
       TopDocs docs = is.search(new TermQuery(new Term("bytes", bytes)), 5);
       assertEquals(1, docs.totalHits.value);
-      assertEquals("" + i, is.doc(docs.scoreDocs[0].doc).get("id"));
+      assertEquals("" + i, is.storedFields().document(docs.scoreDocs[0].doc).get("id"));
     }
 
     ir.close();

@@ -72,8 +72,9 @@ public class TestCustomNorms extends LuceneTestCase {
     DirectoryReader open = DirectoryReader.open(dir);
     NumericDocValues norms = MultiDocValues.getNormValues(open, FLOAT_TEST_FIELD);
     assertNotNull(norms);
+    StoredFields storedFields = open.storedFields();
     for (int i = 0; i < open.maxDoc(); i++) {
-      Document document = open.document(i);
+      Document document = storedFields.document(i);
       int expected = Integer.parseInt(document.get(FLOAT_TEST_FIELD).split(" ")[0]);
       assertEquals(i, norms.nextDoc());
       assertEquals(expected, norms.longValue());

@@ -438,7 +438,7 @@ public class TestCustomTermFreq extends LuceneTestCase {
 
     IndexReader r = DirectoryReader.open(w);
 
-    Fields fields = r.getTermVectors(0);
+    Fields fields = r.termVectors().get(0);
     TermsEnum termsEnum = fields.terms("field").iterator();
     assertTrue(termsEnum.seekExact(newBytesRef("bar")));
     assertEquals(228, termsEnum.totalTermFreq());

@@ -456,7 +456,7 @@ public class TestCustomTermFreq extends LuceneTestCase {
     assertEquals(59, postings.freq());
     assertEquals(NO_MORE_DOCS, postings.nextDoc());
 
-    fields = r.getTermVectors(1);
+    fields = r.termVectors().get(1);
     termsEnum = fields.terms("field").iterator();
     assertTrue(termsEnum.seekExact(newBytesRef("bar")));
     assertEquals(140, termsEnum.totalTermFreq());
@@ -794,11 +794,12 @@ public class TestDemoParallelLeafReader extends LuceneTestCase {
         iwc.setMergePolicy(new LogByteSizeMergePolicy());
         IndexWriter w = new IndexWriter(parallelDir, iwc);
         int maxDoc = reader.maxDoc();
+        StoredFields storedFields = reader.storedFields();
 
         // Slowly parse the stored field into a new doc values field:
         for (int i = 0; i < maxDoc; i++) {
           // TODO: is this still O(blockSize^2)?
-          Document oldDoc = reader.document(i);
+          Document oldDoc = storedFields.document(i);
           Document newDoc = new Document();
           long value = Long.parseLong(oldDoc.get("text").split(" ")[1]);
           newDoc.add(new NumericDocValuesField("number", value));

@@ -850,12 +851,13 @@ public class TestDemoParallelLeafReader extends LuceneTestCase {
         iwc.setMergePolicy(new LogByteSizeMergePolicy());
         IndexWriter w = new IndexWriter(parallelDir, iwc);
         int maxDoc = reader.maxDoc();
+        StoredFields storedFields = reader.storedFields();
 
         if (oldSchemaGen <= 0) {
           // Must slowly parse the stored field into a new doc values field:
           for (int i = 0; i < maxDoc; i++) {
             // TODO: is this still O(blockSize^2)?
-            Document oldDoc = reader.document(i);
+            Document oldDoc = storedFields.document(i);
             Document newDoc = new Document();
             long value = Long.parseLong(oldDoc.get("text").split(" ")[1]);
             newDoc.add(new NumericDocValuesField("number_" + newSchemaGen, value));

@@ -869,7 +871,7 @@ public class TestDemoParallelLeafReader extends LuceneTestCase {
           for (int i = 0; i < maxDoc; i++) {
             // TODO: is this still O(blockSize^2)?
             assertEquals(i, oldValues.nextDoc());
-            reader.document(i);
+            storedFields.document(i);
             Document newDoc = new Document();
             newDoc.add(new NumericDocValuesField("number_" + newSchemaGen, oldValues.longValue()));
             w.addDocument(newDoc);

@@ -904,9 +906,10 @@ public class TestDemoParallelLeafReader extends LuceneTestCase {
           return;
         }
         int maxDoc = r.maxDoc();
+        StoredFields storedFields = r.storedFields();
         boolean failed = false;
         for (int i = 0; i < maxDoc; i++) {
-          Document oldDoc = r.document(i);
+          Document oldDoc = storedFields.document(i);
           long value = Long.parseLong(oldDoc.get("text").split(" ")[1]);
           assertEquals(i, numbers.nextDoc());
           if (value != numbers.longValue()) {

@@ -978,12 +981,13 @@ public class TestDemoParallelLeafReader extends LuceneTestCase {
         iwc.setMergePolicy(new LogByteSizeMergePolicy());
         IndexWriter w = new IndexWriter(parallelDir, iwc);
         int maxDoc = reader.maxDoc();
+        StoredFields storedFields = reader.storedFields();
 
         if (oldSchemaGen <= 0) {
           // Must slowly parse the stored field into a new doc values field:
           for (int i = 0; i < maxDoc; i++) {
             // TODO: is this still O(blockSize^2)?
-            Document oldDoc = reader.document(i);
+            Document oldDoc = storedFields.document(i);
             Document newDoc = new Document();
             long value = Long.parseLong(oldDoc.get("text").split(" ")[1]);
             newDoc.add(new NumericDocValuesField("number", newSchemaGen * value));

@@ -995,7 +999,7 @@ public class TestDemoParallelLeafReader extends LuceneTestCase {
           assertNotNull("oldSchemaGen=" + oldSchemaGen, oldValues);
           for (int i = 0; i < maxDoc; i++) {
             // TODO: is this still O(blockSize^2)?
-            reader.document(i);
+            storedFields.document(i);
             Document newDoc = new Document();
             assertEquals(i, oldValues.nextDoc());
             long value = newSchemaGen * (oldValues.longValue() / oldSchemaGen);

@@ -1035,9 +1039,10 @@ public class TestDemoParallelLeafReader extends LuceneTestCase {
           return;
         }
         int maxDoc = r.maxDoc();
+        StoredFields storedFields = r.storedFields();
         boolean failed = false;
         for (int i = 0; i < maxDoc; i++) {
-          Document oldDoc = r.document(i);
+          Document oldDoc = storedFields.document(i);
           long value = Long.parseLong(oldDoc.get("text").split(" ")[1]);
           value *= schemaGen;
           assertEquals(i, numbers.nextDoc());

@@ -1348,8 +1353,9 @@ public class TestDemoParallelLeafReader extends LuceneTestCase {
       NumericDocValues numbers = leaf.getNumericDocValues("number");
       if (numbers != null) {
         int maxDoc = leaf.maxDoc();
+        StoredFields storedFields = leaf.storedFields();
         for (int i = 0; i < maxDoc; i++) {
-          Document doc = leaf.document(i);
+          Document doc = storedFields.document(i);
           long value = Long.parseLong(doc.get("text").split(" ")[1]);
           assertEquals(i, numbers.nextDoc());
           long dvValue = numbers.longValue();

@@ -1516,9 +1522,10 @@ public class TestDemoParallelLeafReader extends LuceneTestCase {
       IndexReader r, String fieldName, boolean doThrow, int multiplier) throws IOException {
     NumericDocValues numbers = MultiDocValues.getNumericValues(r, fieldName);
     int maxDoc = r.maxDoc();
+    StoredFields storedFields = r.storedFields();
     boolean failed = false;
     for (int i = 0; i < maxDoc; i++) {
-      Document oldDoc = r.document(i);
+      Document oldDoc = storedFields.document(i);
       long value = multiplier * Long.parseLong(oldDoc.get("text").split(" ")[1]);
       assertEquals(i, numbers.nextDoc());
      if (value != numbers.longValue()) {

@@ -1560,9 +1567,10 @@ public class TestDemoParallelLeafReader extends LuceneTestCase {
     TopDocs hits =
         s.search(
             new MatchAllDocsQuery(), 100, new Sort(new SortField("number", SortField.Type.LONG)));
+    StoredFields storedFields = s.storedFields();
     long last = Long.MIN_VALUE;
     for (ScoreDoc scoreDoc : hits.scoreDocs) {
-      long value = Long.parseLong(s.doc(scoreDoc.doc).get("text").split(" ")[1]);
+      long value = Long.parseLong(storedFields.document(scoreDoc.doc).get("text").split(" ")[1]);
       assertTrue(value >= last);
       assertEquals(value, ((Long) ((FieldDoc) scoreDoc).fields[0]).longValue());
       last = value;

@@ -1581,8 +1589,9 @@ public class TestDemoParallelLeafReader extends LuceneTestCase {
     }
 
     TopDocs hits = s.search(LongPoint.newRangeQuery("number", min, max), 100);
+    StoredFields storedFields = s.storedFields();
     for (ScoreDoc scoreDoc : hits.scoreDocs) {
-      long value = Long.parseLong(s.doc(scoreDoc.doc).get("text").split(" ")[1]);
+      long value = Long.parseLong(storedFields.document(scoreDoc.doc).get("text").split(" ")[1]);
       assertTrue(value >= min);
       assertTrue(value <= max);
     }

@@ -1602,7 +1611,7 @@ public class TestDemoParallelLeafReader extends LuceneTestCase {
         numbers.advance(hit.doc);
       }
       assertEquals(hit.doc, numbers.docID());
-      long value = Long.parseLong(s.doc(hit.doc).get("text").split(" ")[1]);
+      long value = Long.parseLong(storedFields.document(hit.doc).get("text").split(" ")[1]);
       assertEquals(value, numbers.longValue());
     }
   }
@@ -66,16 +66,17 @@ public class TestDirectoryReader extends LuceneTestCase {
     DirectoryReader reader = DirectoryReader.open(dir);
     assertTrue(reader != null);
     assertTrue(reader instanceof StandardDirectoryReader);
+    StoredFields storedFields = reader.storedFields();
 
-    Document newDoc1 = reader.document(0);
+    Document newDoc1 = storedFields.document(0);
     assertTrue(newDoc1 != null);
     assertTrue(
         DocHelper.numFields(newDoc1) == DocHelper.numFields(doc1) - DocHelper.unstored.size());
-    Document newDoc2 = reader.document(1);
+    Document newDoc2 = storedFields.document(1);
     assertTrue(newDoc2 != null);
     assertTrue(
         DocHelper.numFields(newDoc2) == DocHelper.numFields(doc2) - DocHelper.unstored.size());
-    Terms vector = reader.getTermVectors(0).terms(DocHelper.TEXT_FIELD_2_KEY);
+    Terms vector = reader.termVectors().get(0).terms(DocHelper.TEXT_FIELD_2_KEY);
     assertNotNull(vector);
 
     reader.close();

@@ -393,7 +394,7 @@ public class TestDirectoryReader extends LuceneTestCase {
     writer.addDocument(doc);
     writer.close();
     DirectoryReader reader = DirectoryReader.open(dir);
-    Document doc2 = reader.document(reader.maxDoc() - 1);
+    Document doc2 = reader.storedFields().document(reader.maxDoc() - 1);
     IndexableField[] fields = doc2.getFields("bin1");
     assertNotNull(fields);
     assertEquals(1, fields.length);

@@ -416,7 +417,7 @@ public class TestDirectoryReader extends LuceneTestCase {
     writer.forceMerge(1);
     writer.close();
     reader = DirectoryReader.open(dir);
-    doc2 = reader.document(reader.maxDoc() - 1);
+    doc2 = reader.storedFields().document(reader.maxDoc() - 1);
     fields = doc2.getFields("bin1");
     assertNotNull(fields);
     assertEquals(1, fields.length);

@@ -612,10 +613,12 @@ public class TestDirectoryReader extends LuceneTestCase {
     }
 
     // check stored fields
+    StoredFields storedFields1 = index1.storedFields();
+    StoredFields storedFields2 = index2.storedFields();
     for (int i = 0; i < index1.maxDoc(); i++) {
       if (liveDocs1 == null || liveDocs1.get(i)) {
-        Document doc1 = index1.document(i);
-        Document doc2 = index2.document(i);
+        Document doc1 = storedFields1.document(i);
+        Document doc2 = storedFields2.document(i);
         List<IndexableField> field1 = doc1.getFields();
         List<IndexableField> field2 = doc2.getFields();
         assertEquals(

@@ -998,11 +1001,11 @@ public class TestDirectoryReader extends LuceneTestCase {
     writer.addDocument(new Document());
     DirectoryReader r = DirectoryReader.open(writer);
     writer.close();
-    r.document(0);
+    r.storedFields().document(0);
     expectThrows(
         IllegalArgumentException.class,
         () -> {
-          r.document(1);
+          r.storedFields().document(1);
         });
     r.close();
     dir.close();

@@ -1084,9 +1087,9 @@ public class TestDirectoryReader extends LuceneTestCase {
     DirectoryReader r = writer.getReader();
     writer.close();
     Set<String> fieldsToLoad = new HashSet<>();
-    assertEquals(0, r.document(0, fieldsToLoad).getFields().size());
+    assertEquals(0, r.storedFields().document(0, fieldsToLoad).getFields().size());
     fieldsToLoad.add("field1");
-    Document doc2 = r.document(0, fieldsToLoad);
+    Document doc2 = r.storedFields().document(0, fieldsToLoad);
     assertEquals(1, doc2.getFields().size());
     assertEquals("foobar", doc2.get("field1"));
     r.close();
@ -27,6 +27,7 @@ import java.util.Map;
import java.util.Random;
import java.util.Set;
import org.apache.lucene.document.Document;
import org.apache.lucene.document.DocumentStoredFieldVisitor;
import org.apache.lucene.document.Field;
import org.apache.lucene.document.FieldType;
import org.apache.lucene.document.NumericDocValuesField;

@ -132,7 +133,9 @@ public class TestDirectoryReaderReopen extends LuceneTestCase {
if (i > 0) {
int k = i - 1;
int n = j + k * M;
Document prevItereationDoc = reader.document(n);
final DocumentStoredFieldVisitor visitor = new DocumentStoredFieldVisitor();
reader.storedFields().document(n, visitor);
Document prevItereationDoc = visitor.getDocument();
assertNotNull(prevItereationDoc);
String id = prevItereationDoc.get("id");
assertEquals(k + "_" + j, id);

@ -285,7 +288,7 @@ public class TestDirectoryReaderReopen extends LuceneTestCase {
1000)
.scoreDocs;
if (hits.length > 0) {
searcher.doc(hits[0].doc);
searcher.storedFields().document(hits[0].doc);
}
if (refreshed != r) {
refreshed.close();
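The reopen test above also shows the visitor variant: `StoredFields.document(int, StoredFieldVisitor)` streams the stored fields of one document into a caller-supplied visitor. A hedged sketch of that shape, with an illustrative helper name:

```java
import java.io.IOException;
import org.apache.lucene.document.Document;
import org.apache.lucene.document.DocumentStoredFieldVisitor;
import org.apache.lucene.index.IndexReader;

class VisitorSketch {
  static Document load(IndexReader reader, int docId) throws IOException {
    // Collect all stored fields of one document into a Document via the visitor.
    DocumentStoredFieldVisitor visitor = new DocumentStoredFieldVisitor();
    reader.storedFields().document(docId, visitor);
    return visitor.getDocument();
  }
}
```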
@ -258,7 +258,10 @@ public class TestDoc extends LuceneTestCase {
private void printSegment(PrintWriter out, SegmentCommitInfo si) throws Exception {
SegmentReader reader = new SegmentReader(si, Version.LATEST.major, newIOContext(random()));

for (int i = 0; i < reader.numDocs(); i++) out.println(reader.document(i));
StoredFields storedFields = reader.storedFields();
for (int i = 0; i < reader.numDocs(); i++) {
out.println(storedFields.document(i));
}

for (FieldInfo fieldInfo : reader.getFieldInfos()) {
if (fieldInfo.getIndexOptions() == IndexOptions.NONE) {
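The TestDoc hunk illustrates the loop shape the new API encourages: acquire `StoredFields` once, outside the loop, rather than once per document. A condensed sketch, with an illustrative method name:

```java
import java.io.IOException;
import org.apache.lucene.index.IndexReader;
import org.apache.lucene.index.StoredFields;

class LoopSketch {
  static void printAll(IndexReader reader) throws IOException {
    StoredFields storedFields = reader.storedFields(); // acquired once, reused below
    for (int i = 0; i < reader.maxDoc(); i++) {
      System.out.println(storedFields.document(i));
    }
  }
}
```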
@ -212,10 +212,11 @@ public class TestDocValuesIndexing extends LuceneTestCase {
FieldInfo dvInfo = fi.fieldInfo("dv");
assertTrue(dvInfo.getDocValuesType() != DocValuesType.NONE);
NumericDocValues dv = MultiDocValues.getNumericValues(r, "dv");
StoredFields storedFields = r.storedFields();
for (int i = 0; i < 50; i++) {
assertEquals(i, dv.nextDoc());
assertEquals(i, dv.longValue());
Document d = r.document(i);
Document d = storedFields.document(i);
// cannot use d.get("dv") due to another bug!
assertNull(d.getField("dv"));
assertEquals(Integer.toString(i), d.get("docId"));
@ -82,7 +82,7 @@ public class TestDocumentWriter extends LuceneTestCase {
// After adding the document, we should be able to read it back in
SegmentReader reader = new SegmentReader(info, Version.LATEST.major, newIOContext(random()));
assertTrue(reader != null);
Document doc = reader.document(0);
Document doc = reader.storedFields().document(0);
assertTrue(doc != null);

// System.out.println("Document: " + doc);
@ -90,7 +90,7 @@ public class TestFieldsReader extends LuceneTestCase {
assertTrue(dir != null);
assertTrue(fieldInfos != null);
IndexReader reader = DirectoryReader.open(dir);
Document doc = reader.document(0);
Document doc = reader.storedFields().document(0);
assertTrue(doc != null);
assertTrue(doc.getField(DocHelper.TEXT_FIELD_1_KEY) != null);

@ -114,7 +114,7 @@ public class TestFieldsReader extends LuceneTestCase {
assertTrue(field.fieldType().indexOptions() == IndexOptions.DOCS);

DocumentStoredFieldVisitor visitor = new DocumentStoredFieldVisitor(DocHelper.TEXT_FIELD_3_KEY);
reader.document(0, visitor);
reader.storedFields().document(0, visitor);
final List<IndexableField> fields = visitor.getDocument().getFields();
assertEquals(1, fields.size());
assertEquals(DocHelper.TEXT_FIELD_3_KEY, fields.get(0).name());

@ -214,9 +214,10 @@ public class TestFieldsReader extends LuceneTestCase {

boolean exc = false;

StoredFields storedFields = reader.storedFields();
for (int i = 0; i < 2; i++) {
try {
reader.document(i);
storedFields.document(i);
} catch (
@SuppressWarnings("unused")
IOException ioe) {

@ -224,7 +225,7 @@ public class TestFieldsReader extends LuceneTestCase {
exc = true;
}
try {
reader.document(i);
storedFields.document(i);
} catch (
@SuppressWarnings("unused")
IOException ioe) {
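TestDirectoryReader above also exercises the field-subset overload, `StoredFields.document(int, Set<String>)`, which loads only the requested stored fields. A small sketch; the field name "field1" is just an example:

```java
import java.io.IOException;
import java.util.Set;
import org.apache.lucene.document.Document;
import org.apache.lucene.index.IndexReader;

class SelectiveLoadSketch {
  static String loadField1(IndexReader reader, int docId) throws IOException {
    // Only "field1" is loaded into the returned Document; other stored fields are skipped.
    Document doc = reader.storedFields().document(docId, Set.of("field1"));
    return doc.get("field1");
  }
}
```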
@ -1770,6 +1770,7 @@ public class TestIndexSorting extends LuceneTestCase {

// Now check that the index is consistent
IndexSearcher searcher = newSearcher(reader);
StoredFields storedFields = reader.storedFields();
for (int i = 0; i < numDocs; ++i) {
TermQuery termQuery = new TermQuery(new Term("id", Integer.toString(i)));
final TopDocs topDocs = searcher.search(termQuery, 1);

@ -1780,7 +1781,7 @@ public class TestIndexSorting extends LuceneTestCase {
NumericDocValues values = MultiDocValues.getNumericValues(reader, "id");
assertEquals(topDocs.scoreDocs[0].doc, values.advance(topDocs.scoreDocs[0].doc));
assertEquals(i, values.longValue());
Document document = reader.document(topDocs.scoreDocs[0].doc);
Document document = storedFields.document(topDocs.scoreDocs[0].doc);
assertEquals(Integer.toString(i), document.get("id"));
}
}

@ -1821,6 +1822,7 @@ public class TestIndexSorting extends LuceneTestCase {
DirectoryReader reader = DirectoryReader.open(w);
// Now check that the index is consistent
IndexSearcher searcher = newSearcher(reader);
StoredFields storedFields = reader.storedFields();
for (int i = 0; i < numDocs; ++i) {
TermQuery termQuery = new TermQuery(new Term("id", Integer.toString(i)));
final TopDocs topDocs = searcher.search(termQuery, 1);

@ -1831,7 +1833,7 @@ public class TestIndexSorting extends LuceneTestCase {
NumericDocValues values = MultiDocValues.getNumericValues(reader, "id");
assertEquals(topDocs.scoreDocs[0].doc, values.advance(topDocs.scoreDocs[0].doc));
assertEquals(i, values.longValue());
Document document = reader.document(topDocs.scoreDocs[0].doc);
Document document = storedFields.document(topDocs.scoreDocs[0].doc);
assertEquals(Integer.toString(i), document.get("id"));
}
}

@ -2634,7 +2636,7 @@ public class TestIndexSorting extends LuceneTestCase {
System.out.println("TEST: full index:");
SortedDocValues docValues = MultiDocValues.getSortedValues(r2, "bytes");
for(int i=0;i<r2.maxDoc();i++) {
System.out.println(" doc " + i + " id=" + r2.document(i).get("id") + " bytes=" + docValues.get(i));
System.out.println(" doc " + i + " id=" + r2.storedFields().document(i).get("id") + " bytes=" + docValues.get(i));
}
*/

@ -2665,10 +2667,13 @@ public class TestIndexSorting extends LuceneTestCase {
}

assertEquals(hits2.scoreDocs.length, hits1.scoreDocs.length);
StoredFields storedFields1 = r1.storedFields();
StoredFields storedFields2 = r2.storedFields();
for (int i = 0; i < hits2.scoreDocs.length; i++) {
ScoreDoc hit1 = hits1.scoreDocs[i];
ScoreDoc hit2 = hits2.scoreDocs[i];
assertEquals(r1.document(hit1.doc).get("id"), r2.document(hit2.doc).get("id"));
assertEquals(
storedFields1.document(hit1.doc).get("id"), storedFields2.document(hit2.doc).get("id"));
assertArrayEquals(((FieldDoc) hit1).fields, ((FieldDoc) hit2).fields);
}
}

@ -2699,6 +2704,7 @@ public class TestIndexSorting extends LuceneTestCase {
}
w.forceMerge(1);
DirectoryReader r = DirectoryReader.open(w);
StoredFields storedFields = r.storedFields();
for (int docID = 0; docID < 1000; docID++) {
int expectedID;
if (docID < 500) {

@ -2706,7 +2712,8 @@ public class TestIndexSorting extends LuceneTestCase {
} else {
expectedID = docID - 500;
}
assertEquals(expectedID, r.document(docID).getField("id").numericValue().intValue());
assertEquals(
expectedID, storedFields.document(docID).getField("id").numericValue().intValue());
}
IOUtils.close(r, w, dir);
}

@ -2879,6 +2886,7 @@ public class TestIndexSorting extends LuceneTestCase {
if (values == null) {
continue;
}
StoredFields storedFields = leafCtx.reader().storedFields();
for (int id = 0; id < leafCtx.reader().maxDoc(); id++) {
if (liveDocs != null && liveDocs.get(id) == false) {
continue;

@ -2886,8 +2894,7 @@ public class TestIndexSorting extends LuceneTestCase {
if (values.advanceExact(id) == false) {
continue;
}
int globalId =
Integer.parseInt(leafCtx.reader().document(id).getField("id").stringValue());
int globalId = Integer.parseInt(storedFields.document(id).getField("id").stringValue());
assertTrue(values.advanceExact(id));
assertEquals(expectedValues[globalId], values.longValue());
docCount++;
@ -811,7 +811,7 @@ public class TestIndexWriter extends LuceneTestCase {
w.close();

IndexReader r = DirectoryReader.open(dir);
Terms tpv = r.getTermVectors(0).terms("field");
Terms tpv = r.termVectors().get(0).terms("field");
TermsEnum termsEnum = tpv.iterator();
assertNotNull(termsEnum.next());
PostingsEnum dpEnum = termsEnum.postings(null, PostingsEnum.ALL);

@ -1208,20 +1208,21 @@ public class TestIndexWriter extends LuceneTestCase {
w.close();

IndexReader ir = DirectoryReader.open(dir);
Document doc2 = ir.document(0);
StoredFields storedFields = ir.storedFields();
Document doc2 = storedFields.document(0);
IndexableField f3 = doc2.getField("binary");
b = f3.binaryValue().bytes;
assertTrue(b != null);
assertEquals(17, b.length, 17);
assertEquals(87, b[0]);

assertTrue(ir.document(0).getField("binary").binaryValue() != null);
assertTrue(ir.document(1).getField("binary").binaryValue() != null);
assertTrue(ir.document(2).getField("binary").binaryValue() != null);
assertTrue(storedFields.document(0).getField("binary").binaryValue() != null);
assertTrue(storedFields.document(1).getField("binary").binaryValue() != null);
assertTrue(storedFields.document(2).getField("binary").binaryValue() != null);

assertEquals("value", ir.document(0).get("string"));
assertEquals("value", ir.document(1).get("string"));
assertEquals("value", ir.document(2).get("string"));
assertEquals("value", storedFields.document(0).get("string"));
assertEquals("value", storedFields.document(1).get("string"));
assertEquals("value", storedFields.document(2).get("string"));

// test that the terms were indexed.
assertTrue(

@ -2112,9 +2113,10 @@ public class TestIndexWriter extends LuceneTestCase {
LeafReader ar = leafReaderContext.reader();
Bits liveDocs = ar.getLiveDocs();
int maxDoc = ar.maxDoc();
StoredFields storedFields = ar.storedFields();
for (int i = 0; i < maxDoc; i++) {
if (liveDocs == null || liveDocs.get(i)) {
assertTrue(liveIds.remove(ar.document(i).get("id")));
assertTrue(liveIds.remove(storedFields.document(i).get("id")));
}
}
}

@ -2160,9 +2162,10 @@ public class TestIndexWriter extends LuceneTestCase {
LeafReader ar = leafReaderContext.reader();
Bits liveDocs = ar.getLiveDocs();
int maxDoc = ar.maxDoc();
StoredFields storedFields = ar.storedFields();
for (int i = 0; i < maxDoc; i++) {
if (liveDocs == null || liveDocs.get(i)) {
assertTrue(liveIds.remove(ar.document(i).get("id")));
assertTrue(liveIds.remove(storedFields.document(i).get("id")));
}
}
}

@ -3346,7 +3349,7 @@ public class TestIndexWriter extends LuceneTestCase {
IndexSearcher searcher = new IndexSearcher(reader);
TopDocs topDocs = searcher.search(new TermQuery(new Term("id", "1")), 10);
assertEquals(1, topDocs.totalHits.value);
Document document = reader.document(topDocs.scoreDocs[0].doc);
Document document = reader.storedFields().document(topDocs.scoreDocs[0].doc);
assertEquals("2", document.get("version"));

// update the on-disk version

@ -3362,7 +3365,7 @@ public class TestIndexWriter extends LuceneTestCase {
searcher = new IndexSearcher(reader);
topDocs = searcher.search(new TermQuery(new Term("id", "1")), 10);
assertEquals(1, topDocs.totalHits.value);
document = reader.document(topDocs.scoreDocs[0].doc);
document = reader.storedFields().document(topDocs.scoreDocs[0].doc);
assertEquals("3", document.get("version"));

// now delete it
@ -698,11 +698,13 @@ public class TestIndexWriterExceptions extends LuceneTestCase {
int numDel = 0;
final Bits liveDocs = MultiBits.getLiveDocs(reader);
assertNotNull(liveDocs);
StoredFields storedFields = reader.storedFields();
TermVectors termVectors = reader.termVectors();
for (int j = 0; j < reader.maxDoc(); j++) {
if (!liveDocs.get(j)) numDel++;
else {
reader.document(j);
reader.getTermVectors(j);
storedFields.document(j);
termVectors.get(j);
}
}
assertEquals(1, numDel);

@ -722,9 +724,11 @@ public class TestIndexWriterExceptions extends LuceneTestCase {
assertEquals(expected, reader.maxDoc());
int numDel = 0;
assertNull(MultiBits.getLiveDocs(reader));
StoredFields storedFields = reader.storedFields();
TermVectors termVectors = reader.termVectors();
for (int j = 0; j < reader.maxDoc(); j++) {
reader.document(j);
reader.getTermVectors(j);
storedFields.document(j);
termVectors.get(j);
}
reader.close();
assertEquals(0, numDel);

@ -879,11 +883,13 @@ public class TestIndexWriterExceptions extends LuceneTestCase {
int numDel = 0;
final Bits liveDocs = MultiBits.getLiveDocs(reader);
assertNotNull(liveDocs);
StoredFields storedFields = reader.storedFields();
TermVectors termVectors = reader.termVectors();
for (int j = 0; j < reader.maxDoc(); j++) {
if (!liveDocs.get(j)) numDel++;
else {
reader.document(j);
reader.getTermVectors(j);
storedFields.document(j);
termVectors.get(j);
}
}
reader.close();

@ -903,9 +909,11 @@ public class TestIndexWriterExceptions extends LuceneTestCase {
assertEquals(expected, reader.docFreq(new Term("contents", "here")));
assertEquals(expected, reader.maxDoc());
assertNull(MultiBits.getLiveDocs(reader));
storedFields = reader.storedFields();
termVectors = reader.termVectors();
for (int j = 0; j < reader.maxDoc(); j++) {
reader.document(j);
reader.getTermVectors(j);
storedFields.document(j);
termVectors.get(j);
}
reader.close();
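The exception tests repeat one scan shape: walk every live document and touch both stored fields and term vectors through the new accessors. A condensed, hedged sketch of that shape, assuming only the calls visible in the hunks above:

```java
import java.io.IOException;
import org.apache.lucene.index.IndexReader;
import org.apache.lucene.index.MultiBits;
import org.apache.lucene.index.StoredFields;
import org.apache.lucene.index.TermVectors;
import org.apache.lucene.util.Bits;

class ScanSketch {
  static void scan(IndexReader reader) throws IOException {
    Bits liveDocs = MultiBits.getLiveDocs(reader); // null when no docs are deleted
    StoredFields storedFields = reader.storedFields();
    TermVectors termVectors = reader.termVectors();
    for (int j = 0; j < reader.maxDoc(); j++) {
      if (liveDocs == null || liveDocs.get(j)) {
        storedFields.document(j); // fetch stored fields for the live doc
        termVectors.get(j); // fetch its term vectors; may be null if none were indexed
      }
    }
  }
}
```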
@ -76,8 +76,9 @@ public class TestIndexWriterMerging extends LuceneTestCase {
IndexReader reader = DirectoryReader.open(directory);

int max = reader.maxDoc();
StoredFields storedFields = reader.storedFields();
for (int i = 0; i < max; i++) {
Document temp = reader.document(i);
Document temp = storedFields.document(i);
// System.out.println("doc "+i+"="+temp.getField("count").stringValue());
// compare the index doc number to the value that it should be
if (!temp.getField("count").stringValue().equals((i + startAt) + "")) {
@ -155,9 +155,9 @@ public class TestIndexWriterReader extends LuceneTestCase {
DirectoryReader r1 = DirectoryReader.open(writer);
assertTrue(r1.isCurrent());

String id10 = r1.document(10).getField("id").stringValue();
String id10 = r1.storedFields().document(10).getField("id").stringValue();

Document newDoc = r1.document(10);
Document newDoc = r1.storedFields().document(10);
newDoc.removeField("id");
newDoc.add(new Field("id", Integer.toString(8000), DocHelper.STRING_TYPE_STORED_WITH_TVS));
writer.updateDocument(new Term("id", id10), newDoc);

@ -287,9 +287,9 @@ public class TestIndexWriterReader extends LuceneTestCase {
assertEquals(100, index2df);

// verify the docs are from different indexes
Document doc5 = r1.document(5);
Document doc5 = r1.storedFields().document(5);
assertEquals("index1", doc5.get("indexname"));
Document doc150 = r1.document(150);
Document doc150 = r1.storedFields().document(150);
assertEquals("index2", doc150.get("indexname"));
r1.close();
writer.close();

@ -345,7 +345,7 @@ public class TestIndexWriterReader extends LuceneTestCase {
// get a reader
IndexReader r1 = DirectoryReader.open(writer);

String id10 = r1.document(10).getField("id").stringValue();
String id10 = r1.storedFields().document(10).getField("id").stringValue();

// deleted IW docs should not show up in the next getReader
writer.deleteDocuments(new Term("id", id10));

@ -353,7 +353,7 @@ public class TestIndexWriterReader extends LuceneTestCase {
assertEquals(1, count(new Term("id", id10), r1));
assertEquals(0, count(new Term("id", id10), r2));

String id50 = r1.document(50).getField("id").stringValue();
String id50 = r1.storedFields().document(50).getField("id").stringValue();
assertEquals(1, count(new Term("id", id50), r1));

writer.deleteDocuments(new Term("id", id50));

@ -362,7 +362,7 @@ public class TestIndexWriterReader extends LuceneTestCase {
assertEquals(0, count(new Term("id", id10), r3));
assertEquals(0, count(new Term("id", id50), r3));

String id75 = r1.document(75).getField("id").stringValue();
String id75 = r1.storedFields().document(75).getField("id").stringValue();
writer.deleteDocuments(new TermQuery(new Term("id", id75)));
IndexReader r4 = DirectoryReader.open(writer);
assertEquals(1, count(new Term("id", id75), r3));

@ -608,7 +608,7 @@ public class TestIndexWriterReader extends LuceneTestCase {
*
* public static int deleteDocument(Term term, IndexWriter writer) throws
* IOException { IndexReader reader = writer.getReader(); TermDocs td =
* reader.termDocs(term); int doc = -1; //if (td.next()) { // doc = td.doc();
* reader.termDocs(term); int doc = -1; //if (td.next()) { // doc = td.storedFields().document();
* //} //writer.deleteDocuments(term); td.close(); return doc; }
*/
@ -255,7 +255,7 @@ public class TestIndexWriterUnicode extends LuceneTestCase {
w.close();

IndexReader ir = DirectoryReader.open(dir);
Document doc2 = ir.document(0);
Document doc2 = ir.storedFields().document(0);
for (int i = 0; i < count; i++) {
assertEquals(
"field " + i + " was not indexed correctly",
@ -324,10 +324,12 @@ public class TestIndexWriterWithThreads extends LuceneTestCase {
if (success) {
IndexReader reader = DirectoryReader.open(dir);
final Bits delDocs = MultiBits.getLiveDocs(reader);
StoredFields storedFields = reader.storedFields();
TermVectors termVectors = reader.termVectors();
for (int j = 0; j < reader.maxDoc(); j++) {
if (delDocs == null || !delDocs.get(j)) {
reader.document(j);
reader.getTermVectors(j);
storedFields.document(j);
termVectors.get(j);
}
}
reader.close();
@ -251,6 +251,8 @@ public class TestIndexableField extends LuceneTestCase {
w.close();

final IndexSearcher s = newSearcher(r);
StoredFields storedFields = s.storedFields();
TermVectors termVectors = r.termVectors();
int counter = 0;
for (int id = 0; id < NUM_DOCS; id++) {
if (VERBOSE) {

@ -261,7 +263,7 @@ public class TestIndexableField extends LuceneTestCase {
final TopDocs hits = s.search(new TermQuery(new Term("id", "" + id)), 1);
assertEquals(1, hits.totalHits.value);
final int docID = hits.scoreDocs[0].doc;
final Document doc = s.doc(docID);
final Document doc = storedFields.document(docID);
final int endCounter = counter + fieldsPerDoc[id];
while (counter < endCounter) {
final String name = "f" + counter;

@ -299,7 +301,7 @@ public class TestIndexableField extends LuceneTestCase {
if (indexed) {
final boolean tv = counter % 2 == 1 && fieldID != 9;
if (tv) {
final Terms tfv = r.getTermVectors(docID).terms(name);
final Terms tfv = termVectors.get(docID).terms(name);
assertNotNull(tfv);
TermsEnum termsEnum = tfv.iterator();
assertEquals(newBytesRef("" + counter), termsEnum.next());

@ -321,7 +323,7 @@ public class TestIndexableField extends LuceneTestCase {
// TODO: offsets

} else {
Fields vectors = r.getTermVectors(docID);
Fields vectors = termVectors.get(docID);
assertTrue(vectors == null || vectors.terms(name) == null);
}
@ -128,7 +128,7 @@ public class TestIndexingSequenceNumbers extends LuceneTestCase {
IndexSearcher s = newSearcher(r);
TopDocs hits = s.search(new TermQuery(id), 1);
assertEquals("maxDoc: " + r.maxDoc(), 1, hits.totalHits.value);
Document doc = r.document(hits.scoreDocs[0].doc);
Document doc = r.storedFields().document(hits.scoreDocs[0].doc);
assertEquals(maxThread, doc.getField("thread").numericValue().intValue());
r.close();
w.close();

@ -277,7 +277,7 @@ public class TestIndexingSequenceNumbers extends LuceneTestCase {

if (expectedThreadIDs[id] != -1) {
assertEquals(1, hits.totalHits.value);
Document doc = r.document(hits.scoreDocs[0].doc);
Document doc = r.storedFields().document(hits.scoreDocs[0].doc);
int actualThreadID = doc.getField("thread").numericValue().intValue();
if (expectedThreadIDs[id] != actualThreadID) {
System.out.println(

@ -688,19 +688,21 @@ public class TestIndexingSequenceNumbers extends LuceneTestCase {
}
}
TopDocs hits = s.search(new TermQuery(new Term("id", "" + id)), 1 + actualCount);
StoredFields storedFields = s.storedFields();
for (ScoreDoc hit : hits.scoreDocs) {
System.out.println(" hit: " + s.doc(hit.doc).get("threadop"));
System.out.println(" hit: " + storedFields.document(hit.doc).get("threadop"));
}

for (LeafReaderContext ctx : r.leaves()) {
System.out.println(" sub=" + ctx.reader());
Bits liveDocs = ctx.reader().getLiveDocs();
storedFields = ctx.reader().storedFields();
for (int docID = 0; docID < ctx.reader().maxDoc(); docID++) {
System.out.println(
" docID="
+ docID
+ " threadop="
+ ctx.reader().document(docID).get("threadop")
+ storedFields.document(docID).get("threadop")
+ (liveDocs != null && liveDocs.get(docID) == false ? " (deleted)" : ""));
}
}
@ -397,10 +397,10 @@ public class TestKnnGraph extends LuceneTestCase {
try {
KnnVectorQuery query = new KnnVectorQuery("vector", new float[] {0f, 0.1f}, 5);
TopDocs results = searcher.search(query, 5);
StoredFields storedFields = searcher.storedFields();
for (ScoreDoc doc : results.scoreDocs) {
// map docId to insertion id
doc.doc =
Integer.parseInt(searcher.getIndexReader().document(doc.doc).get("id"));
doc.doc = Integer.parseInt(storedFields.document(doc.doc).get("id"));
}
assertResults(new int[] {0, 15, 3, 18, 5}, results);
} finally {

@ -423,9 +423,10 @@ public class TestKnnGraph extends LuceneTestCase {
private void assertGraphSearch(int[] expected, float[] vector, IndexReader reader)
throws IOException {
TopDocs results = doKnnSearch(reader, vector, 5);
StoredFields storedFields = reader.storedFields();
for (ScoreDoc doc : results.scoreDocs) {
// map docId to insertion id
doc.doc = Integer.parseInt(reader.document(doc.doc).get("id"));
doc.doc = Integer.parseInt(storedFields.document(doc.doc).get("id"));
}
assertResults(expected, results);
}

@ -489,17 +490,18 @@ public class TestKnnGraph extends LuceneTestCase {
// assert vector values:
// stored vector values are the same as original
int nextDocWithVectors = 0;
StoredFields storedFields = reader.storedFields();
for (int i = 0; i < reader.maxDoc(); i++) {
nextDocWithVectors = vectorValues.advance(i);
while (i < nextDocWithVectors && i < reader.maxDoc()) {
int id = Integer.parseInt(reader.document(i).get("id"));
int id = Integer.parseInt(storedFields.document(i).get("id"));
assertNull("document " + id + " has no vector, but was expected to", values[id]);
++i;
}
if (nextDocWithVectors == NO_MORE_DOCS) {
break;
}
int id = Integer.parseInt(reader.document(i).get("id"));
int id = Integer.parseInt(storedFields.document(i).get("id"));
// documents with KnnGraphValues have the expected vectors
float[] scratch = vectorValues.vectorValue();
assertArrayEquals(
@ -307,7 +307,8 @@ public class TestMixedDocValuesUpdates extends LuceneTestCase {
long bdvValue = TestBinaryDocValuesUpdates.getValue(bdv) * 2;
// if (ctrlValue != bdvValue) {
// System.out.println("seg=" + r + ", f=f" + i + ", doc=" + j + ",
// group=" + r.document(j).get("updKey") + ", ctrlValue=" + ctrlValue + ", bdvBytes=" +
// group=" + r.storedFields().document(j).get("updKey") + ", ctrlValue=" + ctrlValue +
// ", bdvBytes=" +
// scratch);
// }
assertEquals(ctrlValue, bdvValue);

@ -676,8 +677,9 @@ public class TestMixedDocValuesUpdates extends LuceneTestCase {

TopDocs is_live = searcher.search(new FieldExistsQuery("is_live"), 5);
assertEquals(numHits, is_live.totalHits.value);
StoredFields storedFields = reader.storedFields();
for (ScoreDoc doc : is_live.scoreDocs) {
int id = Integer.parseInt(reader.document(doc.doc).get("id"));
int id = Integer.parseInt(storedFields.document(doc.doc).get("id"));
int i = ReaderUtil.subIndex(doc.doc, reader.leaves());
assertTrue(i >= 0);
LeafReaderContext leafReaderContext = reader.leaves().get(i);
@ -48,8 +48,9 @@ public class TestNorms extends LuceneTestCase {
DirectoryReader open = DirectoryReader.open(dir);
NumericDocValues normValues = MultiDocValues.getNormValues(open, BYTE_TEST_FIELD);
assertNotNull(normValues);
StoredFields storedFields = open.storedFields();
for (int i = 0; i < open.maxDoc(); i++) {
Document document = open.document(i);
Document document = storedFields.document(i);
int expected = Integer.parseInt(document.get(BYTE_TEST_FIELD).split(" ")[0]);
assertEquals(i, normValues.nextDoc());
assertEquals(expected, normValues.longValue());
@ -755,8 +755,9 @@ public class TestNumericDocValuesUpdates extends LuceneTestCase {
if (VERBOSE) {
System.out.println("TEST: maxDoc=" + r.maxDoc());
}
StoredFields storedFields = r.storedFields();
for (int i = 0; i < r.maxDoc(); i++) {
Document rdoc = r.document(i);
Document rdoc = storedFields.document(i);
assertEquals(i, ndv.nextDoc());
assertEquals("docid=" + i + " has wrong ndv value; doc=" + rdoc, value, ndv.longValue());
}

@ -904,11 +905,12 @@ public class TestNumericDocValuesUpdates extends LuceneTestCase {
NumericDocValues values = leafReader.getNumericDocValues("number");
NumericDocValues sortValues = leafReader.getNumericDocValues("sort");
Bits liveDocs = leafReader.getLiveDocs();
StoredFields storedFields = leafReader.storedFields();

long lastSortValue = Long.MIN_VALUE;
for (int i = 0; i < leafReader.maxDoc(); i++) {

Document doc = leafReader.document(i);
Document doc = storedFields.document(i);
OneSortDoc sortDoc = docs.get(Integer.parseInt(doc.get("id")));

assertEquals(i, values.nextDoc());

@ -1027,6 +1029,7 @@ public class TestNumericDocValuesUpdates extends LuceneTestCase {
for (LeafReaderContext context : reader.leaves()) {
LeafReader r = context.reader();
Bits liveDocs = r.getLiveDocs();
StoredFields storedFields = r.storedFields();
for (int field = 0; field < fieldValues.length; field++) {
String f = "f" + field;
NumericDocValues ndv = r.getNumericDocValues(f);

@ -1039,13 +1042,13 @@ public class TestNumericDocValuesUpdates extends LuceneTestCase {
"invalid value for docID="
+ doc
+ " id="
+ r.document(doc).get("id")
+ storedFields.document(doc).get("id")
+ ", field="
+ f
+ ", reader="
+ r
+ " doc="
+ r.document(doc),
+ storedFields.document(doc),
fieldValues[field],
ndv.longValue());
}
@ -209,13 +209,13 @@ public class TestParallelCompositeReader extends LuceneTestCase {
expectThrows(
AlreadyClosedException.class,
() -> {
psub.document(0);
psub.storedFields().document(0);
});

expectThrows(
AlreadyClosedException.class,
() -> {
pr.document(0);
pr.storedFields().document(0);
});

// noop:

@ -299,10 +299,10 @@ public class TestParallelCompositeReader extends LuceneTestCase {
ParallelCompositeReader pr =
new ParallelCompositeReader(
false, new CompositeReader[] {ir1, ir2}, new CompositeReader[] {ir1});
assertEquals("v1", pr.document(0).get("f1"));
assertEquals("v1", pr.document(0).get("f2"));
assertNull(pr.document(0).get("f3"));
assertNull(pr.document(0).get("f4"));
assertEquals("v1", pr.storedFields().document(0).get("f1"));
assertEquals("v1", pr.storedFields().document(0).get("f2"));
assertNull(pr.storedFields().document(0).get("f3"));
assertNull(pr.storedFields().document(0).get("f4"));
// check that fields are there
assertNotNull(MultiTerms.getTerms(pr, "f1"));
assertNotNull(MultiTerms.getTerms(pr, "f2"));

@ -312,10 +312,10 @@ public class TestParallelCompositeReader extends LuceneTestCase {

// no stored fields at all
pr = new ParallelCompositeReader(false, new CompositeReader[] {ir2}, new CompositeReader[0]);
assertNull(pr.document(0).get("f1"));
assertNull(pr.document(0).get("f2"));
assertNull(pr.document(0).get("f3"));
assertNull(pr.document(0).get("f4"));
assertNull(pr.storedFields().document(0).get("f1"));
assertNull(pr.storedFields().document(0).get("f2"));
assertNull(pr.storedFields().document(0).get("f3"));
assertNull(pr.storedFields().document(0).get("f4"));
// check that fields are there
assertNull(MultiTerms.getTerms(pr, "f1"));
assertNull(MultiTerms.getTerms(pr, "f2"));

@ -326,10 +326,10 @@ public class TestParallelCompositeReader extends LuceneTestCase {
// without overlapping
pr =
new ParallelCompositeReader(true, new CompositeReader[] {ir2}, new CompositeReader[] {ir1});
assertEquals("v1", pr.document(0).get("f1"));
assertEquals("v1", pr.document(0).get("f2"));
assertNull(pr.document(0).get("f3"));
assertNull(pr.document(0).get("f4"));
assertEquals("v1", pr.storedFields().document(0).get("f1"));
assertEquals("v1", pr.storedFields().document(0).get("f2"));
assertNull(pr.storedFields().document(0).get("f3"));
assertNull(pr.storedFields().document(0).get("f4"));
// check that fields are there
assertNull(MultiTerms.getTerms(pr, "f1"));
assertNull(MultiTerms.getTerms(pr, "f2"));

@ -380,10 +380,12 @@ public class TestParallelCompositeReader extends LuceneTestCase {
ScoreDoc[] parallelHits = parallel.search(query, 1000).scoreDocs;
ScoreDoc[] singleHits = single.search(query, 1000).scoreDocs;
assertEquals(parallelHits.length, singleHits.length);
StoredFields parallelFields = parallel.storedFields();
StoredFields singleFields = single.storedFields();
for (int i = 0; i < parallelHits.length; i++) {
assertEquals(parallelHits[i].score, singleHits[i].score, 0.001f);
Document docParallel = parallel.doc(parallelHits[i].doc);
Document docSingle = single.doc(singleHits[i].doc);
Document docParallel = parallelFields.document(parallelHits[i].doc);
Document docSingle = singleFields.document(singleHits[i].doc);
assertEquals(docParallel.get("f1"), docSingle.get("f1"));
assertEquals(docParallel.get("f2"), docSingle.get("f2"));
assertEquals(docParallel.get("f3"), docSingle.get("f3"));
@ -144,7 +144,7 @@ public class TestParallelLeafReader extends LuceneTestCase {
expectThrows(
AlreadyClosedException.class,
() -> {
pr.document(0);
pr.storedFields().document(0);
});

// noop:

@ -200,10 +200,10 @@ public class TestParallelLeafReader extends LuceneTestCase {
// with overlapping
ParallelLeafReader pr =
new ParallelLeafReader(false, new LeafReader[] {ir1, ir2}, new LeafReader[] {ir1});
assertEquals("v1", pr.document(0).get("f1"));
assertEquals("v1", pr.document(0).get("f2"));
assertNull(pr.document(0).get("f3"));
assertNull(pr.document(0).get("f4"));
assertEquals("v1", pr.storedFields().document(0).get("f1"));
assertEquals("v1", pr.storedFields().document(0).get("f2"));
assertNull(pr.storedFields().document(0).get("f3"));
assertNull(pr.storedFields().document(0).get("f4"));
// check that fields are there
assertNotNull(pr.terms("f1"));
assertNotNull(pr.terms("f2"));

@ -213,10 +213,10 @@ public class TestParallelLeafReader extends LuceneTestCase {

// no stored fields at all
pr = new ParallelLeafReader(false, new LeafReader[] {ir2}, new LeafReader[0]);
assertNull(pr.document(0).get("f1"));
assertNull(pr.document(0).get("f2"));
assertNull(pr.document(0).get("f3"));
assertNull(pr.document(0).get("f4"));
assertNull(pr.storedFields().document(0).get("f1"));
assertNull(pr.storedFields().document(0).get("f2"));
assertNull(pr.storedFields().document(0).get("f3"));
assertNull(pr.storedFields().document(0).get("f4"));
// check that fields are there
assertNull(pr.terms("f1"));
assertNull(pr.terms("f2"));

@ -226,10 +226,10 @@ public class TestParallelLeafReader extends LuceneTestCase {

// without overlapping
pr = new ParallelLeafReader(true, new LeafReader[] {ir2}, new LeafReader[] {ir1});
assertEquals("v1", pr.document(0).get("f1"));
assertEquals("v1", pr.document(0).get("f2"));
assertNull(pr.document(0).get("f3"));
assertNull(pr.document(0).get("f4"));
assertEquals("v1", pr.storedFields().document(0).get("f1"));
assertEquals("v1", pr.storedFields().document(0).get("f2"));
assertNull(pr.storedFields().document(0).get("f3"));
assertNull(pr.storedFields().document(0).get("f4"));
// check that fields are there
assertNull(pr.terms("f1"));
assertNull(pr.terms("f2"));

@ -252,10 +252,12 @@ public class TestParallelLeafReader extends LuceneTestCase {
ScoreDoc[] parallelHits = parallel.search(query, 1000).scoreDocs;
ScoreDoc[] singleHits = single.search(query, 1000).scoreDocs;
assertEquals(parallelHits.length, singleHits.length);
StoredFields parallelFields = parallel.storedFields();
StoredFields singleFields = single.storedFields();
for (int i = 0; i < parallelHits.length; i++) {
assertEquals(parallelHits[i].score, singleHits[i].score, 0.001f);
Document docParallel = parallel.doc(parallelHits[i].doc);
Document docSingle = single.doc(singleHits[i].doc);
Document docParallel = parallelFields.document(parallelHits[i].doc);
Document docSingle = singleFields.document(singleHits[i].doc);
assertEquals(docParallel.get("f1"), docSingle.get("f1"));
assertEquals(docParallel.get("f2"), docSingle.get("f2"));
assertEquals(docParallel.get("f3"), docSingle.get("f3"));
@ -67,7 +67,7 @@ public class TestPayloadsOnVectors extends LuceneTestCase {
writer.addDocument(doc);

DirectoryReader reader = writer.getReader();
Terms terms = reader.getTermVector(1, "field");
Terms terms = reader.termVectors().get(1, "field");
assert terms != null;
TermsEnum termsEnum = terms.iterator();
assertTrue(termsEnum.seekExact(new BytesRef("withPayload")));

@ -109,7 +109,7 @@ public class TestPayloadsOnVectors extends LuceneTestCase {
doc.add(field3);
writer.addDocument(doc);
DirectoryReader reader = writer.getReader();
Terms terms = reader.getTermVector(0, "field");
Terms terms = reader.termVectors().get(0, "field");
assert terms != null;
TermsEnum termsEnum = terms.iterator();
assertTrue(termsEnum.seekExact(new BytesRef("withPayload")));
@ -156,9 +156,10 @@ public class TestPostingsOffsets extends LuceneTestCase {

for (String term : terms) {
PostingsEnum dp = MultiTerms.getTermPostingsEnum(reader, "numbers", new BytesRef(term));
StoredFields storedFields = reader.storedFields();
int doc;
while ((doc = dp.nextDoc()) != DocIdSetIterator.NO_MORE_DOCS) {
String storedNumbers = reader.document(doc).get("numbers");
String storedNumbers = storedFields.document(doc).get("numbers");
int freq = dp.freq();
for (int i = 0; i < freq; i++) {
dp.nextPosition();

@ -184,11 +185,12 @@ public class TestPostingsOffsets extends LuceneTestCase {
for (int j = 0; j < numSkippingTests; j++) {
int num = TestUtil.nextInt(random(), 100, Math.min(numDocs - 1, 999));
PostingsEnum dp = MultiTerms.getTermPostingsEnum(reader, "numbers", new BytesRef("hundred"));
StoredFields storedFields = reader.storedFields();
int doc = dp.advance(num);
assertEquals(num, doc);
int freq = dp.freq();
for (int i = 0; i < freq; i++) {
String storedNumbers = reader.document(doc).get("numbers");
String storedNumbers = storedFields.document(doc).get("numbers");
dp.nextPosition();
int start = dp.startOffset();
assert start >= 0;
@ -89,8 +89,9 @@ public class TestReadOnlyIndex extends LuceneTestCase {
TopDocs hits = isearcher.search(query, 1);
assertEquals(1, hits.totalHits.value);
// Iterate through the results:
StoredFields storedFields = isearcher.storedFields();
for (int i = 0; i < hits.scoreDocs.length; i++) {
Document hitDoc = isearcher.doc(hits.scoreDocs[i].doc);
Document hitDoc = storedFields.document(hits.scoreDocs[i].doc);
assertEquals(text, hitDoc.get("fieldname"));
}
@ -117,12 +117,12 @@ public class TestSegmentMerger extends LuceneTestCase {
newIOContext(random()));
assertTrue(mergedReader != null);
assertTrue(mergedReader.numDocs() == 2);
Document newDoc1 = mergedReader.document(0);
Document newDoc1 = mergedReader.storedFields().document(0);
assertTrue(newDoc1 != null);
// There are 2 unstored fields on the document
assertTrue(
DocHelper.numFields(newDoc1) == DocHelper.numFields(doc1) - DocHelper.unstored.size());
Document newDoc2 = mergedReader.document(1);
Document newDoc2 = mergedReader.storedFields().document(1);
assertTrue(newDoc2 != null);
assertTrue(
DocHelper.numFields(newDoc2) == DocHelper.numFields(doc2) - DocHelper.unstored.size());

@ -143,7 +143,7 @@ public class TestSegmentMerger extends LuceneTestCase {
// System.out.println("stored size: " + stored.size());
assertEquals("We do not have 3 fields that were indexed with term vector", 3, tvCount);

Terms vector = mergedReader.getTermVectors(0).terms(DocHelper.TEXT_FIELD_2_KEY);
Terms vector = mergedReader.termVectors().get(0).terms(DocHelper.TEXT_FIELD_2_KEY);
assertNotNull(vector);
assertEquals(3, vector.size());
TermsEnum termsEnum = vector.iterator();
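Term vector access migrates the same way, as the TestSegmentMerger and TestPayloadsOnVectors hunks show: `TermVectors.get(docID)` replaces `getTermVectors`, and `get(docID, field)` replaces `getTermVector`. A final hedged sketch, with an illustrative helper name:

```java
import java.io.IOException;
import org.apache.lucene.index.IndexReader;
import org.apache.lucene.index.TermVectors;
import org.apache.lucene.index.Terms;

class TermVectorsSketch {
  static long vectorTermCount(IndexReader reader, int docId, String field) throws IOException {
    TermVectors termVectors = reader.termVectors();
    Terms terms = termVectors.get(docId, field); // null if no vector was indexed for this field
    return terms == null ? 0 : terms.size();
  }
}
```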
Some files were not shown because too many files have changed in this diff.