mirror of https://github.com/apache/lucene.git
LUCENE-8671: Load FST off-heap if reader is not opened from an index writer (#610)
Today we never load FSTs of ID-like fields off-heap, since updates need very fast access to them. Yet a reader that is not opened from an IndexWriter can leave these FSTs on disk as well. This change adds that information to SegmentReadState so the postings format can make the decision without any configuration.
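For illustration, the heuristic now implemented in the block-tree FieldReader boils down to the following sketch (simplified; the helper class and method are hypothetical, but the FST, outputs, and off-heap store classes are the real ones used by the change):

import java.io.IOException;

import org.apache.lucene.store.ByteBufferIndexInput;
import org.apache.lucene.store.IndexInput;
import org.apache.lucene.util.BytesRef;
import org.apache.lucene.util.fst.ByteSequenceOutputs;
import org.apache.lucene.util.fst.FST;
import org.apache.lucene.util.fst.OffHeapFSTStore;

// Hypothetical helper mirroring the decision made in FieldReader's constructor.
final class FstLoadSketch {
  static FST<BytesRef> loadTermIndex(IndexInput termsIndexInput, int docCount, long sumDocFreq,
                                     boolean openedFromWriter) throws IOException {
    // Load off-heap only when the input is memory-mapped (MMapDirectory) and either the
    // field is not ID-like (docCount != sumDocFreq) or the reader was not opened from
    // an IndexWriter.
    boolean offHeap = termsIndexInput instanceof ByteBufferIndexInput
        && (docCount != sumDocFreq || openedFromWriter == false);
    if (offHeap) {
      return new FST<>(termsIndexInput, ByteSequenceOutputs.getSingleton(), new OffHeapFSTStore());
    }
    return new FST<>(termsIndexInput, ByteSequenceOutputs.getSingleton());
  }
}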
parent 2a1ed6e484
commit 14175c46d2
@@ -68,6 +68,11 @@ Improvements
 * LUCENE-8631: The Korean's user dictionary now picks the longest-matching word and discards
   the other matches. (Yeongsu Kim via Jim Ferenczi)

+Changes in Runtime Behavior
+
+* LUCENE-8671: Load FST off-heap also for ID-like fields if reader is not opened
+  from an IndexWriter. (Simon Willnauer)
+
 Other

 * LUCENE-8680: Refactor EdgeTree#relateTriangle method. (Ignacio Vera)
@@ -193,7 +193,7 @@ public final class BlockTreeTermsReader extends FieldsProducer {
 final long indexStartFP = indexIn.readVLong();
 FieldReader previous = fields.put(fieldInfo.name,
 new FieldReader(this, fieldInfo, numTerms, rootCode, sumTotalTermFreq, sumDocFreq, docCount,
-indexStartFP, longsSize, indexIn, minTerm, maxTerm));
+indexStartFP, longsSize, indexIn, minTerm, maxTerm, state.openedFromWriter));
 if (previous != null) {
 throw new CorruptIndexException("duplicate field: " + fieldInfo.name, termsIn);
 }
@@ -63,10 +63,11 @@ public final class FieldReader extends Terms implements Accountable {
 final BlockTreeTermsReader parent;

 final FST<BytesRef> index;
+final boolean isFSTOffHeap;
 //private boolean DEBUG;

 FieldReader(BlockTreeTermsReader parent, FieldInfo fieldInfo, long numTerms, BytesRef rootCode, long sumTotalTermFreq, long sumDocFreq, int docCount,
-long indexStartFP, int longsSize, IndexInput indexIn, BytesRef minTerm, BytesRef maxTerm) throws IOException {
+long indexStartFP, int longsSize, IndexInput indexIn, BytesRef minTerm, BytesRef maxTerm, boolean openedFromWriter) throws IOException {
 assert numTerms > 0;
 this.fieldInfo = fieldInfo;
 //DEBUG = BlockTreeTermsReader.DEBUG && fieldInfo.name.equals("id");

@@ -91,7 +92,8 @@ public final class FieldReader extends Terms implements Accountable {
 clone.seek(indexStartFP);
 // Initialize FST offheap if index is MMapDirectory and
 // docCount != sumDocFreq implying field is not primary key
-if (clone instanceof ByteBufferIndexInput && this.docCount != this.sumDocFreq) {
+isFSTOffHeap = clone instanceof ByteBufferIndexInput && ((this.docCount != this.sumDocFreq) || openedFromWriter == false);
+if (isFSTOffHeap) {
 index = new FST<>(clone, ByteSequenceOutputs.getSingleton(), new OffHeapFSTStore());
 } else {
 index = new FST<>(clone, ByteSequenceOutputs.getSingleton());

@@ -108,6 +110,7 @@ public final class FieldReader extends Terms implements Accountable {
 */
 } else {
 index = null;
+isFSTOffHeap = false;
 }
 }

@@ -212,4 +215,12 @@ public final class FieldReader extends Terms implements Accountable {
 public String toString() {
 return "BlockTreeTerms(seg=" + parent.segment +" terms=" + numTerms + ",postings=" + sumDocFreq + ",positions=" + sumTotalTermFreq + ",docs=" + docCount + ")";
 }
+
+/**
+ * Returns <code>true</code> iff the FST is read off-heap.
+ */
+public boolean isFstOffHeap() {
+return isFSTOffHeap;
+}
+
 }
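As an aside, the new FieldReader#isFstOffHeap() accessor shown above makes the outcome observable. A minimal usage sketch, modeled on the test added further below (the helper class and method name are invented for illustration):

import java.io.IOException;

import org.apache.lucene.codecs.blocktree.FieldReader;
import org.apache.lucene.index.DirectoryReader;
import org.apache.lucene.index.LeafReaderContext;

// Hypothetical helper: reports whether the terms index of "field" is off-heap in
// every segment of an already-open reader. The cast assumes the default codec's
// block-tree terms dictionary is in use.
final class FstOffHeapCheck {
  static boolean allOffHeap(DirectoryReader reader, String field) throws IOException {
    for (LeafReaderContext leaf : reader.leaves()) {
      FieldReader terms = (FieldReader) leaf.reader().terms(field);
      if (terms == null || terms.isFstOffHeap() == false) {
        return false;
      }
    }
    return true;
  }
}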
@@ -673,7 +673,7 @@ public final class CheckIndex implements Closeable {
 long startOpenReaderNS = System.nanoTime();
 if (infoStream != null)
 infoStream.print(" test: open reader.........");
-reader = new SegmentReader(info, sis.getIndexCreatedVersionMajor(), IOContext.DEFAULT);
+reader = new SegmentReader(info, sis.getIndexCreatedVersionMajor(), false, IOContext.DEFAULT);
 msg(infoStream, String.format(Locale.ROOT, "OK [took %.3f sec]", nsToSec(System.nanoTime()-startOpenReaderNS)));

 segInfoStat.openReaderPassed = true;
@@ -131,7 +131,7 @@ final class DefaultIndexingChain extends DocConsumer {
 if (docState.infoStream.isEnabled("IW")) {
 docState.infoStream.message("IW", ((System.nanoTime()-t0)/1000000) + " msec to write norms");
 }
-SegmentReadState readState = new SegmentReadState(state.directory, state.segmentInfo, state.fieldInfos, IOContext.READ, state.segmentSuffix);
+SegmentReadState readState = new SegmentReadState(state.directory, state.segmentInfo, state.fieldInfos, true, IOContext.READ, state.segmentSuffix);

 t0 = System.nanoTime();
 writeDocValues(state, sortMap);
@@ -169,7 +169,7 @@ final class ReadersAndUpdates {
 public synchronized SegmentReader getReader(IOContext context) throws IOException {
 if (reader == null) {
 // We steal returned ref:
-reader = new SegmentReader(info, indexCreatedVersionMajor, context);
+reader = new SegmentReader(info, indexCreatedVersionMajor, true, context);
 pendingDeletes.onNewReader(reader, info);
 }

@@ -536,7 +536,7 @@ final class ReadersAndUpdates {
 // IndexWriter.commitMergedDeletes).
 final SegmentReader reader;
 if (this.reader == null) {
-reader = new SegmentReader(info, indexCreatedVersionMajor, IOContext.READONCE);
+reader = new SegmentReader(info, indexCreatedVersionMajor, true, IOContext.READONCE);
 pendingDeletes.onNewReader(reader, info);
 } else {
 reader = this.reader;
@@ -89,7 +89,7 @@ final class SegmentCoreReaders {
 private final Set<IndexReader.ClosedListener> coreClosedListeners =
 Collections.synchronizedSet(new LinkedHashSet<IndexReader.ClosedListener>());

-SegmentCoreReaders(Directory dir, SegmentCommitInfo si, IOContext context) throws IOException {
+SegmentCoreReaders(Directory dir, SegmentCommitInfo si, boolean openedFromWriter, IOContext context) throws IOException {

 final Codec codec = si.info.getCodec();
 final Directory cfsDir; // confusing name: if (cfs) it's the cfsdir, otherwise it's the segment's directory.

@@ -107,7 +107,7 @@ final class SegmentCoreReaders {

 coreFieldInfos = codec.fieldInfosFormat().read(cfsDir, si.info, "", context);

-final SegmentReadState segmentReadState = new SegmentReadState(cfsDir, si.info, coreFieldInfos, context);
+final SegmentReadState segmentReadState = new SegmentReadState(cfsDir, si.info, coreFieldInfos, openedFromWriter, context);
 final PostingsFormat format = codec.postingsFormat();
 // Ask codec for its Fields
 fields = format.fieldsProducer(segmentReadState);
@@ -46,7 +46,7 @@ final class SegmentDocValues {
 }

 // set SegmentReadState to list only the fields that are relevant to that gen
-SegmentReadState srs = new SegmentReadState(dvDir, si.info, infos, IOContext.READ, segmentSuffix);
+SegmentReadState srs = new SegmentReadState(dvDir, si.info, infos, false, IOContext.READ, segmentSuffix);
 DocValuesFormat dvFormat = si.info.getCodec().docValuesFormat();
 return new RefCount<DocValuesProducer>(dvFormat.fieldsProducer(srs)) {
 @SuppressWarnings("synthetic-access")
@@ -112,7 +112,7 @@ final class SegmentMerger {
 final SegmentWriteState segmentWriteState = new SegmentWriteState(mergeState.infoStream, directory, mergeState.segmentInfo,
 mergeState.mergeFieldInfos, null, context);
 final SegmentReadState segmentReadState = new SegmentReadState(directory, mergeState.segmentInfo, mergeState.mergeFieldInfos,
-IOContext.READ, segmentWriteState.segmentSuffix);
+true, IOContext.READ, segmentWriteState.segmentSuffix);

 if (mergeState.mergeFieldInfos.hasNorms()) {
 if (mergeState.infoStream.isEnabled("SM")) {
@@ -49,23 +49,29 @@ public class SegmentReadState {
 * {@link IndexFileNames#segmentFileName(String,String,String)}). */
 public final String segmentSuffix;

+/**
+ * True iff this SegmentReadState is opened from an IndexWriter.
+ */
+public final boolean openedFromWriter;
+
 /** Create a {@code SegmentReadState}. */
 public SegmentReadState(Directory dir, SegmentInfo info,
-FieldInfos fieldInfos, IOContext context) {
-this(dir, info, fieldInfos, context, "");
+FieldInfos fieldInfos, boolean openedFromWriter, IOContext context) {
+this(dir, info, fieldInfos, openedFromWriter, context, "");
 }

 /** Create a {@code SegmentReadState}. */
 public SegmentReadState(Directory dir,
 SegmentInfo info,
 FieldInfos fieldInfos,
-IOContext context,
+boolean openedFromWriter, IOContext context,
 String segmentSuffix) {
 this.directory = dir;
 this.segmentInfo = info;
 this.fieldInfos = fieldInfos;
 this.context = context;
 this.segmentSuffix = segmentSuffix;
+this.openedFromWriter = openedFromWriter;
 }

 /** Create a {@code SegmentReadState}. */

@@ -75,6 +81,7 @@ public class SegmentReadState {
 this.segmentInfo = other.segmentInfo;
 this.fieldInfos = other.fieldInfos;
 this.context = other.context;
+this.openedFromWriter = other.openedFromWriter;
 this.segmentSuffix = newSegmentSuffix;
 }
 }
@@ -72,7 +72,7 @@ public final class SegmentReader extends CodecReader {
 * @throws CorruptIndexException if the index is corrupt
 * @throws IOException if there is a low-level IO error
 */
-SegmentReader(SegmentCommitInfo si, int createdVersionMajor, IOContext context) throws IOException {
+SegmentReader(SegmentCommitInfo si, int createdVersionMajor, boolean openedFromWriter, IOContext context) throws IOException {
 this.si = si.clone();
 this.originalSi = si;
 this.metaData = new LeafMetaData(createdVersionMajor, si.info.getMinVersion(), si.info.getIndexSort());

@@ -80,7 +80,7 @@ public final class SegmentReader extends CodecReader {
 // We pull liveDocs/DV updates from disk:
 this.isNRT = false;

-core = new SegmentCoreReaders(si.info.dir, si, context);
+core = new SegmentCoreReaders(si.info.dir, si, openedFromWriter, context);
 segDocValues = new SegmentDocValues();

 boolean success = false;
@@ -63,7 +63,7 @@ public final class StandardDirectoryReader extends DirectoryReader {
 boolean success = false;
 try {
 for (int i = sis.size()-1; i >= 0; i--) {
-readers[i] = new SegmentReader(sis.info(i), sis.getIndexCreatedVersionMajor(), IOContext.READ);
+readers[i] = new SegmentReader(sis.info(i), sis.getIndexCreatedVersionMajor(), false, IOContext.READ);
 }

 // This may throw CorruptIndexException if there are too many docs, so

@@ -176,7 +176,7 @@ public final class StandardDirectoryReader extends DirectoryReader {
 SegmentReader newReader;
 if (oldReader == null || commitInfo.info.getUseCompoundFile() != oldReader.getSegmentInfo().info.getUseCompoundFile()) {
 // this is a new reader; in case we hit an exception we can decRef it safely
-newReader = new SegmentReader(commitInfo, infos.getIndexCreatedVersionMajor(), IOContext.READ);
+newReader = new SegmentReader(commitInfo, infos.getIndexCreatedVersionMajor(), false, IOContext.READ);
 newReaders[i] = newReader;
 } else {
 if (oldReader.isNRT) {
@@ -18,6 +18,7 @@ package org.apache.lucene.codecs.lucene50;


 import java.io.IOException;
+import java.nio.file.Path;
 import java.util.Arrays;
 import java.util.Collections;
 import java.util.List;

@@ -35,11 +36,15 @@ import org.apache.lucene.index.DirectoryReader;
 import org.apache.lucene.index.Impact;
 import org.apache.lucene.index.IndexWriter;
 import org.apache.lucene.index.IndexWriterConfig;
+import org.apache.lucene.index.LeafReaderContext;
+import org.apache.lucene.index.Term;
 import org.apache.lucene.store.ByteArrayDataInput;
 import org.apache.lucene.store.Directory;
 import org.apache.lucene.store.IOContext;
 import org.apache.lucene.store.IndexInput;
 import org.apache.lucene.store.IndexOutput;
+import org.apache.lucene.store.MMapDirectory;
+import org.apache.lucene.store.SimpleFSDirectory;
 import org.apache.lucene.util.TestUtil;

 /**
@@ -52,6 +57,72 @@ public class TestBlockPostingsFormat extends BasePostingsFormatTestCase {
   protected Codec getCodec() {
     return codec;
   }

+  public void testFstOffHeap() throws IOException {
+    Path tempDir = createTempDir();
+    try (Directory d = MMapDirectory.open(tempDir)) {
+      try (IndexWriter w = new IndexWriter(d, new IndexWriterConfig(new MockAnalyzer(random())))) {
+        DirectoryReader readerFromWriter = DirectoryReader.open(w);
+        for (int i = 0; i < 50; i++) {
+          Document doc = new Document();
+          doc.add(newStringField("id", "" + i, Field.Store.NO));
+          doc.add(newStringField("field", Character.toString((char) (97 + i)), Field.Store.NO));
+          doc.add(newStringField("field", Character.toString((char) (98 + i)), Field.Store.NO));
+          if (rarely()) {
+            w.addDocument(doc);
+          } else {
+            w.updateDocument(new Term("id", "" + i), doc);
+          }
+          if (random().nextBoolean()) {
+            w.commit();
+          }
+
+          if (random().nextBoolean()) {
+            DirectoryReader newReader = DirectoryReader.openIfChanged(readerFromWriter);
+            if (newReader != null) {
+              readerFromWriter.close();
+              readerFromWriter = newReader;
+            }
+            for (LeafReaderContext leaf : readerFromWriter.leaves()) {
+              FieldReader field = (FieldReader) leaf.reader().terms("field");
+              FieldReader id = (FieldReader) leaf.reader().terms("id");
+              assertFalse(id.isFstOffHeap());
+              assertTrue(field.isFstOffHeap());
+            }
+          }
+        }
+        readerFromWriter.close();
+
+        w.forceMerge(1);
+        try (DirectoryReader r = DirectoryReader.open(w)) {
+          assertEquals(1, r.leaves().size());
+          FieldReader field = (FieldReader) r.leaves().get(0).reader().terms("field");
+          FieldReader id = (FieldReader) r.leaves().get(0).reader().terms("id");
+          assertFalse(id.isFstOffHeap());
+          assertTrue(field.isFstOffHeap());
+        }
+        w.commit();
+        try (DirectoryReader r = DirectoryReader.open(d)) {
+          assertEquals(1, r.leaves().size());
+          FieldReader field = (FieldReader) r.leaves().get(0).reader().terms("field");
+          FieldReader id = (FieldReader) r.leaves().get(0).reader().terms("id");
+          assertTrue(id.isFstOffHeap());
+          assertTrue(field.isFstOffHeap());
+        }
+      }
+    }
+
+    try (Directory d = new SimpleFSDirectory(tempDir)) {
+      // test auto
+      try (DirectoryReader r = DirectoryReader.open(d)) {
+        assertEquals(1, r.leaves().size());
+        FieldReader field = (FieldReader) r.leaves().get(0).reader().terms("field");
+        FieldReader id = (FieldReader) r.leaves().get(0).reader().terms("id");
+        assertFalse(id.isFstOffHeap());
+        assertFalse(field.isFstOffHeap());
+      }
+    }
+  }
+
   /** Make sure the final sub-block(s) are not skipped. */
   public void testFinalBlock() throws Exception {
@@ -222,7 +222,7 @@ public class TestCodecs extends LuceneTestCase {
 final SegmentInfo si = new SegmentInfo(dir, Version.LATEST, Version.LATEST, SEGMENT, 10000, false, codec, Collections.emptyMap(), StringHelper.randomId(), new HashMap<>(), null);

 this.write(si, fieldInfos, dir, fields);
-final FieldsProducer reader = codec.postingsFormat().fieldsProducer(new SegmentReadState(dir, si, fieldInfos, newIOContext(random())));
+final FieldsProducer reader = codec.postingsFormat().fieldsProducer(new SegmentReadState(dir, si, fieldInfos, false, newIOContext(random())));

 final Iterator<String> fieldsEnum = reader.iterator();
 String fieldName = fieldsEnum.next();

@@ -282,7 +282,7 @@ public class TestCodecs extends LuceneTestCase {
 if (VERBOSE) {
 System.out.println("TEST: now read postings");
 }
-final FieldsProducer terms = codec.postingsFormat().fieldsProducer(new SegmentReadState(dir, si, fieldInfos, newIOContext(random())));
+final FieldsProducer terms = codec.postingsFormat().fieldsProducer(new SegmentReadState(dir, si, fieldInfos, false, newIOContext(random())));

 final Verify[] threads = new Verify[NUM_TEST_THREADS-1];
 for(int i=0;i<NUM_TEST_THREADS-1;i++) {
@@ -415,7 +415,7 @@ public class TestDemoParallelLeafReader extends LuceneTestCase {

 SegmentInfos infos = SegmentInfos.readLatestCommit(dir);
 assert infos.size() == 1;
-final LeafReader parLeafReader = new SegmentReader(infos.info(0), Version.LATEST.major, IOContext.DEFAULT);
+final LeafReader parLeafReader = new SegmentReader(infos.info(0), Version.LATEST.major, false, IOContext.DEFAULT);

 //checkParallelReader(leaf, parLeafReader, schemaGen);

@@ -213,8 +213,8 @@ public class TestDoc extends LuceneTestCase {
 private SegmentCommitInfo merge(Directory dir, SegmentCommitInfo si1, SegmentCommitInfo si2, String merged, boolean useCompoundFile)
 throws Exception {
 IOContext context = newIOContext(random(), new IOContext(new MergeInfo(-1, -1, false, -1)));
-SegmentReader r1 = new SegmentReader(si1, Version.LATEST.major, context);
-SegmentReader r2 = new SegmentReader(si2, Version.LATEST.major, context);
+SegmentReader r1 = new SegmentReader(si1, Version.LATEST.major, false, context);
+SegmentReader r2 = new SegmentReader(si2, Version.LATEST.major, false, context);

 final Codec codec = Codec.getDefault();
 TrackingDirectoryWrapper trackingDir = new TrackingDirectoryWrapper(si1.info.dir);

@@ -244,7 +244,7 @@ public class TestDoc extends LuceneTestCase {

 private void printSegment(PrintWriter out, SegmentCommitInfo si)
 throws Exception {
-SegmentReader reader = new SegmentReader(si, Version.LATEST.major, newIOContext(random()));
+SegmentReader reader = new SegmentReader(si, Version.LATEST.major, false, newIOContext(random()));

 for (int i = 0; i < reader.numDocs(); i++)
 out.println(reader.document(i));
@@ -63,7 +63,7 @@ public class TestDocumentWriter extends LuceneTestCase {
 SegmentCommitInfo info = writer.newestSegment();
 writer.close();
 //After adding the document, we should be able to read it back in
-SegmentReader reader = new SegmentReader(info, Version.LATEST.major, newIOContext(random()));
+SegmentReader reader = new SegmentReader(info, Version.LATEST.major, false, newIOContext(random()));
 assertTrue(reader != null);
 Document doc = reader.document(0);
 assertTrue(doc != null);

@@ -124,7 +124,7 @@ public class TestDocumentWriter extends LuceneTestCase {
 writer.commit();
 SegmentCommitInfo info = writer.newestSegment();
 writer.close();
-SegmentReader reader = new SegmentReader(info, Version.LATEST.major, newIOContext(random()));
+SegmentReader reader = new SegmentReader(info, Version.LATEST.major, false, newIOContext(random()));

 PostingsEnum termPositions = MultiTerms.getTermPostingsEnum(reader, "repeated", new BytesRef("repeated"));
 assertTrue(termPositions.nextDoc() != DocIdSetIterator.NO_MORE_DOCS);

@@ -195,7 +195,7 @@ public class TestDocumentWriter extends LuceneTestCase {
 writer.commit();
 SegmentCommitInfo info = writer.newestSegment();
 writer.close();
-SegmentReader reader = new SegmentReader(info, Version.LATEST.major, newIOContext(random()));
+SegmentReader reader = new SegmentReader(info, Version.LATEST.major, false, newIOContext(random()));

 PostingsEnum termPositions = MultiTerms.getTermPostingsEnum(reader, "f1", new BytesRef("a"));
 assertTrue(termPositions.nextDoc() != DocIdSetIterator.NO_MORE_DOCS);

@@ -237,7 +237,7 @@ public class TestDocumentWriter extends LuceneTestCase {
 writer.commit();
 SegmentCommitInfo info = writer.newestSegment();
 writer.close();
-SegmentReader reader = new SegmentReader(info, Version.LATEST.major, newIOContext(random()));
+SegmentReader reader = new SegmentReader(info, Version.LATEST.major, false, newIOContext(random()));

 PostingsEnum termPositions = reader.postings(new Term("preanalyzed", "term1"), PostingsEnum.ALL);
 assertTrue(termPositions.nextDoc() != DocIdSetIterator.NO_MORE_DOCS);
@@ -332,7 +332,7 @@ public class TestIndexWriterThreadsToSegments extends LuceneTestCase {
 SegmentInfo si = TestUtil.getDefaultCodec().segmentInfoFormat().read(dir, segName, id, IOContext.DEFAULT);
 si.setCodec(codec);
 SegmentCommitInfo sci = new SegmentCommitInfo(si, 0, 0, -1, -1, -1);
-SegmentReader sr = new SegmentReader(sci, Version.LATEST.major, IOContext.DEFAULT);
+SegmentReader sr = new SegmentReader(sci, Version.LATEST.major, false, IOContext.DEFAULT);
 try {
 thread0Count += sr.docFreq(new Term("field", "threadID0"));
 thread1Count += sr.docFreq(new Term("field", "threadID1"));
@@ -60,8 +60,8 @@ public class TestSegmentMerger extends LuceneTestCase {
 SegmentCommitInfo info1 = DocHelper.writeDoc(random(), merge1Dir, doc1);
 DocHelper.setupDoc(doc2);
 SegmentCommitInfo info2 = DocHelper.writeDoc(random(), merge2Dir, doc2);
-reader1 = new SegmentReader(info1, Version.LATEST.major, newIOContext(random()));
-reader2 = new SegmentReader(info2, Version.LATEST.major, newIOContext(random()));
+reader1 = new SegmentReader(info1, Version.LATEST.major, false, newIOContext(random()));
+reader2 = new SegmentReader(info2, Version.LATEST.major, false, newIOContext(random()));
 }

 @Override

@@ -98,7 +98,7 @@ public class TestSegmentMerger extends LuceneTestCase {
 mergeState.segmentInfo,
 0, 0, -1L, -1L, -1L),
 Version.LATEST.major,
-newIOContext(random()));
+false, newIOContext(random()));
 assertTrue(mergedReader != null);
 assertTrue(mergedReader.numDocs() == 2);
 Document newDoc1 = mergedReader.document(0);
@@ -43,7 +43,7 @@ public class TestSegmentReader extends LuceneTestCase {
 dir = newDirectory();
 DocHelper.setupDoc(testDoc);
 SegmentCommitInfo info = DocHelper.writeDoc(random(), dir, testDoc);
-reader = new SegmentReader(info, Version.LATEST.major, IOContext.READ);
+reader = new SegmentReader(info, Version.LATEST.major, false, IOContext.READ);
 }

 @Override
@@ -54,7 +54,7 @@ public class TestSegmentTermDocs extends LuceneTestCase {

 public void testTermDocs() throws IOException {
 //After adding the document, we should be able to read it back in
-SegmentReader reader = new SegmentReader(info, Version.LATEST.major, newIOContext(random()));
+SegmentReader reader = new SegmentReader(info, Version.LATEST.major, false, newIOContext(random()));
 assertTrue(reader != null);

 TermsEnum terms = reader.terms(DocHelper.TEXT_FIELD_2_KEY).iterator();

@@ -72,7 +72,7 @@ public class TestSegmentTermDocs extends LuceneTestCase {
 public void testBadSeek() throws IOException {
 {
 //After adding the document, we should be able to read it back in
-SegmentReader reader = new SegmentReader(info, Version.LATEST.major, newIOContext(random()));
+SegmentReader reader = new SegmentReader(info, Version.LATEST.major, false, newIOContext(random()));
 assertTrue(reader != null);
 PostingsEnum termDocs = TestUtil.docs(random(), reader,
 "textField2",

@@ -85,7 +85,7 @@ public class TestSegmentTermDocs extends LuceneTestCase {
 }
 {
 //After adding the document, we should be able to read it back in
-SegmentReader reader = new SegmentReader(info, Version.LATEST.major, newIOContext(random()));
+SegmentReader reader = new SegmentReader(info, Version.LATEST.major, false, newIOContext(random()));
 assertTrue(reader != null);
 PostingsEnum termDocs = TestUtil.docs(random(), reader,
 "junk",
@@ -355,7 +355,7 @@ abstract class BaseIndexFileFormatTestCase extends LuceneTestCase {
 segmentInfo, fieldInfos,
 null, new IOContext(new FlushInfo(1, 20)));

-SegmentReadState readState = new SegmentReadState(dir, segmentInfo, fieldInfos, IOContext.READ);
+SegmentReadState readState = new SegmentReadState(dir, segmentInfo, fieldInfos, false, IOContext.READ);

 // PostingsFormat
 NormsProducer fakeNorms = new NormsProducer() {
@@ -748,7 +748,7 @@ public class RandomPostingsTester {

 currentFieldInfos = newFieldInfos;

-SegmentReadState readState = new SegmentReadState(dir, segmentInfo, newFieldInfos, IOContext.READ);
+SegmentReadState readState = new SegmentReadState(dir, segmentInfo, newFieldInfos, false, IOContext.READ);

 return codec.postingsFormat().fieldsProducer(readState);
 }