Mirror of https://github.com/apache/lucene.git, synced 2025-02-27 21:09:19 +00:00
LUCENE-9257: Always keep FST off-heap. Remove FSTLoadMode and Reader attributes.
Closes #1320
This commit is contained in:
parent 9cfdf17b28
commit 9733643466
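
The hunks below remove the FSTLoadMode enum, the "blocktree.terms.fst" reader attribute, and the reader-attributes plumbing that carried it through IndexWriterConfig, DirectoryReader and SegmentReadState. As rough orientation only (an editorial sketch, not part of the commit; dir is an arbitrary Directory, imports omitted), the caller-visible difference is:

// Before this change: reader attributes could steer how the terms-index FST was loaded.
Map<String, String> attrs = Collections.singletonMap(
    BlockTreeTermsReader.FST_MODE_KEY,                  // "blocktree.terms.fst"
    BlockTreeTermsReader.FSTLoadMode.OFF_HEAP.name());
DirectoryReader before = DirectoryReader.open(dir, attrs);

// After this change: there is no load-mode knob; the terms-index FST is always read off-heap.
DirectoryReader after = DirectoryReader.open(dir);
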
@@ -239,6 +239,8 @@ Other

* LUCENE-9225: Rectangle extends LatLonGeometry so it can be used in a geometry collection. (Ignacio Vera)

* LUCENE-9257: Always keep FST off-heap. FSTLoadMode and Reader attributes removed. (Bruno Roustant)

======================= Lucene 8.4.1 =======================

Bug Fixes

@@ -391,18 +391,11 @@ public class Lucene50PostingsFormat extends PostingsFormat {
*/
// NOTE: must be multiple of 64 because of PackedInts long-aligned encoding/decoding
public final static int BLOCK_SIZE = 128;
private final BlockTreeTermsReader.FSTLoadMode fstLoadMode;

/** Creates {@code Lucene50PostingsFormat} with default
* settings. */
public Lucene50PostingsFormat() {
this(BlockTreeTermsReader.FSTLoadMode.AUTO);
}

/** Creates {@code Lucene50PostingsFormat}. */
public Lucene50PostingsFormat(BlockTreeTermsReader.FSTLoadMode loadMode) {
super("Lucene50");
this.fstLoadMode = loadMode;
}

@Override
@@ -420,7 +413,7 @@ public class Lucene50PostingsFormat extends PostingsFormat {
PostingsReaderBase postingsReader = new Lucene50PostingsReader(state);
boolean success = false;
try {
FieldsProducer ret = new BlockTreeTermsReader(postingsReader, state, fstLoadMode);
FieldsProducer ret = new BlockTreeTermsReader(postingsReader, state);
success = true;
return ret;
} finally {

@@ -18,17 +18,13 @@ package org.apache.lucene.codecs.lucene50;


import java.io.IOException;
import java.nio.file.Path;
import java.util.Arrays;
import java.util.Collections;
import java.util.HashMap;
import java.util.List;
import java.util.Map;

import org.apache.lucene.analysis.MockAnalyzer;
import org.apache.lucene.codecs.Codec;
import org.apache.lucene.codecs.CompetitiveImpactAccumulator;
import org.apache.lucene.codecs.blocktree.BlockTreeTermsReader;
import org.apache.lucene.codecs.blocktree.FieldReader;
import org.apache.lucene.codecs.blocktree.Stats;
import org.apache.lucene.codecs.lucene50.Lucene50ScoreSkipReader.MutableImpactList;
@@ -39,16 +35,11 @@ import org.apache.lucene.index.DirectoryReader;
import org.apache.lucene.index.Impact;
import org.apache.lucene.index.IndexWriter;
import org.apache.lucene.index.IndexWriterConfig;
import org.apache.lucene.index.LeafReaderContext;
import org.apache.lucene.index.Term;
import org.apache.lucene.store.ByteArrayDataInput;
import org.apache.lucene.store.Directory;
import org.apache.lucene.store.FSDirectory;
import org.apache.lucene.store.IOContext;
import org.apache.lucene.store.IndexInput;
import org.apache.lucene.store.IndexOutput;
import org.apache.lucene.store.MMapDirectory;
import org.apache.lucene.store.SimpleFSDirectory;
import org.apache.lucene.util.TestUtil;

/**

@@ -62,202 +53,6 @@ public class TestBlockPostingsFormat extends BasePostingsFormatTestCase {
return codec;
}

public void testFstOffHeap() throws IOException {
Path tempDir = createTempDir();
try (Directory d = FSDirectory.open(tempDir)) {
assumeTrue("only works with mmap directory", d instanceof MMapDirectory);
try (IndexWriter w = new IndexWriter(d, new IndexWriterConfig(new MockAnalyzer(random())))) {
DirectoryReader readerFromWriter = DirectoryReader.open(w);
for (int i = 0; i < 50; i++) {
Document doc = new Document();
doc.add(newStringField("id", "" + i, Field.Store.NO));
doc.add(newStringField("field", Character.toString((char) (97 + i)), Field.Store.NO));
doc.add(newStringField("field", Character.toString((char) (98 + i)), Field.Store.NO));
if (rarely()) {
w.addDocument(doc);
} else {
w.updateDocument(new Term("id", "" + i), doc);
}
if (random().nextBoolean()) {
w.commit();
}

if (random().nextBoolean()) {
DirectoryReader newReader = DirectoryReader.openIfChanged(readerFromWriter);
if (newReader != null) {
readerFromWriter.close();
readerFromWriter = newReader;
}
for (LeafReaderContext leaf : readerFromWriter.leaves()) {
FieldReader field = (FieldReader) leaf.reader().terms("field");
FieldReader id = (FieldReader) leaf.reader().terms("id");
assertFalse(id.isFstOffHeap());
assertTrue(field.isFstOffHeap());
}
}
}
readerFromWriter.close();

w.forceMerge(1);
try (DirectoryReader r = DirectoryReader.open(w)) {
assertEquals(1, r.leaves().size());
FieldReader field = (FieldReader) r.leaves().get(0).reader().terms("field");
FieldReader id = (FieldReader) r.leaves().get(0).reader().terms("id");
assertFalse(id.isFstOffHeap());
assertTrue(field.isFstOffHeap());
}
w.commit();
try (DirectoryReader r = DirectoryReader.open(d)) {
assertEquals(1, r.leaves().size());
FieldReader field = (FieldReader) r.leaves().get(0).reader().terms("field");
FieldReader id = (FieldReader) r.leaves().get(0).reader().terms("id");
assertTrue(id.isFstOffHeap());
assertTrue(field.isFstOffHeap());
}
}
}

try (Directory d = new SimpleFSDirectory(tempDir)) {
// test auto
try (DirectoryReader r = DirectoryReader.open(d)) {
assertEquals(1, r.leaves().size());
FieldReader field = (FieldReader) r.leaves().get(0).reader().terms("field");
FieldReader id = (FieldReader) r.leaves().get(0).reader().terms("id");
assertFalse(id.isFstOffHeap());
assertFalse(field.isFstOffHeap());
}
}

try (Directory d = new SimpleFSDirectory(tempDir)) {
// test per field
Map<String, String> readerAttributes = new HashMap<>();
readerAttributes.put(BlockTreeTermsReader.FST_MODE_KEY, BlockTreeTermsReader.FSTLoadMode.OFF_HEAP.name());
readerAttributes.put(BlockTreeTermsReader.FST_MODE_KEY + ".field", BlockTreeTermsReader.FSTLoadMode.ON_HEAP.name());
try (DirectoryReader r = DirectoryReader.open(d, readerAttributes)) {
assertEquals(1, r.leaves().size());
FieldReader field = (FieldReader) r.leaves().get(0).reader().terms("field");
FieldReader id = (FieldReader) r.leaves().get(0).reader().terms("id");
assertTrue(id.isFstOffHeap());
assertFalse(field.isFstOffHeap());
}
}

IllegalArgumentException invalid = expectThrows(IllegalArgumentException.class, () -> {
try (Directory d = new SimpleFSDirectory(tempDir)) {
Map<String, String> readerAttributes = new HashMap<>();
readerAttributes.put(BlockTreeTermsReader.FST_MODE_KEY, "invalid");
DirectoryReader.open(d, readerAttributes);
}
});

assertEquals("Invalid value for blocktree.terms.fst expected one of: [OFF_HEAP, ON_HEAP, OPTIMIZE_UPDATES_OFF_HEAP, AUTO] but was: invalid", invalid.getMessage());
}

public void testDisableFSTOffHeap() throws IOException {
Path tempDir = createTempDir();
try (Directory d = MMapDirectory.open(tempDir)) {
try (IndexWriter w = new IndexWriter(d, new IndexWriterConfig(new MockAnalyzer(random()))
.setReaderAttributes(Collections.singletonMap(BlockTreeTermsReader.FST_MODE_KEY, BlockTreeTermsReader.FSTLoadMode.ON_HEAP.name())))) {
assumeTrue("only works with mmap directory", d instanceof MMapDirectory);
DirectoryReader readerFromWriter = DirectoryReader.open(w);
for (int i = 0; i < 50; i++) {
Document doc = new Document();
doc.add(newStringField("id", "" + i, Field.Store.NO));
doc.add(newStringField("field", Character.toString((char) (97 + i)), Field.Store.NO));
doc.add(newStringField("field", Character.toString((char) (98 + i)), Field.Store.NO));
if (rarely()) {
w.addDocument(doc);
} else {
w.updateDocument(new Term("id", "" + i), doc);
}
if (random().nextBoolean()) {
w.commit();
}
if (random().nextBoolean()) {
DirectoryReader newReader = DirectoryReader.openIfChanged(readerFromWriter);
if (newReader != null) {
readerFromWriter.close();
readerFromWriter = newReader;
}
for (LeafReaderContext leaf : readerFromWriter.leaves()) {
FieldReader field = (FieldReader) leaf.reader().terms("field");
FieldReader id = (FieldReader) leaf.reader().terms("id");
assertFalse(id.isFstOffHeap());
assertFalse(field.isFstOffHeap());
}
}
}
readerFromWriter.close();
w.forceMerge(1);
w.commit();
}
try (DirectoryReader r = DirectoryReader.open(d, Collections.singletonMap(BlockTreeTermsReader.FST_MODE_KEY, BlockTreeTermsReader.FSTLoadMode.ON_HEAP.name()))) {
assertEquals(1, r.leaves().size());
FieldReader field = (FieldReader) r.leaves().get(0).reader().terms("field");
FieldReader id = (FieldReader) r.leaves().get(0).reader().terms("id");
assertFalse(id.isFstOffHeap());
assertFalse(field.isFstOffHeap());
}
}
}

public void testAlwaysFSTOffHeap() throws IOException {
boolean alsoLoadIdOffHeap = random().nextBoolean();
BlockTreeTermsReader.FSTLoadMode loadMode;
if (alsoLoadIdOffHeap) {
loadMode = BlockTreeTermsReader.FSTLoadMode.OFF_HEAP;
} else {
loadMode = BlockTreeTermsReader.FSTLoadMode.OPTIMIZE_UPDATES_OFF_HEAP;
}
try (Directory d = newDirectory()) { // any directory should work now
try (IndexWriter w = new IndexWriter(d, new IndexWriterConfig(new MockAnalyzer(random()))
.setReaderAttributes(Collections.singletonMap(BlockTreeTermsReader.FST_MODE_KEY, loadMode.name())))) {
DirectoryReader readerFromWriter = DirectoryReader.open(w);
for (int i = 0; i < 50; i++) {
Document doc = new Document();
doc.add(newStringField("id", "" + i, Field.Store.NO));
doc.add(newStringField("field", Character.toString((char) (97 + i)), Field.Store.NO));
doc.add(newStringField("field", Character.toString((char) (98 + i)), Field.Store.NO));
if (rarely()) {
w.addDocument(doc);
} else {
w.updateDocument(new Term("id", "" + i), doc);
}
if (random().nextBoolean()) {
w.commit();
}
if (random().nextBoolean()) {
DirectoryReader newReader = DirectoryReader.openIfChanged(readerFromWriter);
if (newReader != null) {
readerFromWriter.close();
readerFromWriter = newReader;
}
for (LeafReaderContext leaf : readerFromWriter.leaves()) {
FieldReader field = (FieldReader) leaf.reader().terms("field");
FieldReader id = (FieldReader) leaf.reader().terms("id");
if (alsoLoadIdOffHeap) {
assertTrue(id.isFstOffHeap());
} else {
assertFalse(id.isFstOffHeap());
}
assertTrue(field.isFstOffHeap());
}
}
}
readerFromWriter.close();
w.forceMerge(1);
w.commit();
}
try (DirectoryReader r = DirectoryReader.open(d, Collections.singletonMap(BlockTreeTermsReader.FST_MODE_KEY, loadMode.name()))) {
assertEquals(1, r.leaves().size());
FieldReader field = (FieldReader) r.leaves().get(0).reader().terms("field");
FieldReader id = (FieldReader) r.leaves().get(0).reader().terms("id");
assertTrue(id.isFstOffHeap());
assertTrue(field.isFstOffHeap());
}
}
}

/** Make sure the final sub-block(s) are not skipped. */
public void testFinalBlock() throws Exception {
Directory d = newDirectory();

@@ -19,7 +19,6 @@ package org.apache.lucene.codecs.blocktree;

import java.io.IOException;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.Collection;
import java.util.Collections;
import java.util.HashMap;
@@ -76,39 +75,6 @@ import org.apache.lucene.util.fst.Outputs;

public final class BlockTreeTermsReader extends FieldsProducer {

/**
* An enum that allows to control if term index FSTs are loaded into memory or read off-heap
*/
public enum FSTLoadMode {
/**
* Always read FSTs from disk.
* NOTE: If this option is used the FST will be read off-heap even if buffered directory implementations
* are used.
*/
OFF_HEAP,
/**
* Never read FSTs from disk ie. all fields FSTs are loaded into memory
*/
ON_HEAP,
/**
* Always read FSTs from disk.
* An exception is made for ID fields in an IndexWriter context which are always loaded into memory.
* This is useful to guarantee best update performance even if a non MMapDirectory is used.
* NOTE: If this option is used the FST will be read off-heap even if buffered directory implementations
* are used.
* See {@link FSTLoadMode#AUTO}
*/
OPTIMIZE_UPDATES_OFF_HEAP,
/**
* Automatically make the decision if FSTs are read from disk depending if the segment read from an MMAPDirectory
* An exception is made for ID fields in an IndexWriter context which are always loaded into memory.
*/
AUTO
}

/** Attribute key for fst mode. */
public static final String FST_MODE_KEY = "blocktree.terms.fst";

static final Outputs<BytesRef> FST_OUTPUTS = ByteSequenceOutputs.getSingleton();

static final BytesRef NO_OUTPUT = FST_OUTPUTS.getNoOutput();
@@ -160,7 +126,7 @@ public final class BlockTreeTermsReader extends FieldsProducer {
final int version;

/** Sole constructor. */
public BlockTreeTermsReader(PostingsReaderBase postingsReader, SegmentReadState state, FSTLoadMode defaultLoadMode) throws IOException {
public BlockTreeTermsReader(PostingsReaderBase postingsReader, SegmentReadState state) throws IOException {
boolean success = false;

this.postingsReader = postingsReader;
@@ -197,7 +163,6 @@ public final class BlockTreeTermsReader extends FieldsProducer {
seekDir(termsIn);
seekDir(indexIn);

final FSTLoadMode fstLoadMode = getLoadMode(state.readerAttributes, FST_MODE_KEY, defaultLoadMode);
final int numFields = termsIn.readVInt();
if (numFields < 0) {
throw new CorruptIndexException("invalid numFields: " + numFields, termsIn);
@@ -235,11 +200,10 @@ public final class BlockTreeTermsReader extends FieldsProducer {
if (sumTotalTermFreq < sumDocFreq) { // #positions must be >= #postings
throw new CorruptIndexException("invalid sumTotalTermFreq: " + sumTotalTermFreq + " sumDocFreq: " + sumDocFreq, termsIn);
}
final FSTLoadMode perFieldLoadMode = getLoadMode(state.readerAttributes, FST_MODE_KEY + "." + fieldInfo.name, fstLoadMode);
final long indexStartFP = indexIn.readVLong();
FieldReader previous = fieldMap.put(fieldInfo.name,
new FieldReader(this, fieldInfo, numTerms, rootCode, sumTotalTermFreq, sumDocFreq, docCount,
indexStartFP, indexIn, minTerm, maxTerm, state.openedFromWriter, perFieldLoadMode));
indexStartFP, indexIn, minTerm, maxTerm, state.openedFromWriter));
if (previous != null) {
throw new CorruptIndexException("duplicate field: " + fieldInfo.name, termsIn);
}
@@ -256,19 +220,6 @@ public final class BlockTreeTermsReader extends FieldsProducer {
}
}

private static FSTLoadMode getLoadMode(Map<String, String> attributes, String key, FSTLoadMode defaultValue) {
String value = attributes.get(key);
if (value == null) {
return defaultValue;
}
try {
return FSTLoadMode.valueOf(value);
} catch (IllegalArgumentException ex) {
throw new IllegalArgumentException("Invalid value for " + key + " expected one of: "
+ Arrays.toString(FSTLoadMode.values()) + " but was: " + value, ex);
}
}

private static BytesRef readBytesRef(IndexInput in) throws IOException {
int numBytes = in.readVInt();
if (numBytes < 0) {

@@ -25,7 +25,6 @@ import org.apache.lucene.index.IndexOptions;
import org.apache.lucene.index.Terms;
import org.apache.lucene.index.TermsEnum;
import org.apache.lucene.store.ByteArrayDataInput;
import org.apache.lucene.store.ByteBufferIndexInput;
import org.apache.lucene.store.IndexInput;
import org.apache.lucene.util.Accountable;
import org.apache.lucene.util.Accountables;
@@ -61,11 +60,10 @@ public final class FieldReader extends Terms implements Accountable {
final BlockTreeTermsReader parent;

final FST<BytesRef> index;
final boolean isFSTOffHeap;
//private boolean DEBUG;

FieldReader(BlockTreeTermsReader parent, FieldInfo fieldInfo, long numTerms, BytesRef rootCode, long sumTotalTermFreq, long sumDocFreq, int docCount,
long indexStartFP, IndexInput indexIn, BytesRef minTerm, BytesRef maxTerm, boolean openedFromWriter, BlockTreeTermsReader.FSTLoadMode fstLoadMode) throws IOException {
long indexStartFP, IndexInput indexIn, BytesRef minTerm, BytesRef maxTerm, boolean openedFromWriter) throws IOException {
assert numTerms > 0;
this.fieldInfo = fieldInfo;
//DEBUG = BlockTreeTermsReader.DEBUG && fieldInfo.name.equals("id");
@@ -82,32 +80,11 @@ public final class FieldReader extends Terms implements Accountable {
// System.out.println("BTTR: seg=" + segment + " field=" + fieldInfo.name + " rootBlockCode=" + rootCode + " divisor=" + indexDivisor);
// }
rootBlockFP = (new ByteArrayDataInput(rootCode.bytes, rootCode.offset, rootCode.length)).readVLong() >>> BlockTreeTermsReader.OUTPUT_FLAGS_NUM_BITS;
// Initialize FST offheap if index is MMapDirectory and
// docCount != sumDocFreq implying field is not primary key
// Initialize FST always off-heap.
if (indexIn != null) {
switch (fstLoadMode) {
case ON_HEAP:
isFSTOffHeap = false;
break;
case OFF_HEAP:
isFSTOffHeap = true;
break;
case OPTIMIZE_UPDATES_OFF_HEAP:
isFSTOffHeap = ((this.docCount != this.sumDocFreq) || openedFromWriter == false);
break;
case AUTO:
isFSTOffHeap = ((this.docCount != this.sumDocFreq) || openedFromWriter == false) && indexIn instanceof ByteBufferIndexInput;
break;
default:
throw new IllegalStateException("unknown enum constant: " + fstLoadMode);
}
final IndexInput clone = indexIn.clone();
clone.seek(indexStartFP);
if (isFSTOffHeap) {
index = new FST<>(clone, ByteSequenceOutputs.getSingleton(), new OffHeapFSTStore());
} else {
index = new FST<>(clone, ByteSequenceOutputs.getSingleton());
}
index = new FST<>(clone, ByteSequenceOutputs.getSingleton(), new OffHeapFSTStore());
/*
if (false) {
final String dotFileName = segment + "_" + fieldInfo.name + ".dot";
@@ -118,7 +95,6 @@ public final class FieldReader extends Terms implements Accountable {
}
*/
} else {
isFSTOffHeap = false;
index = null;
}
}
@@ -224,12 +200,4 @@ public final class FieldReader extends Terms implements Accountable {
public String toString() {
return "BlockTreeTerms(seg=" + parent.segment +" terms=" + numTerms + ",postings=" + sumDocFreq + ",positions=" + sumTotalTermFreq + ",docs=" + docCount + ")";
}

/**
* Returns <code>true</code> iff the FST is read off-heap.
*/
public boolean isFstOffHeap() {
return isFSTOffHeap;
}

}

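Condensing the FieldReader hunks above: the switch over FSTLoadMode is gone and the retained branch is unconditional, so the load no longer depends on the directory type, a load mode, or whether the reader was opened from an IndexWriter. A paraphrased sketch of the resulting constructor logic (editorial, not the verbatim file):

if (indexIn != null) {
  // The terms-index FST is always backed by an OffHeapFSTStore now.
  final IndexInput clone = indexIn.clone();
  clone.seek(indexStartFP);
  index = new FST<>(clone, ByteSequenceOutputs.getSingleton(), new OffHeapFSTStore());
} else {
  index = null;
}
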
@@ -393,24 +393,21 @@ public final class Lucene84PostingsFormat extends PostingsFormat {
private final int minTermBlockSize;
private final int maxTermBlockSize;

private final BlockTreeTermsReader.FSTLoadMode fstLoadMode;

/** Creates {@code Lucene84PostingsFormat} with default
* settings. */
public Lucene84PostingsFormat() {
this(BlockTreeTermsWriter.DEFAULT_MIN_BLOCK_SIZE, BlockTreeTermsWriter.DEFAULT_MAX_BLOCK_SIZE, BlockTreeTermsReader.FSTLoadMode.AUTO);
this(BlockTreeTermsWriter.DEFAULT_MIN_BLOCK_SIZE, BlockTreeTermsWriter.DEFAULT_MAX_BLOCK_SIZE);
}

/** Creates {@code Lucene84PostingsFormat} with custom
* values for {@code minBlockSize} and {@code
* maxBlockSize} passed to block terms dictionary.
* @see BlockTreeTermsWriter#BlockTreeTermsWriter(SegmentWriteState,PostingsWriterBase,int,int) */
public Lucene84PostingsFormat(int minTermBlockSize, int maxTermBlockSize, BlockTreeTermsReader.FSTLoadMode loadMode) {
public Lucene84PostingsFormat(int minTermBlockSize, int maxTermBlockSize) {
super("Lucene84");
BlockTreeTermsWriter.validateSettings(minTermBlockSize, maxTermBlockSize);
this.minTermBlockSize = minTermBlockSize;
this.maxTermBlockSize = maxTermBlockSize;
this.fstLoadMode = loadMode;
}

@Override
@@ -441,7 +438,7 @@ public final class Lucene84PostingsFormat extends PostingsFormat {
PostingsReaderBase postingsReader = new Lucene84PostingsReader(state);
boolean success = false;
try {
FieldsProducer ret = new BlockTreeTermsReader(postingsReader, state, fstLoadMode);
FieldsProducer ret = new BlockTreeTermsReader(postingsReader, state);
success = true;
return ret;
} finally {

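As with Lucene50PostingsFormat above, the Lucene84 codec wiring loses its FSTLoadMode parameter. A minimal illustrative sketch of the simplified flow (names are taken from the hunks above; everything else is a placeholder):

PostingsFormat pf = new Lucene84PostingsFormat(); // or new Lucene84PostingsFormat(minTermBlockSize, maxTermBlockSize)
// inside fieldsProducer(SegmentReadState state):
PostingsReaderBase postingsReader = new Lucene84PostingsReader(state);
FieldsProducer terms = new BlockTreeTermsReader(postingsReader, state); // no load-mode argument
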
@@ -25,7 +25,6 @@ import java.nio.file.Paths;
import java.text.NumberFormat;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.Collections;
import java.util.HashMap;
import java.util.Iterator;
import java.util.List;
@@ -39,7 +38,6 @@ import org.apache.lucene.codecs.PointsReader;
import org.apache.lucene.codecs.PostingsFormat;
import org.apache.lucene.codecs.StoredFieldsReader;
import org.apache.lucene.codecs.TermVectorsReader;
import org.apache.lucene.codecs.blocktree.BlockTreeTermsReader;
import org.apache.lucene.document.Document;
import org.apache.lucene.document.DocumentStoredFieldVisitor;
import org.apache.lucene.index.CheckIndex.Status.DocValuesStatus;
@@ -674,8 +672,7 @@ public final class CheckIndex implements Closeable {
long startOpenReaderNS = System.nanoTime();
if (infoStream != null)
infoStream.print(" test: open reader.........");
reader = new SegmentReader(info, sis.getIndexCreatedVersionMajor(), false, IOContext.DEFAULT,
Collections.singletonMap(BlockTreeTermsReader.FST_MODE_KEY, BlockTreeTermsReader.FSTLoadMode.OFF_HEAP.name())); // lets keep stuff on disk for check-index
reader = new SegmentReader(info, sis.getIndexCreatedVersionMajor(), false, IOContext.DEFAULT);
msg(infoStream, String.format(Locale.ROOT, "OK [took %.3f sec]", nsToSec(System.nanoTime()-startOpenReaderNS)));

segInfoStat.openReaderPassed = true;

@@ -21,7 +21,6 @@ import java.io.Closeable;
import java.io.IOException;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.Collections;
import java.util.HashMap;
import java.util.HashSet;
import java.util.List;
@@ -132,7 +131,7 @@ final class DefaultIndexingChain extends DocConsumer {
if (docState.infoStream.isEnabled("IW")) {
docState.infoStream.message("IW", ((System.nanoTime()-t0)/1000000) + " msec to write norms");
}
SegmentReadState readState = new SegmentReadState(state.directory, state.segmentInfo, state.fieldInfos, true, IOContext.READ, state.segmentSuffix, Collections.emptyMap());
SegmentReadState readState = new SegmentReadState(state.directory, state.segmentInfo, state.fieldInfos, true, IOContext.READ, state.segmentSuffix);

t0 = System.nanoTime();
writeDocValues(state, sortMap);

@@ -23,7 +23,6 @@ import java.nio.file.NoSuchFileException;
import java.util.ArrayList;
import java.util.Collections;
import java.util.List;
import java.util.Map;

import org.apache.lucene.search.SearcherManager; // javadocs
import org.apache.lucene.store.Directory;
@@ -61,19 +60,7 @@ public abstract class DirectoryReader extends BaseCompositeReader<LeafReader> {
* @throws IOException if there is a low-level IO error
*/
public static DirectoryReader open(final Directory directory) throws IOException {
return open(directory, Collections.emptyMap());
}

/** Returns a IndexReader reading the index in the given
* Directory
* @param directory the index directory
* @param readerAttributes the reader attributes passed to the {@link org.apache.lucene.codecs.Codec} layer of the
* directory reader. This attribute map is forwarded to all leaf readers as well as to the readers
* that are opened subsequently via the different flavors of {@link DirectoryReader#openIfChanged(DirectoryReader)}
* @throws IOException if there is a low-level IO error
*/
public static DirectoryReader open(final Directory directory, final Map<String, String> readerAttributes) throws IOException {
return StandardDirectoryReader.open(directory, null, readerAttributes);
return StandardDirectoryReader.open(directory, null);
}

/**
@@ -122,19 +109,7 @@ public abstract class DirectoryReader extends BaseCompositeReader<LeafReader> {
* @throws IOException if there is a low-level IO error
*/
public static DirectoryReader open(final IndexCommit commit) throws IOException {
return open(commit, Collections.emptyMap());
}

/** Expert: returns an IndexReader reading the index in the given
* {@link IndexCommit}.
* @param commit the commit point to open
* @param readerAttributes the reader attributes passed to the {@link org.apache.lucene.codecs.Codec} layer of the
* directory reader. This attribute map is forwarded to all leaf readers as well as to the readers
* that are opened subsequently via the different flavors of {@link DirectoryReader#openIfChanged(DirectoryReader)}
* @throws IOException if there is a low-level IO error
*/
public static DirectoryReader open(final IndexCommit commit, Map<String, String> readerAttributes) throws IOException {
return StandardDirectoryReader.open(commit.getDirectory(), commit, readerAttributes);
return StandardDirectoryReader.open(commit.getDirectory(), commit);
}

/**

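After these hunks only the attribute-free DirectoryReader entry points remain. A sketch of the surviving overloads (directory and indexCommit are placeholders):

DirectoryReader fromDir = DirectoryReader.open(directory); // delegates to StandardDirectoryReader.open(directory, null)
DirectoryReader fromCommit = DirectoryReader.open(indexCommit); // delegates to StandardDirectoryReader.open(commit.getDirectory(), commit)
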
@@ -525,7 +525,7 @@ public class IndexWriter implements Closeable, TwoPhaseCommit, Accountable,
// reader; in theory we could instead do similar retry logic,
// just like we do when loading segments_N

r = StandardDirectoryReader.open(this, segmentInfos, applyAllDeletes, writeAllDeletes, config.getReaderAttributes());
r = StandardDirectoryReader.open(this, segmentInfos, applyAllDeletes, writeAllDeletes);
if (infoStream.isEnabled("IW")) {
infoStream.message("IW", "return reader version=" + r.getVersion() + " reader=" + r);
}
@@ -887,7 +887,7 @@ public class IndexWriter implements Closeable, TwoPhaseCommit, Accountable,
enableTestPoints, this::newSegmentName,
config, directoryOrig, directory, globalFieldNumberMap);
readerPool = new ReaderPool(directory, directoryOrig, segmentInfos, globalFieldNumberMap,
bufferedUpdatesStream::getCompletedDelGen, infoStream, conf.getSoftDeletesField(), reader, config.getReaderAttributes());
bufferedUpdatesStream::getCompletedDelGen, infoStream, conf.getSoftDeletesField(), reader);
if (config.getReaderPooling()) {
readerPool.enableReaderPooling();
}

@@ -20,8 +20,6 @@ package org.apache.lucene.index;
import java.io.PrintStream;
import java.util.Arrays;
import java.util.EnumSet;
import java.util.Map;
import java.util.Objects;
import java.util.stream.Collectors;

import org.apache.lucene.analysis.Analyzer;
@@ -566,14 +564,4 @@ public final class IndexWriterConfig extends LiveIndexWriterConfig {
this.softDeletesField = softDeletesField;
return this;
}

/**
* Sets the reader attributes used for all readers pulled from the IndexWriter. Reader attributes allow configuration
* of low-level aspects like ram utilization on a per-reader basis.
* Note: This method make a shallow copy of the provided map.
*/
public IndexWriterConfig setReaderAttributes(Map<String, String> readerAttributes) {
this.readerAttributes = Map.copyOf(Objects.requireNonNull(readerAttributes));
return this;
}
}

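With setReaderAttributes removed from IndexWriterConfig, a near-real-time setup needs no special configuration for FST loading. A hedged sketch (StandardAnalyzer and directory are placeholder choices, not taken from this diff):

IndexWriterConfig iwc = new IndexWriterConfig(new StandardAnalyzer());
IndexWriter writer = new IndexWriter(directory, iwc);
DirectoryReader nrtReader = DirectoryReader.open(writer); // NRT reader; term-index FSTs are off-heap by default
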
@@ -18,7 +18,6 @@ package org.apache.lucene.index;


import java.util.Collections;
import java.util.Map;
import java.util.Set;

import org.apache.lucene.analysis.Analyzer;
@@ -114,9 +113,6 @@ public class LiveIndexWriterConfig {
/** soft deletes field */
protected String softDeletesField = null;

/** the attributes for the NRT readers */
protected Map<String, String> readerAttributes = Collections.emptyMap();

/** Amount of time to wait for merges returned by MergePolicy.findFullFlushMerges(...) */
protected volatile double maxCommitMergeWaitSeconds;

@@ -528,16 +524,8 @@ public class LiveIndexWriterConfig {
sb.append("indexSort=").append(getIndexSort()).append("\n");
sb.append("checkPendingFlushOnUpdate=").append(isCheckPendingFlushOnUpdate()).append("\n");
sb.append("softDeletesField=").append(getSoftDeletesField()).append("\n");
sb.append("readerAttributes=").append(getReaderAttributes()).append("\n");
sb.append("maxCommitMergeWaitSeconds=").append(getMaxCommitMergeWaitSeconds()).append("\n");
sb.append("indexWriterEvents=").append(getIndexWriterEvents().getClass().getName()).append("\n");
return sb.toString();
}

/**
* Returns the reader attributes passed to all published readers opened on or within the IndexWriter
*/
public Map<String, String> getReaderAttributes() {
return this.readerAttributes;
}
}

@@ -54,7 +54,6 @@ final class ReaderPool implements Closeable {
private final InfoStream infoStream;
private final SegmentInfos segmentInfos;
private final String softDeletesField;
private final Map<String, String> readerAttributes;
// This is a "write once" variable (like the organic dye
// on a DVD-R that may or may not be heated by a laser and
// then cooled to permanently record the event): it's
@@ -72,7 +71,7 @@ final class ReaderPool implements Closeable {

ReaderPool(Directory directory, Directory originalDirectory, SegmentInfos segmentInfos,
FieldInfos.FieldNumbers fieldNumbers, LongSupplier completedDelGenSupplier, InfoStream infoStream,
String softDeletesField, StandardDirectoryReader reader, Map<String, String> readerAttributes) throws IOException {
String softDeletesField, StandardDirectoryReader reader) throws IOException {
this.directory = directory;
this.originalDirectory = originalDirectory;
this.segmentInfos = segmentInfos;
@@ -80,7 +79,6 @@ final class ReaderPool implements Closeable {
this.completedDelGenSupplier = completedDelGenSupplier;
this.infoStream = infoStream;
this.softDeletesField = softDeletesField;
this.readerAttributes = readerAttributes;
if (reader != null) {
// Pre-enroll all segment readers into the reader pool; this is necessary so
// any in-memory NRT live docs are correctly carried over, and so NRT readers
@@ -93,7 +91,7 @@ final class ReaderPool implements Closeable {
SegmentReader newReader = new SegmentReader(segmentInfos.info(i), segReader, segReader.getLiveDocs(),
segReader.getHardLiveDocs(), segReader.numDocs(), true);
readerMap.put(newReader.getOriginalSegmentInfo(), new ReadersAndUpdates(segmentInfos.getIndexCreatedVersionMajor(),
newReader, newPendingDeletes(newReader, newReader.getOriginalSegmentInfo()), readerAttributes));
newReader, newPendingDeletes(newReader, newReader.getOriginalSegmentInfo())));
}
}
}
@@ -374,7 +372,7 @@ final class ReaderPool implements Closeable {
if (create == false) {
return null;
}
rld = new ReadersAndUpdates(segmentInfos.getIndexCreatedVersionMajor(), info, newPendingDeletes(info), readerAttributes);
rld = new ReadersAndUpdates(segmentInfos.getIndexCreatedVersionMajor(), info, newPendingDeletes(info));
// Steal initial reference:
readerMap.put(info, rld);
} else {

@@ -36,7 +36,6 @@ import org.apache.lucene.codecs.Codec;
import org.apache.lucene.codecs.DocValuesConsumer;
import org.apache.lucene.codecs.DocValuesFormat;
import org.apache.lucene.codecs.FieldInfosFormat;
import org.apache.lucene.codecs.blocktree.BlockTreeTermsReader;
import org.apache.lucene.store.Directory;
import org.apache.lucene.store.FlushInfo;
import org.apache.lucene.store.IOContext;
@@ -89,22 +88,17 @@ final class ReadersAndUpdates {

final AtomicLong ramBytesUsed = new AtomicLong();

private final Map<String, String> readerAttributes;

ReadersAndUpdates(int indexCreatedVersionMajor, SegmentCommitInfo info,
PendingDeletes pendingDeletes, Map<String, String> readerAttributes) {
ReadersAndUpdates(int indexCreatedVersionMajor, SegmentCommitInfo info, PendingDeletes pendingDeletes) {
this.info = info;
this.pendingDeletes = pendingDeletes;
this.indexCreatedVersionMajor = indexCreatedVersionMajor;
this.readerAttributes = readerAttributes;
}

/** Init from a previously opened SegmentReader.
*
* <p>NOTE: steals incoming ref from reader. */
ReadersAndUpdates(int indexCreatedVersionMajor, SegmentReader reader, PendingDeletes pendingDeletes,
Map<String, String> readerAttributes) throws IOException {
this(indexCreatedVersionMajor, reader.getOriginalSegmentInfo(), pendingDeletes, readerAttributes);
ReadersAndUpdates(int indexCreatedVersionMajor, SegmentReader reader, PendingDeletes pendingDeletes) throws IOException {
this(indexCreatedVersionMajor, reader.getOriginalSegmentInfo(), pendingDeletes);
this.reader = reader;
pendingDeletes.onNewReader(reader, info);
}
@@ -174,7 +168,7 @@ final class ReadersAndUpdates {
public synchronized SegmentReader getReader(IOContext context) throws IOException {
if (reader == null) {
// We steal returned ref:
reader = new SegmentReader(info, indexCreatedVersionMajor, true, context, readerAttributes);
reader = new SegmentReader(info, indexCreatedVersionMajor, true, context);
pendingDeletes.onNewReader(reader, info);
}

@@ -541,9 +535,7 @@ final class ReadersAndUpdates {
// IndexWriter.commitMergedDeletes).
final SegmentReader reader;
if (this.reader == null) {
reader = new SegmentReader(info, indexCreatedVersionMajor, true, IOContext.READONCE,
// we don't need terms - lets leave them off-heap if possible
Collections.singletonMap(BlockTreeTermsReader.FST_MODE_KEY, BlockTreeTermsReader.FSTLoadMode.OFF_HEAP.name()));
reader = new SegmentReader(info, indexCreatedVersionMajor, true, IOContext.READONCE);
pendingDeletes.onNewReader(reader, info);
} else {
reader = this.reader;

@@ -24,7 +24,6 @@ import java.io.IOException;
import java.nio.file.NoSuchFileException;
import java.util.Collections;
import java.util.LinkedHashSet;
import java.util.Map;
import java.util.Set;
import java.util.concurrent.atomic.AtomicInteger;

@@ -90,7 +89,7 @@ final class SegmentCoreReaders {
private final Set<IndexReader.ClosedListener> coreClosedListeners =
Collections.synchronizedSet(new LinkedHashSet<IndexReader.ClosedListener>());

SegmentCoreReaders(Directory dir, SegmentCommitInfo si, boolean openedFromWriter, IOContext context, Map<String, String> readerAttributes) throws IOException {
SegmentCoreReaders(Directory dir, SegmentCommitInfo si, boolean openedFromWriter, IOContext context) throws IOException {

final Codec codec = si.info.getCodec();
final Directory cfsDir; // confusing name: if (cfs) it's the cfsdir, otherwise it's the segment's directory.
@@ -108,7 +107,7 @@ final class SegmentCoreReaders {

coreFieldInfos = codec.fieldInfosFormat().read(cfsDir, si.info, "", context);

final SegmentReadState segmentReadState = new SegmentReadState(cfsDir, si.info, coreFieldInfos, openedFromWriter, context, readerAttributes);
final SegmentReadState segmentReadState = new SegmentReadState(cfsDir, si.info, coreFieldInfos, openedFromWriter, context);
final PostingsFormat format = codec.postingsFormat();
// Ask codec for its Fields
fields = format.fieldsProducer(segmentReadState);

@@ -18,7 +18,6 @@ package org.apache.lucene.index;


import java.io.IOException;
import java.util.Collections;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
@@ -47,7 +46,7 @@ final class SegmentDocValues {
}

// set SegmentReadState to list only the fields that are relevant to that gen
SegmentReadState srs = new SegmentReadState(dvDir, si.info, infos, false, IOContext.READ, segmentSuffix, Collections.emptyMap());
SegmentReadState srs = new SegmentReadState(dvDir, si.info, infos, false, IOContext.READ, segmentSuffix);
DocValuesFormat dvFormat = si.info.getCodec().docValuesFormat();
return new RefCount<DocValuesProducer>(dvFormat.fieldsProducer(srs)) {
@SuppressWarnings("synthetic-access")

@@ -18,7 +18,6 @@ package org.apache.lucene.index;


import java.io.IOException;
import java.util.Collections;
import java.util.List;

import org.apache.lucene.codecs.Codec;
@@ -113,7 +112,7 @@ final class SegmentMerger {
final SegmentWriteState segmentWriteState = new SegmentWriteState(mergeState.infoStream, directory, mergeState.segmentInfo,
mergeState.mergeFieldInfos, null, context);
final SegmentReadState segmentReadState = new SegmentReadState(directory, mergeState.segmentInfo, mergeState.mergeFieldInfos,
true, IOContext.READ, segmentWriteState.segmentSuffix, Collections.emptyMap());
true, IOContext.READ, segmentWriteState.segmentSuffix);

if (mergeState.mergeFieldInfos.hasNorms()) {
if (mergeState.infoStream.isEnabled("SM")) {

@@ -16,9 +16,6 @@
*/
package org.apache.lucene.index;


import java.util.Map;

import org.apache.lucene.codecs.PostingsFormat; // javadocs
import org.apache.lucene.codecs.perfield.PerFieldPostingsFormat; // javadocs
import org.apache.lucene.store.Directory;
@@ -56,16 +53,10 @@ public class SegmentReadState {
*/
public final boolean openedFromWriter;

/**
* The reader attributes for this reader. This is used to configure low level options on the codec layer.
* This attribute map is user supplied at reader creation time.
*/
public final Map<String, String> readerAttributes;

/** Create a {@code SegmentReadState}. */
public SegmentReadState(Directory dir, SegmentInfo info,
FieldInfos fieldInfos, boolean openedFromWriter, IOContext context, Map<String, String> readerAttributes) {
this(dir, info, fieldInfos, openedFromWriter, context, "", readerAttributes);
FieldInfos fieldInfos, boolean openedFromWriter, IOContext context) {
this(dir, info, fieldInfos, openedFromWriter, context, "");
}

/** Create a {@code SegmentReadState}. */
@@ -73,14 +64,13 @@ public class SegmentReadState {
SegmentInfo info,
FieldInfos fieldInfos,
boolean openedFromWriter, IOContext context,
String segmentSuffix, Map<String, String> readerAttributes) {
String segmentSuffix) {
this.directory = dir;
this.segmentInfo = info;
this.fieldInfos = fieldInfos;
this.context = context;
this.segmentSuffix = segmentSuffix;
this.openedFromWriter = openedFromWriter;
this.readerAttributes = Map.copyOf(readerAttributes);
}

/** Create a {@code SegmentReadState}. */
@@ -92,6 +82,5 @@ public class SegmentReadState {
this.context = other.context;
this.openedFromWriter = other.openedFromWriter;
this.segmentSuffix = newSegmentSuffix;
this.readerAttributes = other.readerAttributes;
}
}

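The SegmentReadState hunks above drop the readerAttributes field and constructor parameter. A sketch of the slimmed-down construction, mirroring the call sites changed elsewhere in this commit (DefaultIndexingChain, SegmentDocValues, SegmentMerger); dir, segmentInfo, fieldInfos and segmentSuffix are placeholders:

// Suffix-less convenience form, which delegates with an empty suffix:
SegmentReadState readState = new SegmentReadState(dir, segmentInfo, fieldInfos, false, IOContext.READ);
// Explicit-suffix form, as used for doc-values generations and merges:
SegmentReadState suffixed = new SegmentReadState(dir, segmentInfo, fieldInfos, false, IOContext.READ, segmentSuffix);
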
@@ -19,7 +19,6 @@ package org.apache.lucene.index;

import java.io.IOException;
import java.util.Collections;
import java.util.Map;
import java.util.Set;
import java.util.concurrent.CopyOnWriteArraySet;

@@ -73,7 +72,7 @@ public final class SegmentReader extends CodecReader {
* @throws CorruptIndexException if the index is corrupt
* @throws IOException if there is a low-level IO error
*/
SegmentReader(SegmentCommitInfo si, int createdVersionMajor, boolean openedFromWriter, IOContext context, Map<String, String> readerAttributes) throws IOException {
SegmentReader(SegmentCommitInfo si, int createdVersionMajor, boolean openedFromWriter, IOContext context) throws IOException {
this.si = si.clone();
this.originalSi = si;
this.metaData = new LeafMetaData(createdVersionMajor, si.info.getMinVersion(), si.info.getIndexSort());
@@ -81,7 +80,7 @@ public final class SegmentReader extends CodecReader {
// We pull liveDocs/DV updates from disk:
this.isNRT = false;

core = new SegmentCoreReaders(si.info.dir, si, openedFromWriter, context, readerAttributes);
core = new SegmentCoreReaders(si.info.dir, si, openedFromWriter, context);
segDocValues = new SegmentDocValues();

boolean success = false;

@@ -42,22 +42,19 @@ public final class StandardDirectoryReader extends DirectoryReader {
final SegmentInfos segmentInfos;
private final boolean applyAllDeletes;
private final boolean writeAllDeletes;
private final Map<String, String> readerAttributes;


/** called only from static open() methods */
StandardDirectoryReader(Directory directory, LeafReader[] readers, IndexWriter writer,
SegmentInfos sis, boolean applyAllDeletes, boolean writeAllDeletes,
Map<String, String> readerAttributes) throws IOException {
SegmentInfos sis, boolean applyAllDeletes, boolean writeAllDeletes) throws IOException {
super(directory, readers);
this.writer = writer;
this.segmentInfos = sis;
this.applyAllDeletes = applyAllDeletes;
this.writeAllDeletes = writeAllDeletes;
this.readerAttributes = Map.copyOf(readerAttributes);
}

/** called from DirectoryReader.open(...) methods */
static DirectoryReader open(final Directory directory, final IndexCommit commit, Map<String, String> readerAttributes) throws IOException {
static DirectoryReader open(final Directory directory, final IndexCommit commit) throws IOException {
return new SegmentInfos.FindSegmentsFile<DirectoryReader>(directory) {
@Override
protected DirectoryReader doBody(String segmentFileName) throws IOException {
@@ -66,12 +63,12 @@ public final class StandardDirectoryReader extends DirectoryReader {
boolean success = false;
try {
for (int i = sis.size()-1; i >= 0; i--) {
readers[i] = new SegmentReader(sis.info(i), sis.getIndexCreatedVersionMajor(), false, IOContext.READ, readerAttributes);
readers[i] = new SegmentReader(sis.info(i), sis.getIndexCreatedVersionMajor(), false, IOContext.READ);
}

// This may throw CorruptIndexException if there are too many docs, so
// it must be inside try clause so we close readers in that case:
DirectoryReader reader = new StandardDirectoryReader(directory, readers, null, sis, false, false, readerAttributes);
DirectoryReader reader = new StandardDirectoryReader(directory, readers, null, sis, false, false);
success = true;

return reader;
@@ -85,7 +82,7 @@ public final class StandardDirectoryReader extends DirectoryReader {
}

/** Used by near real-time search */
static DirectoryReader open(IndexWriter writer, SegmentInfos infos, boolean applyAllDeletes, boolean writeAllDeletes, Map<String, String> readerAttributes) throws IOException {
static DirectoryReader open(IndexWriter writer, SegmentInfos infos, boolean applyAllDeletes, boolean writeAllDeletes) throws IOException {
// IndexWriter synchronizes externally before calling
// us, which ensures infos will not change; so there's
// no need to process segments in reverse order
@@ -124,7 +121,7 @@ public final class StandardDirectoryReader extends DirectoryReader {

StandardDirectoryReader result = new StandardDirectoryReader(dir,
readers.toArray(new SegmentReader[readers.size()]), writer,
segmentInfos, applyAllDeletes, writeAllDeletes, readerAttributes);
segmentInfos, applyAllDeletes, writeAllDeletes);
return result;
} catch (Throwable t) {
try {
@@ -139,7 +136,7 @@ public final class StandardDirectoryReader extends DirectoryReader {
/** This constructor is only used for {@link #doOpenIfChanged(SegmentInfos)}, as well as NRT replication.
*
* @lucene.internal */
public static DirectoryReader open(Directory directory, SegmentInfos infos, List<? extends LeafReader> oldReaders, Map<String, String> readerAttributes) throws IOException {
public static DirectoryReader open(Directory directory, SegmentInfos infos, List<? extends LeafReader> oldReaders) throws IOException {

// we put the old SegmentReaders in a map, that allows us
// to lookup a reader using its segment name
@@ -179,7 +176,7 @@ public final class StandardDirectoryReader extends DirectoryReader {
SegmentReader newReader;
if (oldReader == null || commitInfo.info.getUseCompoundFile() != oldReader.getSegmentInfo().info.getUseCompoundFile()) {
// this is a new reader; in case we hit an exception we can decRef it safely
newReader = new SegmentReader(commitInfo, infos.getIndexCreatedVersionMajor(), false, IOContext.READ, readerAttributes);
newReader = new SegmentReader(commitInfo, infos.getIndexCreatedVersionMajor(), false, IOContext.READ);
newReaders[i] = newReader;
} else {
if (oldReader.isNRT) {
@@ -221,7 +218,7 @@ public final class StandardDirectoryReader extends DirectoryReader {
}
}
}
return new StandardDirectoryReader(directory, newReaders, null, infos, false, false, readerAttributes);
return new StandardDirectoryReader(directory, newReaders, null, infos, false, false);
}

// TODO: move somewhere shared if it's useful elsewhere
@@ -334,7 +331,7 @@ public final class StandardDirectoryReader extends DirectoryReader {
}

DirectoryReader doOpenIfChanged(SegmentInfos infos) throws IOException {
return StandardDirectoryReader.open(directory, infos, getSequentialSubReaders(), readerAttributes);
return StandardDirectoryReader.open(directory, infos, getSequentialSubReaders());
}

@Override

@@ -17,22 +17,15 @@
package org.apache.lucene.codecs.lucene84;

import java.io.IOException;
import java.nio.file.Path;
import java.util.Arrays;
import java.util.Collections;
import java.util.HashMap;
import java.util.List;
import java.util.Map;

import org.apache.lucene.analysis.MockAnalyzer;
import org.apache.lucene.codecs.Codec;
import org.apache.lucene.codecs.CompetitiveImpactAccumulator;
import org.apache.lucene.codecs.blocktree.BlockTreeTermsReader;
import org.apache.lucene.codecs.blocktree.FieldReader;
import org.apache.lucene.codecs.blocktree.Stats;
import org.apache.lucene.codecs.lucene84.Lucene84PostingsFormat;
import org.apache.lucene.codecs.lucene84.Lucene84ScoreSkipReader;
import org.apache.lucene.codecs.lucene84.Lucene84SkipWriter;
import org.apache.lucene.codecs.lucene84.Lucene84ScoreSkipReader.MutableImpactList;
import org.apache.lucene.document.Document;
import org.apache.lucene.document.Field;
@@ -41,16 +34,11 @@ import org.apache.lucene.index.DirectoryReader;
import org.apache.lucene.index.Impact;
import org.apache.lucene.index.IndexWriter;
import org.apache.lucene.index.IndexWriterConfig;
import org.apache.lucene.index.LeafReaderContext;
import org.apache.lucene.index.Term;
import org.apache.lucene.store.ByteArrayDataInput;
import org.apache.lucene.store.Directory;
import org.apache.lucene.store.FSDirectory;
import org.apache.lucene.store.IOContext;
import org.apache.lucene.store.IndexInput;
import org.apache.lucene.store.IndexOutput;
import org.apache.lucene.store.MMapDirectory;
import org.apache.lucene.store.SimpleFSDirectory;
import org.apache.lucene.util.TestUtil;

public class TestLucene84PostingsFormat extends BasePostingsFormatTestCase {
@@ -61,202 +49,6 @@ public class TestLucene84PostingsFormat extends BasePostingsFormatTestCase {
return codec;
}

public void testFstOffHeap() throws IOException {
Path tempDir = createTempDir();
try (Directory d = FSDirectory.open(tempDir)) {
assumeTrue("only works with mmap directory", d instanceof MMapDirectory);
try (IndexWriter w = new IndexWriter(d, new IndexWriterConfig(new MockAnalyzer(random())))) {
DirectoryReader readerFromWriter = DirectoryReader.open(w);
for (int i = 0; i < 50; i++) {
Document doc = new Document();
doc.add(newStringField("id", "" + i, Field.Store.NO));
doc.add(newStringField("field", Character.toString((char) (97 + i)), Field.Store.NO));
doc.add(newStringField("field", Character.toString((char) (98 + i)), Field.Store.NO));
if (rarely()) {
w.addDocument(doc);
} else {
w.updateDocument(new Term("id", "" + i), doc);
}
if (random().nextBoolean()) {
w.commit();
}

if (random().nextBoolean()) {
DirectoryReader newReader = DirectoryReader.openIfChanged(readerFromWriter);
if (newReader != null) {
readerFromWriter.close();
readerFromWriter = newReader;
}
for (LeafReaderContext leaf : readerFromWriter.leaves()) {
FieldReader field = (FieldReader) leaf.reader().terms("field");
FieldReader id = (FieldReader) leaf.reader().terms("id");
assertFalse(id.isFstOffHeap());
assertTrue(field.isFstOffHeap());
}
}
}
readerFromWriter.close();

w.forceMerge(1);
try (DirectoryReader r = DirectoryReader.open(w)) {
assertEquals(1, r.leaves().size());
FieldReader field = (FieldReader) r.leaves().get(0).reader().terms("field");
FieldReader id = (FieldReader) r.leaves().get(0).reader().terms("id");
assertFalse(id.isFstOffHeap());
assertTrue(field.isFstOffHeap());
}
w.commit();
try (DirectoryReader r = DirectoryReader.open(d)) {
assertEquals(1, r.leaves().size());
FieldReader field = (FieldReader) r.leaves().get(0).reader().terms("field");
FieldReader id = (FieldReader) r.leaves().get(0).reader().terms("id");
assertTrue(id.isFstOffHeap());
assertTrue(field.isFstOffHeap());
}
}
}

try (Directory d = new SimpleFSDirectory(tempDir)) {
// test auto
try (DirectoryReader r = DirectoryReader.open(d)) {
assertEquals(1, r.leaves().size());
FieldReader field = (FieldReader) r.leaves().get(0).reader().terms("field");
FieldReader id = (FieldReader) r.leaves().get(0).reader().terms("id");
assertFalse(id.isFstOffHeap());
assertFalse(field.isFstOffHeap());
}
}

try (Directory d = new SimpleFSDirectory(tempDir)) {
// test per field
Map<String, String> readerAttributes = new HashMap<>();
readerAttributes.put(BlockTreeTermsReader.FST_MODE_KEY, BlockTreeTermsReader.FSTLoadMode.OFF_HEAP.name());
readerAttributes.put(BlockTreeTermsReader.FST_MODE_KEY + ".field", BlockTreeTermsReader.FSTLoadMode.ON_HEAP.name());
try (DirectoryReader r = DirectoryReader.open(d, readerAttributes)) {
assertEquals(1, r.leaves().size());
FieldReader field = (FieldReader) r.leaves().get(0).reader().terms("field");
FieldReader id = (FieldReader) r.leaves().get(0).reader().terms("id");
assertTrue(id.isFstOffHeap());
assertFalse(field.isFstOffHeap());
}
}

IllegalArgumentException invalid = expectThrows(IllegalArgumentException.class, () -> {
try (Directory d = new SimpleFSDirectory(tempDir)) {
Map<String, String> readerAttributes = new HashMap<>();
readerAttributes.put(BlockTreeTermsReader.FST_MODE_KEY, "invalid");
DirectoryReader.open(d, readerAttributes);
}
});

assertEquals("Invalid value for blocktree.terms.fst expected one of: [OFF_HEAP, ON_HEAP, OPTIMIZE_UPDATES_OFF_HEAP, AUTO] but was: invalid", invalid.getMessage());
}

public void testDisableFSTOffHeap() throws IOException {
Path tempDir = createTempDir();
try (Directory d = MMapDirectory.open(tempDir)) {
try (IndexWriter w = new IndexWriter(d, new IndexWriterConfig(new MockAnalyzer(random()))
.setReaderAttributes(Collections.singletonMap(BlockTreeTermsReader.FST_MODE_KEY, BlockTreeTermsReader.FSTLoadMode.ON_HEAP.name())))) {
assumeTrue("only works with mmap directory", d instanceof MMapDirectory);
DirectoryReader readerFromWriter = DirectoryReader.open(w);
for (int i = 0; i < 50; i++) {
Document doc = new Document();
doc.add(newStringField("id", "" + i, Field.Store.NO));
doc.add(newStringField("field", Character.toString((char) (97 + i)), Field.Store.NO));
doc.add(newStringField("field", Character.toString((char) (98 + i)), Field.Store.NO));
if (rarely()) {
w.addDocument(doc);
} else {
w.updateDocument(new Term("id", "" + i), doc);
}
if (random().nextBoolean()) {
w.commit();
}
if (random().nextBoolean()) {
DirectoryReader newReader = DirectoryReader.openIfChanged(readerFromWriter);
if (newReader != null) {
readerFromWriter.close();
readerFromWriter = newReader;
}
for (LeafReaderContext leaf : readerFromWriter.leaves()) {
FieldReader field = (FieldReader) leaf.reader().terms("field");
FieldReader id = (FieldReader) leaf.reader().terms("id");
assertFalse(id.isFstOffHeap());
assertFalse(field.isFstOffHeap());
}
}
}
readerFromWriter.close();
w.forceMerge(1);
w.commit();
}
try (DirectoryReader r = DirectoryReader.open(d, Collections.singletonMap(BlockTreeTermsReader.FST_MODE_KEY, BlockTreeTermsReader.FSTLoadMode.ON_HEAP.name()))) {
assertEquals(1, r.leaves().size());
FieldReader field = (FieldReader) r.leaves().get(0).reader().terms("field");
FieldReader id = (FieldReader) r.leaves().get(0).reader().terms("id");
assertFalse(id.isFstOffHeap());
assertFalse(field.isFstOffHeap());
}
}
}

public void testAlwaysFSTOffHeap() throws IOException {
boolean alsoLoadIdOffHeap = random().nextBoolean();
BlockTreeTermsReader.FSTLoadMode loadMode;
if (alsoLoadIdOffHeap) {
loadMode = BlockTreeTermsReader.FSTLoadMode.OFF_HEAP;
} else {
loadMode = BlockTreeTermsReader.FSTLoadMode.OPTIMIZE_UPDATES_OFF_HEAP;
}
try (Directory d = newDirectory()) { // any directory should work now
try (IndexWriter w = new IndexWriter(d, new IndexWriterConfig(new MockAnalyzer(random()))
.setReaderAttributes(Collections.singletonMap(BlockTreeTermsReader.FST_MODE_KEY, loadMode.name())))) {
DirectoryReader readerFromWriter = DirectoryReader.open(w);
for (int i = 0; i < 50; i++) {
Document doc = new Document();
doc.add(newStringField("id", "" + i, Field.Store.NO));
doc.add(newStringField("field", Character.toString((char) (97 + i)), Field.Store.NO));
doc.add(newStringField("field", Character.toString((char) (98 + i)), Field.Store.NO));
if (rarely()) {
w.addDocument(doc);
} else {
w.updateDocument(new Term("id", "" + i), doc);
}
if (random().nextBoolean()) {
w.commit();
}
if (random().nextBoolean()) {
DirectoryReader newReader = DirectoryReader.openIfChanged(readerFromWriter);
if (newReader != null) {
readerFromWriter.close();
readerFromWriter = newReader;
}
for (LeafReaderContext leaf : readerFromWriter.leaves()) {
FieldReader field = (FieldReader) leaf.reader().terms("field");
FieldReader id = (FieldReader) leaf.reader().terms("id");
if (alsoLoadIdOffHeap) {
assertTrue(id.isFstOffHeap());
} else {
assertFalse(id.isFstOffHeap());
}
assertTrue(field.isFstOffHeap());
}
}
}
readerFromWriter.close();
w.forceMerge(1);
w.commit();
}
try (DirectoryReader r = DirectoryReader.open(d, Collections.singletonMap(BlockTreeTermsReader.FST_MODE_KEY, loadMode.name()))) {
assertEquals(1, r.leaves().size());
FieldReader field = (FieldReader) r.leaves().get(0).reader().terms("field");
FieldReader id = (FieldReader) r.leaves().get(0).reader().terms("id");
assertTrue(id.isFstOffHeap());
assertTrue(field.isFstOffHeap());
}
}
}

/** Make sure the final sub-block(s) are not skipped. */
public void testFinalBlock() throws Exception {
Directory d = newDirectory();
@@ -283,7 +75,7 @@ public class TestLucene84PostingsFormat extends BasePostingsFormatTestCase {

private void shouldFail(int minItemsInBlock, int maxItemsInBlock) {
expectThrows(IllegalArgumentException.class, () -> {
|
||||
new Lucene84PostingsFormat(minItemsInBlock, maxItemsInBlock, BlockTreeTermsReader.FSTLoadMode.AUTO);
|
||||
new Lucene84PostingsFormat(minItemsInBlock, maxItemsInBlock);
|
||||
});
|
||||
}
|
||||
|
||||
|
@ -222,7 +222,7 @@ public class TestCodecs extends LuceneTestCase {
final SegmentInfo si = new SegmentInfo(dir, Version.LATEST, Version.LATEST, SEGMENT, 10000, false, codec, Collections.emptyMap(), StringHelper.randomId(), new HashMap<>(), null);

this.write(si, fieldInfos, dir, fields);
final FieldsProducer reader = codec.postingsFormat().fieldsProducer(new SegmentReadState(dir, si, fieldInfos, false, newIOContext(random()), Collections.emptyMap()));
final FieldsProducer reader = codec.postingsFormat().fieldsProducer(new SegmentReadState(dir, si, fieldInfos, false, newIOContext(random())));

final Iterator<String> fieldsEnum = reader.iterator();
String fieldName = fieldsEnum.next();
@ -282,7 +282,7 @@ public class TestCodecs extends LuceneTestCase {
if (VERBOSE) {
System.out.println("TEST: now read postings");
}
final FieldsProducer terms = codec.postingsFormat().fieldsProducer(new SegmentReadState(dir, si, fieldInfos, false, newIOContext(random()), Collections.emptyMap()));
final FieldsProducer terms = codec.postingsFormat().fieldsProducer(new SegmentReadState(dir, si, fieldInfos, false, newIOContext(random())));

final Verify[] threads = new Verify[NUM_TEST_THREADS-1];
for(int i=0;i<NUM_TEST_THREADS-1;i++) {

@ -416,7 +416,7 @@ public class TestDemoParallelLeafReader extends LuceneTestCase {

SegmentInfos infos = SegmentInfos.readLatestCommit(dir);
assert infos.size() == 1;
final LeafReader parLeafReader = new SegmentReader(infos.info(0), Version.LATEST.major, false, IOContext.DEFAULT, Collections.emptyMap());
final LeafReader parLeafReader = new SegmentReader(infos.info(0), Version.LATEST.major, false, IOContext.DEFAULT);

//checkParallelReader(leaf, parLeafReader, schemaGen);

@ -213,8 +213,8 @@ public class TestDoc extends LuceneTestCase {
private SegmentCommitInfo merge(Directory dir, SegmentCommitInfo si1, SegmentCommitInfo si2, String merged, boolean useCompoundFile)
throws Exception {
IOContext context = newIOContext(random(), new IOContext(new MergeInfo(-1, -1, false, -1)));
SegmentReader r1 = new SegmentReader(si1, Version.LATEST.major, false, context, Collections.emptyMap());
SegmentReader r2 = new SegmentReader(si2, Version.LATEST.major, false, context, Collections.emptyMap());
SegmentReader r1 = new SegmentReader(si1, Version.LATEST.major, false, context);
SegmentReader r2 = new SegmentReader(si2, Version.LATEST.major, false, context);

final Codec codec = Codec.getDefault();
TrackingDirectoryWrapper trackingDir = new TrackingDirectoryWrapper(si1.info.dir);
@ -244,7 +244,7 @@ public class TestDoc extends LuceneTestCase {

private void printSegment(PrintWriter out, SegmentCommitInfo si)
throws Exception {
SegmentReader reader = new SegmentReader(si, Version.LATEST.major, false, newIOContext(random()), Collections.emptyMap());
SegmentReader reader = new SegmentReader(si, Version.LATEST.major, false, newIOContext(random()));

for (int i = 0; i < reader.numDocs(); i++)
out.println(reader.document(i));

@ -18,7 +18,6 @@ package org.apache.lucene.index;


import java.io.IOException;
import java.util.Collections;

import org.apache.lucene.analysis.*;
import org.apache.lucene.analysis.tokenattributes.CharTermAttribute;
@ -64,7 +63,7 @@ public class TestDocumentWriter extends LuceneTestCase {
SegmentCommitInfo info = writer.newestSegment();
writer.close();
//After adding the document, we should be able to read it back in
SegmentReader reader = new SegmentReader(info, Version.LATEST.major, false, newIOContext(random()), Collections.emptyMap());
SegmentReader reader = new SegmentReader(info, Version.LATEST.major, false, newIOContext(random()));
assertTrue(reader != null);
Document doc = reader.document(0);
assertTrue(doc != null);
@ -125,7 +124,7 @@ public class TestDocumentWriter extends LuceneTestCase {
writer.commit();
SegmentCommitInfo info = writer.newestSegment();
writer.close();
SegmentReader reader = new SegmentReader(info, Version.LATEST.major, false, newIOContext(random()), Collections.emptyMap());
SegmentReader reader = new SegmentReader(info, Version.LATEST.major, false, newIOContext(random()));

PostingsEnum termPositions = MultiTerms.getTermPostingsEnum(reader, "repeated", new BytesRef("repeated"));
assertTrue(termPositions.nextDoc() != DocIdSetIterator.NO_MORE_DOCS);
@ -196,7 +195,7 @@ public class TestDocumentWriter extends LuceneTestCase {
writer.commit();
SegmentCommitInfo info = writer.newestSegment();
writer.close();
SegmentReader reader = new SegmentReader(info, Version.LATEST.major, false, newIOContext(random()), Collections.emptyMap());
SegmentReader reader = new SegmentReader(info, Version.LATEST.major, false, newIOContext(random()));

PostingsEnum termPositions = MultiTerms.getTermPostingsEnum(reader, "f1", new BytesRef("a"));
assertTrue(termPositions.nextDoc() != DocIdSetIterator.NO_MORE_DOCS);
@ -238,7 +237,7 @@ public class TestDocumentWriter extends LuceneTestCase {
writer.commit();
SegmentCommitInfo info = writer.newestSegment();
writer.close();
SegmentReader reader = new SegmentReader(info, Version.LATEST.major, false, newIOContext(random()), Collections.emptyMap());
SegmentReader reader = new SegmentReader(info, Version.LATEST.major, false, newIOContext(random()));

PostingsEnum termPositions = reader.postings(new Term("preanalyzed", "term1"), PostingsEnum.ALL);
assertTrue(termPositions.nextDoc() != DocIdSetIterator.NO_MORE_DOCS);

@ -19,7 +19,6 @@ package org.apache.lucene.index;

import java.io.Closeable;
import java.io.IOException;
import java.util.Collections;
import java.util.HashSet;
import java.util.Random;
import java.util.Set;
@ -334,7 +333,7 @@ public class TestIndexWriterThreadsToSegments extends LuceneTestCase {
SegmentInfo si = TestUtil.getDefaultCodec().segmentInfoFormat().read(dir, segName, id, IOContext.DEFAULT);
si.setCodec(codec);
SegmentCommitInfo sci = new SegmentCommitInfo(si, 0, 0, -1, -1, -1);
SegmentReader sr = new SegmentReader(sci, Version.LATEST.major, false, IOContext.DEFAULT, Collections.emptyMap());
SegmentReader sr = new SegmentReader(sci, Version.LATEST.major, false, IOContext.DEFAULT);
try {
thread0Count += sr.docFreq(new Term("field", "threadID0"));
thread1Count += sr.docFreq(new Term("field", "threadID1"));

@ -45,7 +45,7 @@ public class TestReaderPool extends LuceneTestCase {
StandardDirectoryReader reader = (StandardDirectoryReader) DirectoryReader.open(directory);
SegmentInfos segmentInfos = reader.segmentInfos.clone();

ReaderPool pool = new ReaderPool(directory, directory, segmentInfos, fieldNumbers, () -> 0l, null, null, null, Collections.emptyMap());
ReaderPool pool = new ReaderPool(directory, directory, segmentInfos, fieldNumbers, () -> 0l, null, null, null);
SegmentCommitInfo commitInfo = RandomPicks.randomFrom(random(), segmentInfos.asList());
ReadersAndUpdates readersAndUpdates = pool.get(commitInfo, true);
assertSame(readersAndUpdates, pool.get(commitInfo, false));
@ -64,7 +64,7 @@ public class TestReaderPool extends LuceneTestCase {
StandardDirectoryReader reader = (StandardDirectoryReader) DirectoryReader.open(directory);
SegmentInfos segmentInfos = reader.segmentInfos.clone();

ReaderPool pool = new ReaderPool(directory, directory, segmentInfos, fieldNumbers, () -> 0l, null, null, null, Collections.emptyMap());
ReaderPool pool = new ReaderPool(directory, directory, segmentInfos, fieldNumbers, () -> 0l, null, null, null);
SegmentCommitInfo commitInfo = RandomPicks.randomFrom(random(), segmentInfos.asList());
assertFalse(pool.isReaderPoolingEnabled());
pool.release(pool.get(commitInfo, true), random().nextBoolean());
@ -100,7 +100,7 @@ public class TestReaderPool extends LuceneTestCase {
StandardDirectoryReader reader = (StandardDirectoryReader) DirectoryReader.open(directory);
SegmentInfos segmentInfos = reader.segmentInfos.clone();
ReaderPool pool = new ReaderPool(directory, directory, segmentInfos, fieldNumbers, () -> 0l,
new NullInfoStream(), null, null, Collections.emptyMap());
new NullInfoStream(), null, null);
int id = random().nextInt(10);
if (random().nextBoolean()) {
pool.enableReaderPooling();
@ -168,7 +168,7 @@ public class TestReaderPool extends LuceneTestCase {
StandardDirectoryReader reader = (StandardDirectoryReader) DirectoryReader.open(directory);
SegmentInfos segmentInfos = reader.segmentInfos.clone();
ReaderPool pool = new ReaderPool(directory, directory, segmentInfos, fieldNumbers, () -> 0l,
new NullInfoStream(), null, null, Collections.emptyMap());
new NullInfoStream(), null, null);
int id = random().nextInt(10);
if (random().nextBoolean()) {
pool.enableReaderPooling();
@ -213,7 +213,7 @@ public class TestReaderPool extends LuceneTestCase {
StandardDirectoryReader reader = (StandardDirectoryReader) DirectoryReader.open(directory);
SegmentInfos segmentInfos = reader.segmentInfos.clone();
ReaderPool pool = new ReaderPool(directory, directory, segmentInfos, fieldNumbers, () -> 0L,
new NullInfoStream(), null, null, Collections.emptyMap());
new NullInfoStream(), null, null);
if (random().nextBoolean()) {
pool.enableReaderPooling();
}
@ -287,7 +287,7 @@ public class TestReaderPool extends LuceneTestCase {
StandardDirectoryReader reader = (StandardDirectoryReader) DirectoryReader.open(directory);
SegmentInfos segmentInfos = reader.segmentInfos.clone();
ReaderPool pool = new ReaderPool(directory, directory, segmentInfos, fieldNumbers, () -> 0l,
new NullInfoStream(), null, null, Collections.emptyMap());
new NullInfoStream(), null, null);
assertEquals(0, pool.getReadersByRam().size());

int ord = 0;

@ -60,8 +60,8 @@ public class TestSegmentMerger extends LuceneTestCase {
SegmentCommitInfo info1 = DocHelper.writeDoc(random(), merge1Dir, doc1);
DocHelper.setupDoc(doc2);
SegmentCommitInfo info2 = DocHelper.writeDoc(random(), merge2Dir, doc2);
reader1 = new SegmentReader(info1, Version.LATEST.major, false, newIOContext(random()), Collections.emptyMap());
reader2 = new SegmentReader(info2, Version.LATEST.major, false, newIOContext(random()), Collections.emptyMap());
reader1 = new SegmentReader(info1, Version.LATEST.major, false, newIOContext(random()));
reader2 = new SegmentReader(info2, Version.LATEST.major, false, newIOContext(random()));
}

@Override
@ -95,10 +95,10 @@ public class TestSegmentMerger extends LuceneTestCase {
assertTrue(docsMerged == 2);
//Should be able to open a new SegmentReader against the new directory
SegmentReader mergedReader = new SegmentReader(new SegmentCommitInfo(
mergeState.segmentInfo,
0, 0, -1L, -1L, -1L),
Version.LATEST.major,
false, newIOContext(random()), Collections.emptyMap());
mergeState.segmentInfo,
0, 0, -1L, -1L, -1L),
Version.LATEST.major,
false, newIOContext(random()));
assertTrue(mergedReader != null);
assertTrue(mergedReader.numDocs() == 2);
Document newDoc1 = mergedReader.document(0);

@ -19,7 +19,6 @@ package org.apache.lucene.index;

import java.io.IOException;
import java.util.Collection;
import java.util.Collections;
import java.util.HashSet;
import java.util.List;

@ -44,7 +43,7 @@ public class TestSegmentReader extends LuceneTestCase {
dir = newDirectory();
DocHelper.setupDoc(testDoc);
SegmentCommitInfo info = DocHelper.writeDoc(random(), dir, testDoc);
reader = new SegmentReader(info, Version.LATEST.major, false, IOContext.READ, Collections.emptyMap());
reader = new SegmentReader(info, Version.LATEST.major, false, IOContext.READ);
}

@Override

@ -18,7 +18,6 @@ package org.apache.lucene.index;


import java.io.IOException;
import java.util.Collections;

import org.apache.lucene.analysis.MockAnalyzer;
import org.apache.lucene.document.Document;
@ -55,7 +54,7 @@ public class TestSegmentTermDocs extends LuceneTestCase {

public void testTermDocs() throws IOException {
//After adding the document, we should be able to read it back in
SegmentReader reader = new SegmentReader(info, Version.LATEST.major, false, newIOContext(random()), Collections.emptyMap());
SegmentReader reader = new SegmentReader(info, Version.LATEST.major, false, newIOContext(random()));
assertTrue(reader != null);

TermsEnum terms = reader.terms(DocHelper.TEXT_FIELD_2_KEY).iterator();
@ -73,7 +72,7 @@ public class TestSegmentTermDocs extends LuceneTestCase {
public void testBadSeek() throws IOException {
{
//After adding the document, we should be able to read it back in
SegmentReader reader = new SegmentReader(info, Version.LATEST.major, false, newIOContext(random()), Collections.emptyMap());
SegmentReader reader = new SegmentReader(info, Version.LATEST.major, false, newIOContext(random()));
assertTrue(reader != null);
PostingsEnum termDocs = TestUtil.docs(random(), reader,
"textField2",
@ -86,7 +85,7 @@ public class TestSegmentTermDocs extends LuceneTestCase {
}
{
//After adding the document, we should be able to read it back in
SegmentReader reader = new SegmentReader(info, Version.LATEST.major, false, newIOContext(random()), Collections.emptyMap());
SegmentReader reader = new SegmentReader(info, Version.LATEST.major, false, newIOContext(random()));
assertTrue(reader != null);
PostingsEnum termDocs = TestUtil.docs(random(), reader,
"junk",

@ -19,7 +19,6 @@ package org.apache.lucene.replicator.nrt;

import java.io.IOException;
import java.util.ArrayList;
import java.util.Collections;
import java.util.List;
import java.util.concurrent.atomic.AtomicInteger;

@ -56,7 +55,7 @@ class SegmentInfosSearcherManager extends ReferenceManager<IndexSearcher> {
this.searcherFactory = searcherFactory;
currentInfos = infosIn;
node.message("SegmentInfosSearcherManager.init: use incoming infos=" + infosIn.toString());
current = SearcherManager.getSearcher(searcherFactory, StandardDirectoryReader.open(dir, currentInfos, null, Collections.emptyMap()), null);
current = SearcherManager.getSearcher(searcherFactory, StandardDirectoryReader.open(dir, currentInfos, null), null);
addReaderClosedListener(current.getIndexReader());
}

@ -105,7 +104,7 @@ class SegmentInfosSearcherManager extends ReferenceManager<IndexSearcher> {
}

// Open a new reader, sharing any common segment readers with the old one:
DirectoryReader r = StandardDirectoryReader.open(dir, currentInfos, subs, Collections.emptyMap());
DirectoryReader r = StandardDirectoryReader.open(dir, currentInfos, subs);
addReaderClosedListener(r);
node.message("refreshed to version=" + currentInfos.getVersion() + " r=" + r);
return SearcherManager.getSearcher(searcherFactory, r, old.getIndexReader());

@ -18,7 +18,6 @@ package org.apache.lucene.codecs.cheapbastard;

import org.apache.lucene.codecs.FilterCodec;
import org.apache.lucene.codecs.PostingsFormat;
import org.apache.lucene.codecs.blocktree.BlockTreeTermsReader;
import org.apache.lucene.util.TestUtil;

/** Codec that tries to use as little ram as possible because he spent all his money on beer */
@ -26,7 +25,7 @@ import org.apache.lucene.util.TestUtil;
// but if we named it "LowMemory" in codecs/ package, it would be irresistible like optimize()!
public class CheapBastardCodec extends FilterCodec {

private final PostingsFormat postings = TestUtil.getDefaultPostingsFormat(100, 200, BlockTreeTermsReader.FSTLoadMode.OFF_HEAP);
private final PostingsFormat postings = TestUtil.getDefaultPostingsFormat(100, 200);

public CheapBastardCodec() {
super("CheapBastard", TestUtil.getDefaultCodec());

@ -19,7 +19,6 @@ package org.apache.lucene.codecs.mockrandom;
import java.io.IOException;
import java.util.Random;

import com.carrotsearch.randomizedtesting.generators.RandomPicks;
import org.apache.lucene.codecs.CodecUtil;
import org.apache.lucene.codecs.FieldsConsumer;
import org.apache.lucene.codecs.FieldsProducer;
@ -294,7 +293,7 @@ public final class MockRandomPostingsFormat extends PostingsFormat {

boolean success = false;
try {
fields = new BlockTreeTermsReader(postingsReader, state, RandomPicks.randomFrom(random, BlockTreeTermsReader.FSTLoadMode.values()));
fields = new BlockTreeTermsReader(postingsReader, state);
success = true;
} finally {
if (!success) {

@ -362,7 +362,7 @@ abstract class BaseIndexFileFormatTestCase extends LuceneTestCase {
segmentInfo, fieldInfos,
null, new IOContext(new FlushInfo(1, 20)));

SegmentReadState readState = new SegmentReadState(dir, segmentInfo, fieldInfos, false, IOContext.READ, Collections.emptyMap());
SegmentReadState readState = new SegmentReadState(dir, segmentInfo, fieldInfos, false, IOContext.READ);

// PostingsFormat
NormsProducer fakeNorms = new NormsProducer() {

@ -26,7 +26,6 @@ import java.util.Map;
import java.util.Random;
import java.util.Set;

import com.carrotsearch.randomizedtesting.generators.RandomPicks;
import org.apache.lucene.codecs.DocValuesFormat;
import org.apache.lucene.codecs.PointsFormat;
import org.apache.lucene.codecs.PointsReader;
@ -39,7 +38,6 @@ import org.apache.lucene.codecs.asserting.AssertingPostingsFormat;
import org.apache.lucene.codecs.blockterms.LuceneFixedGap;
import org.apache.lucene.codecs.blockterms.LuceneVarGapDocFreqInterval;
import org.apache.lucene.codecs.blockterms.LuceneVarGapFixedInterval;
import org.apache.lucene.codecs.blocktree.BlockTreeTermsReader;
import org.apache.lucene.codecs.blocktreeords.BlockTreeOrdsPostingsFormat;
import org.apache.lucene.codecs.bloom.TestBloomFilteredLucenePostings;
import org.apache.lucene.codecs.lucene60.Lucene60PointsReader;
@ -187,7 +185,7 @@ public class RandomCodec extends AssertingCodec {
bkdSplitRandomSeed = random.nextInt();

add(avoidCodecs,
TestUtil.getDefaultPostingsFormat(minItemsPerBlock, maxItemsPerBlock, RandomPicks.randomFrom(random, BlockTreeTermsReader.FSTLoadMode.values())),
TestUtil.getDefaultPostingsFormat(minItemsPerBlock, maxItemsPerBlock),
new FSTPostingsFormat(),
new DirectPostingsFormat(LuceneTestCase.rarely(random) ? 1 : (LuceneTestCase.rarely(random) ? Integer.MAX_VALUE : maxItemsPerBlock),
LuceneTestCase.rarely(random) ? 1 : (LuceneTestCase.rarely(random) ? Integer.MAX_VALUE : lowFreqCutoff)),

@ -748,7 +748,7 @@ public class RandomPostingsTester {

currentFieldInfos = newFieldInfos;

SegmentReadState readState = new SegmentReadState(dir, segmentInfo, newFieldInfos, false, IOContext.READ, Collections.emptyMap());
SegmentReadState readState = new SegmentReadState(dir, segmentInfo, newFieldInfos, false, IOContext.READ);

return codec.postingsFormat().fieldsProducer(readState);
}

@ -51,7 +51,6 @@ import org.apache.lucene.codecs.DocValuesFormat;
import org.apache.lucene.codecs.PostingsFormat;
import org.apache.lucene.codecs.asserting.AssertingCodec;
import org.apache.lucene.codecs.blockterms.LuceneFixedGap;
import org.apache.lucene.codecs.blocktree.BlockTreeTermsReader;
import org.apache.lucene.codecs.blocktreeords.BlockTreeOrdsPostingsFormat;
import org.apache.lucene.codecs.lucene80.Lucene80DocValuesFormat;
import org.apache.lucene.codecs.lucene84.Lucene84Codec;
@ -934,8 +933,8 @@ public final class TestUtil {
* Returns the actual default postings format (e.g. LuceneMNPostingsFormat for this version of Lucene.
* @lucene.internal this may disappear at any time
*/
public static PostingsFormat getDefaultPostingsFormat(int minItemsPerBlock, int maxItemsPerBlock, BlockTreeTermsReader.FSTLoadMode fstLoadMode) {
return new Lucene84PostingsFormat(minItemsPerBlock, maxItemsPerBlock, fstLoadMode);
public static PostingsFormat getDefaultPostingsFormat(int minItemsPerBlock, int maxItemsPerBlock) {
return new Lucene84PostingsFormat(minItemsPerBlock, maxItemsPerBlock);
}

/** Returns a random postings format that supports term ordinals */