Replace (IO)Context#READ with (IO)Context.DEFAULT. (#13242)

`DEFAULT` doesn't mean much today and can be used wherever `READ` is used. So
let's use `DEFAULT` everywhere instead and remove `READ`.
Adrien Grand 2024-03-29 10:37:26 +01:00 committed by GitHub
parent 4a3f4cfe97
commit 32d692049f
28 changed files with 55 additions and 54 deletions
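For callers, the migration is mechanical. A minimal sketch of the replacement — the index path and file name here are hypothetical, not from this commit, and the file must already exist for the read to succeed. `DEFAULT` carries the same `NORMAL` read advice that `READ` did, so behavior is unchanged:

```java
import java.nio.file.Paths;

import org.apache.lucene.store.Directory;
import org.apache.lucene.store.FSDirectory;
import org.apache.lucene.store.IOContext;
import org.apache.lucene.store.IndexInput;

public class ReadToDefaultSketch {
  public static void main(String[] args) throws Exception {
    try (Directory dir = FSDirectory.open(Paths.get("/tmp/index"))) {
      // Before this commit: dir.openInput("example.bin", IOContext.READ)
      // After: same semantics, spelled with DEFAULT.
      try (IndexInput in = dir.openInput("example.bin", IOContext.DEFAULT)) {
        System.out.println("length=" + in.length());
      }
    }
  }
}
```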


@@ -92,6 +92,8 @@ API Changes
* GITHUB#13219: The `readOnce`, `load` and `random` flags on `IOContext` have
been replaced with a new `ReadAdvice` enum. (Adrien Grand)
+ * GITHUB#13242: Replace `IOContext.READ` with `IOContext.DEFAULT`. (Adrien Grand)
New Features
---------------------


@@ -185,10 +185,12 @@ access the members using method calls instead of field accesses. Affected classes:
The `readOnce`, `load` and `random` flags on `IOContext` have been replaced with a new `ReadAdvice`
enum.
- ### IOContext.LOAD removed
+ ### IOContext.LOAD and IOContext.READ removed
`IOContext#LOAD` has been removed, it should be replaced with
- `ioContext.toReadAdvice(ReadAdvice.RANDOM_PRELOAD)`.
+ `ioContext.withReadAdvice(ReadAdvice.RANDOM_PRELOAD)`.
+ `IOContext.READ` has been removed, it should be replaced with `IOContext.DEFAULT`.
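As an illustration of the two replacements described above — a minimal sketch, assuming an open `Directory dir` and a `String fileName`; using `IOContext.DEFAULT` as the base context for the `LOAD` replacement is one reasonable choice, not the only one:

```java
import java.io.IOException;

import org.apache.lucene.store.Directory;
import org.apache.lucene.store.IOContext;
import org.apache.lucene.store.IndexInput;
import org.apache.lucene.store.ReadAdvice;

class IOContextMigrationSketch {
  // Before: dir.openInput(fileName, IOContext.LOAD)
  static IndexInput openPreloaded(Directory dir, String fileName) throws IOException {
    return dir.openInput(fileName, IOContext.DEFAULT.withReadAdvice(ReadAdvice.RANDOM_PRELOAD));
  }

  // Before: dir.openInput(fileName, IOContext.READ)
  static IndexInput openForRead(Directory dir, String fileName) throws IOException {
    return dir.openInput(fileName, IOContext.DEFAULT);
  }
}
```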
## Migration from Lucene 9.0 to Lucene 9.1


@@ -67,7 +67,7 @@ final class FieldsIndexReader extends FieldsIndex {
indexInput =
EndiannessReverserUtil.openInput(
- dir, IndexFileNames.segmentFileName(name, suffix, extension), IOContext.READ);
+ dir, IndexFileNames.segmentFileName(name, suffix, extension), IOContext.DEFAULT);
boolean success = false;
try {
CodecUtil.checkIndexHeader(


@@ -259,7 +259,7 @@ public class TestLegacyDirectMonotonic extends LuceneTestCase {
}
try (IndexInput metaIn = EndiannessReverserUtil.openInput(dir, "meta", IOContext.READONCE);
- IndexInput dataIn = EndiannessReverserUtil.openInput(dir, "data", IOContext.READ)) {
+ IndexInput dataIn = EndiannessReverserUtil.openInput(dir, "data", IOContext.DEFAULT)) {
LegacyDirectMonotonicReader.Meta meta =
LegacyDirectMonotonicReader.loadMeta(metaIn, array.length, blockShift);
LegacyDirectMonotonicReader reader =


@@ -282,7 +282,7 @@ final class BufferedUpdatesStream implements Accountable {
ReadersAndUpdates rld, IOConsumer<ReadersAndUpdates> onClose, SegmentCommitInfo info)
throws IOException {
this.rld = rld;
- reader = rld.getReader(IOContext.READ);
+ reader = rld.getReader(IOContext.DEFAULT);
startDelCount = rld.getDelCount();
delGen = info.getBufferedDeletesGen();
this.onClose = onClose;


@@ -538,7 +538,7 @@ public class IndexWriter
final ReadersAndUpdates rld = getPooledInstance(sci, true);
try {
assert Thread.holdsLock(IndexWriter.this);
- SegmentReader segmentReader = rld.getReadOnlyClone(IOContext.READ);
+ SegmentReader segmentReader = rld.getReadOnlyClone(IOContext.DEFAULT);
// only track this if we actually do fullFlush merges
if (maxFullFlushMergeWaitMillis > 0) {
openedReadOnlyClones.put(sci.info.name, segmentReader);
@@ -5447,7 +5447,7 @@ public class IndexWriter
final IndexReaderWarmer mergedSegmentWarmer = config.getMergedSegmentWarmer();
if (readerPool.isReaderPoolingEnabled() && mergedSegmentWarmer != null) {
final ReadersAndUpdates rld = getPooledInstance(merge.info, true);
- final SegmentReader sr = rld.getReader(IOContext.READ);
+ final SegmentReader sr = rld.getReader(IOContext.DEFAULT);
try {
mergedSegmentWarmer.warm(sr);
} finally {


@@ -284,7 +284,7 @@ final class IndexingChain implements Accountable {
state.directory,
state.segmentInfo,
state.fieldInfos,
- IOContext.READ,
+ IOContext.DEFAULT,
state.segmentSuffix);
t0 = System.nanoTime();


@@ -192,7 +192,7 @@ final class ReadersAndUpdates {
public synchronized boolean delete(int docID) throws IOException {
if (reader == null && pendingDeletes.mustInitOnDelete()) {
- getReader(IOContext.READ).decRef(); // pass a reader to initialize the pending deletes
+ getReader(IOContext.DEFAULT).decRef(); // pass a reader to initialize the pending deletes
}
return pendingDeletes.delete(docID);
}
@@ -241,7 +241,7 @@
private synchronized CodecReader getLatestReader() throws IOException {
if (this.reader == null) {
// get a reader and dec the ref right away we just make sure we have a reader
- getReader(IOContext.READ).decRef();
+ getReader(IOContext.DEFAULT).decRef();
}
if (pendingDeletes.needsRefresh(reader)) {
// we have a reader but its live-docs are out of sync. let's create a temporary one that we


@@ -46,7 +46,7 @@ final class SegmentDocValues {
// set SegmentReadState to list only the fields that are relevant to that gen
SegmentReadState srs =
- new SegmentReadState(dvDir, si.info, infos, IOContext.READ, segmentSuffix);
+ new SegmentReadState(dvDir, si.info, infos, IOContext.DEFAULT, segmentSuffix);
DocValuesFormat dvFormat = si.info.getCodec().docValuesFormat();
return new RefCount<DocValuesProducer>(dvFormat.fieldsProducer(srs)) {
@SuppressWarnings("synthetic-access")


@@ -395,7 +395,7 @@ public final class SegmentInfos implements Cloneable, Iterable<SegmentCommitInfo>
input.readBytes(segmentID, 0, segmentID.length);
Codec codec = readCodec(input);
SegmentInfo info =
- codec.segmentInfoFormat().read(directory, segName, segmentID, IOContext.READ);
+ codec.segmentInfoFormat().read(directory, segName, segmentID, IOContext.DEFAULT);
info.setCodec(codec);
totalDocs += info.maxDoc();
long delGen = CodecUtil.readBELong(input);


@@ -132,7 +132,7 @@ final class SegmentMerger {
directory,
mergeState.segmentInfo,
mergeState.mergeFieldInfos,
- IOContext.READ,
+ IOContext.DEFAULT,
segmentWriteState.segmentSuffix);
TaskExecutor taskExecutor = new TaskExecutor(mergeState.intraMergeTaskExecutor);


@@ -91,7 +91,8 @@ public final class StandardDirectoryReader extends DirectoryReader {
try {
for (int i = sis.size() - 1; i >= 0; i--) {
readers[i] =
- new SegmentReader(sis.info(i), sis.getIndexCreatedVersionMajor(), IOContext.READ);
+ new SegmentReader(
+     sis.info(i), sis.getIndexCreatedVersionMajor(), IOContext.DEFAULT);
}
// This may throw CorruptIndexException if there are too many docs, so
// it must be inside try clause so we close readers in that case:
@@ -229,7 +230,7 @@
!= oldReader.getSegmentInfo().info.getUseCompoundFile()) {
// this is a new reader; in case we hit an exception we can decRef it safely
newReader =
- new SegmentReader(commitInfo, infos.getIndexCreatedVersionMajor(), IOContext.READ);
+ new SegmentReader(commitInfo, infos.getIndexCreatedVersionMajor(), IOContext.DEFAULT);
newReaders[i] = newReader;
} else {
if (oldReader.isNRT) {


@@ -374,7 +374,6 @@ public abstract class BufferedIndexInput extends IndexInput implements RandomAccessInput {
return MERGE_BUFFER_SIZE;
case DEFAULT:
case FLUSH:
- case READ:
default:
return BUFFER_SIZE;
}


@@ -36,9 +36,11 @@ public record IOContext(
* Context is a enumerator which specifies the context in which the Directory is being used for.
*/
public enum Context {
+ /** Context for reads and writes that are associated with a merge. */
MERGE,
- READ,
+ /** Context for writes that are associated with a segment flush. */
FLUSH,
+ /** Default context, can be used for reading or writing. */
DEFAULT
};
@@ -47,8 +49,6 @@ public record IOContext(
public static final IOContext READONCE = new IOContext(ReadAdvice.SEQUENTIAL);
- public static final IOContext READ = new IOContext(ReadAdvice.NORMAL);
@SuppressWarnings("incomplete-switch")
public IOContext {
Objects.requireNonNull(context, "context must not be null");
@@ -63,15 +63,14 @@ public record IOContext(
throw new IllegalArgumentException(
"The MERGE context must use the SEQUENTIAL read access advice");
}
- if ((context == Context.FLUSH || context == Context.DEFAULT)
-     && readAdvice != ReadAdvice.NORMAL) {
+ if (context == Context.FLUSH && readAdvice != ReadAdvice.NORMAL) {
throw new IllegalArgumentException(
- "The FLUSH and DEFAULT contexts must use the NORMAL read access advice");
+ "The FLUSH context must use the NORMAL read access advice");
}
}
private IOContext(ReadAdvice accessAdvice) {
- this(Context.READ, null, null, accessAdvice);
+ this(Context.DEFAULT, null, null, accessAdvice);
}
/** Creates an IOContext for flushing. */
@@ -87,12 +86,13 @@ public record IOContext(
/**
* Return an updated {@link IOContext} that has the provided {@link ReadAdvice} if the {@link
- * Context} is a {@link Context#READ} context, otherwise return this existing instance. This helps
- * preserve a {@link ReadAdvice#SEQUENTIAL} advice for merging, which is always the right choice,
- * while allowing {@link IndexInput}s open for searching to use arbitrary {@link ReadAdvice}s.
+ * Context} is a {@link Context#DEFAULT} context, otherwise return this existing instance. This
+ * helps preserve a {@link ReadAdvice#SEQUENTIAL} advice for merging, which is always the right
+ * choice, while allowing {@link IndexInput}s open for searching to use arbitrary {@link
+ * ReadAdvice}s.
*/
public IOContext withReadAdvice(ReadAdvice advice) {
- if (context == Context.READ) {
+ if (context == Context.DEFAULT) {
return new IOContext(advice);
} else {
return this;
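To illustrate the `withReadAdvice` contract documented in the hunk above: a `DEFAULT` context adopts the requested advice, while a merge context returns itself unchanged, preserving `SEQUENTIAL`. A minimal sketch — the `MergeInfo` arguments are arbitrary placeholders mirroring the randomized test near the end of this diff:

```java
import org.apache.lucene.store.IOContext;
import org.apache.lucene.store.MergeInfo;
import org.apache.lucene.store.ReadAdvice;

class WithReadAdviceSketch {
  public static void main(String[] args) {
    // DEFAULT context: the advice is applied, e.g. for search-time inputs.
    IOContext search = IOContext.DEFAULT.withReadAdvice(ReadAdvice.RANDOM);

    // MERGE context: withReadAdvice returns this instance, keeping SEQUENTIAL.
    IOContext merge = new IOContext(new MergeInfo(100, 4096L, true, -1));
    IOContext same = merge.withReadAdvice(ReadAdvice.RANDOM);
    System.out.println(same == merge); // true
  }
}
```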


@@ -3029,7 +3029,7 @@ public class TestIndexWriter extends LuceneTestCase {
writer.commit();
assertEquals(1, writer.getDocStats().maxDoc);
// now check that we moved to 3
dir.openInput("segments_3", IOContext.READ).close();
dir.openInput("segments_3", IOContext.DEFAULT).close();
}
reader.close();
in.close();


@@ -118,7 +118,7 @@ public class TestReaderPool extends LuceneTestCase {
}
for (SegmentCommitInfo commitInfo : segmentInfos) {
ReadersAndUpdates readersAndUpdates = pool.get(commitInfo, true);
- SegmentReader readOnlyClone = readersAndUpdates.getReadOnlyClone(IOContext.READ);
+ SegmentReader readOnlyClone = readersAndUpdates.getReadOnlyClone(IOContext.DEFAULT);
PostingsEnum postings = readOnlyClone.postings(new Term("id", "" + id));
boolean expectUpdate = false;
int doc = -1;
@@ -162,7 +162,7 @@
assertEquals(expectUpdate, writtenToDisk);
if (expectUpdate) {
readersAndUpdates = pool.get(commitInfo, true);
- SegmentReader updatedReader = readersAndUpdates.getReadOnlyClone(IOContext.READ);
+ SegmentReader updatedReader = readersAndUpdates.getReadOnlyClone(IOContext.DEFAULT);
assertNotSame(-1, doc);
NumericDocValues number = updatedReader.getNumericDocValues("number");
assertEquals(doc, number.advance(doc));
@@ -195,7 +195,7 @@
}
for (SegmentCommitInfo commitInfo : segmentInfos) {
ReadersAndUpdates readersAndUpdates = pool.get(commitInfo, true);
- SegmentReader readOnlyClone = readersAndUpdates.getReadOnlyClone(IOContext.READ);
+ SegmentReader readOnlyClone = readersAndUpdates.getReadOnlyClone(IOContext.DEFAULT);
PostingsEnum postings = readOnlyClone.postings(new Term("id", "" + id));
boolean expectUpdate = false;
int doc = -1;
@@ -217,7 +217,7 @@
assertEquals(expectUpdate, writtenToDisk);
if (expectUpdate) {
readersAndUpdates = pool.get(commitInfo, true);
- SegmentReader updatedReader = readersAndUpdates.getReadOnlyClone(IOContext.READ);
+ SegmentReader updatedReader = readersAndUpdates.getReadOnlyClone(IOContext.DEFAULT);
assertNotSame(-1, doc);
assertFalse(updatedReader.getLiveDocs().get(doc));
readersAndUpdates.release(updatedReader);
@@ -255,7 +255,7 @@
while (isDone.get() == false) {
for (SegmentCommitInfo commitInfo : segmentInfos) {
ReadersAndUpdates readersAndUpdates = pool.get(commitInfo, true);
- SegmentReader segmentReader = readersAndUpdates.getReader(IOContext.READ);
+ SegmentReader segmentReader = readersAndUpdates.getReader(IOContext.DEFAULT);
readersAndUpdates.release(segmentReader);
pool.release(readersAndUpdates, random().nextBoolean());
}
@@ -279,7 +279,7 @@
for (int i = 0; i < reader.maxDoc(); i++) {
for (SegmentCommitInfo commitInfo : segmentInfos) {
ReadersAndUpdates readersAndUpdates = pool.get(commitInfo, true);
- SegmentReader sr = readersAndUpdates.getReadOnlyClone(IOContext.READ);
+ SegmentReader sr = readersAndUpdates.getReadOnlyClone(IOContext.DEFAULT);
PostingsEnum postings = sr.postings(new Term("id", "" + i));
sr.decRef();
if (postings != null) {


@@ -42,7 +42,7 @@ public class TestSegmentReader extends LuceneTestCase {
dir = newDirectory();
DocHelper.setupDoc(testDoc);
SegmentCommitInfo info = DocHelper.writeDoc(random(), dir, testDoc);
- reader = new SegmentReader(info, Version.LATEST.major, IOContext.READ);
+ reader = new SegmentReader(info, Version.LATEST.major, IOContext.DEFAULT);
}
@Override


@@ -110,7 +110,7 @@ public class TestMMapDirectory extends BaseDirectoryTestCase {
}
try (final IndexInput in =
dir.openInput("test", IOContext.READ.withReadAdvice(ReadAdvice.RANDOM))) {
dir.openInput("test", IOContext.DEFAULT.withReadAdvice(ReadAdvice.RANDOM))) {
final byte[] readBytes = new byte[size];
in.readBytes(readBytes, 0, readBytes.length);
assertArrayEquals(bytes, readBytes);


@@ -296,7 +296,7 @@ public class TestDirectMonotonic extends LuceneTestCase {
}
try (IndexInput metaIn = dir.openInput("meta", IOContext.READONCE);
IndexInput dataIn = dir.openInput("data", IOContext.READ)) {
IndexInput dataIn = dir.openInput("data", IOContext.DEFAULT)) {
DirectMonotonicReader.Meta meta =
DirectMonotonicReader.loadMeta(metaIn, array.length, blockShift);
DirectMonotonicReader reader =


@@ -59,11 +59,11 @@ public class KnnVectorDict implements Closeable {
* '.bin' file.
*/
public KnnVectorDict(Directory directory, String dictName) throws IOException {
- try (IndexInput fstIn = directory.openInput(dictName + ".fst", IOContext.READ)) {
+ try (IndexInput fstIn = directory.openInput(dictName + ".fst", IOContext.DEFAULT)) {
fst = new FST<>(readMetadata(fstIn, PositiveIntOutputs.getSingleton()), fstIn);
}
- vectors = directory.openInput(dictName + ".bin", IOContext.READ);
+ vectors = directory.openInput(dictName + ".bin", IOContext.DEFAULT);
long size = vectors.length();
vectors.seek(size - Integer.BYTES);
dimension = vectors.readInt();


@@ -67,7 +67,7 @@ public class TestAlwaysRefreshDirectoryTaxonomyReader extends FacetTestCase {
for (String file : dir1.listAll()) {
if (isExtra(file) == false) {
// the test framework creates these devious extra files just to chaos test the edge cases
- commit1.copyFrom(dir1, file, file, IOContext.READ);
+ commit1.copyFrom(dir1, file, file, IOContext.DEFAULT);
}
}
@@ -103,7 +103,7 @@ public class TestAlwaysRefreshDirectoryTaxonomyReader extends FacetTestCase {
// copy all index files from commit1
for (String file : commit1.listAll()) {
if (isExtra(file) == false) {
- dir1.copyFrom(commit1, file, file, IOContext.READ);
+ dir1.copyFrom(commit1, file, file, IOContext.DEFAULT);
}
}


@@ -341,7 +341,7 @@ public final class IndexUtils {
@Override
protected String doBody(String segmentFileName) throws IOException {
String format = "unknown";
- try (IndexInput in = dir.openInput(segmentFileName, IOContext.READ)) {
+ try (IndexInput in = dir.openInput(segmentFileName, IOContext.DEFAULT)) {
if (CodecUtil.CODEC_MAGIC == CodecUtil.readBEInt(in)) {
int actualVersion =
CodecUtil.checkHeaderNoMagic(


@@ -817,8 +817,8 @@ public final class BPIndexReorderer {
});
}
- IndexInput termIDsInput = tempDir.openInput(termIDsFileName, IOContext.READ);
- IndexInput startOffsets = tempDir.openInput(startOffsetsFileName, IOContext.READ);
+ IndexInput termIDsInput = tempDir.openInput(termIDsFileName, IOContext.DEFAULT);
+ IndexInput startOffsets = tempDir.openInput(startOffsetsFileName, IOContext.DEFAULT);
return new ForwardIndex(startOffsets, termIDsInput, maxTerm);
}


@@ -65,7 +65,6 @@ public final class ByteWritesTrackingDirectoryWrapper extends FilterDirectory {
case MERGE:
return new ByteTrackingIndexOutput(output, mergedBytes);
case DEFAULT:
- case READ:
default:
return output;
}


@@ -899,7 +899,7 @@ public abstract class BaseCompoundFormatTestCase extends BaseIndexFileFormatTestCase {
ReadBytesDirectoryWrapper readTrackingDir = new ReadBytesDirectoryWrapper(dir);
CompoundDirectory compoundDir =
- si.getCodec().compoundFormat().getCompoundReader(readTrackingDir, si, IOContext.READ);
+ si.getCodec().compoundFormat().getCompoundReader(readTrackingDir, si, IOContext.DEFAULT);
compoundDir.checkIntegrity();
Map<String, FixedBitSet> readBytes = readTrackingDir.getReadBytes();
assertEquals(createdFiles, readBytes.keySet());


@@ -374,7 +374,8 @@ abstract class BaseIndexFileFormatTestCase extends LuceneTestCase {
new SegmentWriteState(
null, dir, segmentInfo, fieldInfos, null, new IOContext(new FlushInfo(1, 20)));
- SegmentReadState readState = new SegmentReadState(dir, segmentInfo, fieldInfos, IOContext.READ);
+ SegmentReadState readState =
+     new SegmentReadState(dir, segmentInfo, fieldInfos, IOContext.DEFAULT);
// PostingsFormat
NormsProducer fakeNorms =


@@ -839,7 +839,7 @@ public class RandomPostingsTester {
currentFieldInfos = newFieldInfos;
SegmentReadState readState =
- new SegmentReadState(dir, segmentInfo, newFieldInfos, IOContext.READ);
+ new SegmentReadState(dir, segmentInfo, newFieldInfos, IOContext.DEFAULT);
return codec.postingsFormat().fieldsProducer(readState);
}


@@ -1798,20 +1798,17 @@ public abstract class LuceneTestCase extends Assert {
} else {
// Make a totally random IOContext:
final IOContext context;
- switch (random.nextInt(5)) {
+ switch (random.nextInt(4)) {
case 0:
context = IOContext.DEFAULT;
break;
case 1:
- context = IOContext.READ;
- break;
- case 2:
context = IOContext.READONCE;
break;
- case 3:
+ case 2:
context = new IOContext(new MergeInfo(randomNumDocs, size, true, -1));
break;
- case 4:
+ case 3:
context = new IOContext(new FlushInfo(randomNumDocs, size));
break;
default: