mirror of https://github.com/apache/lucene.git

Convert IOContext, MergeInfo, and FlushInfo to record classes (#13205)

parent 209aa05c0c
commit 8c4ec1dbef
@@ -23,8 +23,8 @@ version = "1.0.0-SNAPSHOT"
 group = "org.apache.lucene.tools"
 description = 'Doclet-based javadoc validation'
 
-sourceCompatibility = JavaVersion.VERSION_11
-targetCompatibility = JavaVersion.VERSION_11
+sourceCompatibility = JavaVersion.VERSION_21
+targetCompatibility = JavaVersion.VERSION_21
 
 tasks.withType(JavaCompile) {
   options.compilerArgs += ["--release", targetCompatibility.toString()]
@@ -237,6 +237,7 @@ public class MissingDoclet extends StandardDoclet {
       case CLASS:
       case INTERFACE:
       case ENUM:
+      case RECORD:
       case ANNOTATION_TYPE:
         if (level(element) >= CLASS) {
           checkComment(element);
@@ -87,6 +87,8 @@ API Changes
 * GITHUB#13146, GITHUB#13148: Remove ByteBufferIndexInput and only use MemorySegment APIs
   for MMapDirectory. (Uwe Schindler)
 
+* GITHUB#13205: Convert IOContext, MergeInfo, and FlushInfo to record classes. (Uwe Schindler)
+
 New Features
 ---------------------
 
@@ -151,6 +151,15 @@ may throw `IOException` on index problems, bubbling up unexpectedly to the caller
 `(Reverse)PathHierarchyTokenizer` now produces sequential (instead of overlapping) tokens with accurate
 offsets, making positional queries and highlighters possible for fields tokenized with this tokenizer.
 
+### Some classes converted to record classes (GITHUB#13207)
+
+Some classes with only final fields and no programming logic were converted to `record` classes.
+Those changes are mostly compatible with Lucene 9.x code (constructors, accessor methods), but
+a record's fields are only available through accessor methods. Some code may need to be refactored
+to access the members using method calls instead of field accesses. Affected classes:
+
+- `IOContext`, `MergeInfo`, and `FlushInfo` (GITHUB#13205)
+
 ## Migration from Lucene 9.0 to Lucene 9.1
 
 ### Test framework package migration and module (LUCENE-10301)
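To make the refactoring concrete, here is a minimal before/after sketch (illustrative only, not part of this commit; the class name and values are made up) of 9.x field access updated to the new record accessors:

```java
import org.apache.lucene.store.FlushInfo;
import org.apache.lucene.store.IOContext;

public class RecordMigrationExample {
  public static void main(String[] args) {
    IOContext ctx = new IOContext(new FlushInfo(1000, 4096L));

    // Lucene 9.x style: direct field access (no longer compiles against 10.0):
    // long estimate = ctx.flushInfo.estimatedSegmentSize;

    // Lucene 10.0 style: records expose their components through accessor methods:
    long estimate = ctx.flushInfo().estimatedSegmentSize();
    System.out.println("estimated segment size: " + estimate + " bytes");
  }
}
```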
@@ -27,7 +27,6 @@ import org.apache.lucene.index.CorruptIndexException;
 import org.apache.lucene.index.FieldInfo;
 import org.apache.lucene.index.IndexFileNames;
 import org.apache.lucene.index.SegmentReadState;
-import org.apache.lucene.store.IOContext;
 import org.apache.lucene.store.IndexInput;
 import org.apache.lucene.util.Accountable;
 import org.apache.lucene.util.Accountables;

@@ -54,7 +53,7 @@ public class VariableGapTermsIndexReader extends TermsIndexReaderBase {
             state.segmentInfo.name,
             state.segmentSuffix,
             VariableGapTermsIndexWriter.TERMS_INDEX_EXTENSION);
-    final IndexInput in = state.directory.openInput(fileName, new IOContext(state.context, true));
+    final IndexInput in = state.directory.openInput(fileName, state.context.toReadOnce());
     boolean success = false;
 
     try {
@@ -103,7 +103,7 @@ public final class DirectPostingsFormat extends PostingsFormat {
   @Override
   public FieldsProducer fieldsProducer(SegmentReadState state) throws IOException {
     FieldsProducer postings = PostingsFormat.forName("Lucene99").fieldsProducer(state);
-    if (state.context.context != IOContext.Context.MERGE) {
+    if (state.context.context() != IOContext.Context.MERGE) {
       FieldsProducer loadedPostings;
       try {
         postings.checkIntegrity();
@@ -296,7 +296,7 @@ public class ConcurrentMergeScheduler extends MergeScheduler {
       // This Directory is only supposed to be used during merging,
       // so all writes should have MERGE context, else there is a bug
       // somewhere that is failing to pass down the right IOContext:
-      assert context.context == IOContext.Context.MERGE : "got context=" + context.context;
+      assert context.context() == IOContext.Context.MERGE : "got context=" + context.context();
 
       return new RateLimitedIndexOutput(rateLimiter, in.createOutput(name, context));
     }
@@ -63,9 +63,9 @@ final class SegmentMerger {
       IOContext context,
       Executor intraMergeTaskExecutor)
       throws IOException {
-    if (context.context != IOContext.Context.MERGE) {
+    if (context.context() != IOContext.Context.MERGE) {
       throw new IllegalArgumentException(
-          "IOContext.context should be MERGE; got: " + context.context);
+          "IOContext.context should be MERGE; got: " + context.context());
     }
     mergeState = new MergeState(readers, segmentInfo, infoStream, intraMergeTaskExecutor);
     directory = dir;
@@ -369,7 +369,7 @@ public abstract class BufferedIndexInput extends IndexInput implements RandomAccessInput {
 
   /** Returns default buffer sizes for the given {@link IOContext} */
   public static int bufferSize(IOContext context) {
-    switch (context.context) {
+    switch (context.context()) {
       case MERGE:
         return MERGE_BUFFER_SIZE;
       case DEFAULT:
@@ -19,46 +19,7 @@ package org.apache.lucene.store;
 /**
  * A FlushInfo provides information required for a FLUSH context. It is used as part of an {@link
  * IOContext} in case of FLUSH context.
- */
-public class FlushInfo {
-
-  public final int numDocs;
-
-  public final long estimatedSegmentSize;
-
-  /**
-   * Creates a new {@link FlushInfo} instance from the values required for a FLUSH {@link IOContext}
-   * context.
  *
  * <p>These values are only estimates and are not the actual values.
  */
-  public FlushInfo(int numDocs, long estimatedSegmentSize) {
-    this.numDocs = numDocs;
-    this.estimatedSegmentSize = estimatedSegmentSize;
-  }
-
-  @Override
-  public int hashCode() {
-    final int prime = 31;
-    int result = 1;
-    result = prime * result + (int) (estimatedSegmentSize ^ (estimatedSegmentSize >>> 32));
-    result = prime * result + numDocs;
-    return result;
-  }
-
-  @Override
-  public boolean equals(Object obj) {
-    if (this == obj) return true;
-    if (obj == null) return false;
-    if (getClass() != obj.getClass()) return false;
-    FlushInfo other = (FlushInfo) obj;
-    if (estimatedSegmentSize != other.estimatedSegmentSize) return false;
-    if (numDocs != other.numDocs) return false;
-    return true;
-  }
-
-  @Override
-  public String toString() {
-    return "FlushInfo [numDocs=" + numDocs + ", estimatedSegmentSize=" + estimatedSegmentSize + "]";
-  }
-}
+public record FlushInfo(int numDocs, long estimatedSegmentSize) {}
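For readers unfamiliar with records: the `record` declaration makes the compiler generate the canonical constructor, one accessor per component, and component-wise `equals`/`hashCode`, so the handwritten versions deleted above are redundant. A small sketch (illustrative, not part of the commit):

```java
import org.apache.lucene.store.FlushInfo;

public class FlushInfoRecordExample {
  public static void main(String[] args) {
    FlushInfo a = new FlushInfo(100, 4096L);
    FlushInfo b = new FlushInfo(100, 4096L);

    // Generated accessors replace the old public final fields:
    System.out.println(a.numDocs() + " docs, ~" + a.estimatedSegmentSize() + " bytes");

    // Generated equals/hashCode compare component by component,
    // matching the semantics of the deleted handwritten methods:
    System.out.println(a.equals(b));                  // true
    System.out.println(a.hashCode() == b.hashCode()); // true
  }
}
```

One behavioral difference worth noting: the generated `toString` prints `FlushInfo[numDocs=100, estimatedSegmentSize=4096]`, which is formatted slightly differently from the old handwritten `FlushInfo [numDocs=...]` output.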
@@ -23,8 +23,26 @@ import java.util.Objects;
  * initialized as null as passed as a parameter to either {@link
  * org.apache.lucene.store.Directory#openInput(String, IOContext)} or {@link
  * org.apache.lucene.store.Directory#createOutput(String, IOContext)}
+ *
+ * @param context An object of a enumerator Context type
+ * @param mergeInfo must be given when {@code context == MERGE}
+ * @param flushInfo must be given when {@code context == FLUSH}
+ * @param readOnce This flag indicates that the file will be opened, then fully read sequentially
+ *     then closed.
+ * @param load This flag is used for files that are a small fraction of the total index size and are
+ *     expected to be heavily accessed in random-access fashion. Some {@link Directory}
+ *     implementations may choose to load such files into physical memory (e.g. Java heap) as a way
+ *     to provide stronger guarantees on query latency.
+ * @param randomAccess This flag indicates that the file will be accessed randomly. If this flag is
+ *     set, then readOnce will be false.
  */
-public class IOContext {
+public record IOContext(
+    Context context,
+    MergeInfo mergeInfo,
+    FlushInfo flushInfo,
+    boolean readOnce,
+    boolean load,
+    boolean randomAccess) {
 
   /**
    * Context is a enumerator which specifies the context in which the Directory is being used for.

@@ -36,30 +54,6 @@ public class IOContext {
     DEFAULT
   };
 
-  /** An object of a enumerator Context type */
-  public final Context context;
-
-  public final MergeInfo mergeInfo;
-
-  public final FlushInfo flushInfo;
-
-  /** This flag indicates that the file will be opened, then fully read sequentially then closed. */
-  public final boolean readOnce;
-
-  /**
-   * This flag indicates that the file will be accessed randomly. If this flag is set, then readOnce
-   * will be false.
-   */
-  public final boolean randomAccess;
-
-  /**
-   * This flag is used for files that are a small fraction of the total index size and are expected
-   * to be heavily accessed in random-access fashion. Some {@link Directory} implementations may
-   * choose to load such files into physical memory (e.g. Java heap) as a way to provide stronger
-   * guarantees on query latency. If this flag is set, then {@link #randomAccess} will be true.
-   */
-  public final boolean load;
-
   public static final IOContext DEFAULT = new IOContext(Context.DEFAULT);
 
   public static final IOContext READONCE = new IOContext(true, false, false);

@@ -70,106 +64,48 @@ public class IOContext {
 
   public static final IOContext RANDOM = new IOContext(false, false, true);
 
-  public IOContext() {
-    this(false, false, false);
-  }
-
-  public IOContext(FlushInfo flushInfo) {
-    assert flushInfo != null;
-    this.context = Context.FLUSH;
-    this.mergeInfo = null;
-    this.readOnce = false;
-    this.load = false;
-    this.randomAccess = false;
-    this.flushInfo = flushInfo;
-  }
-
-  public IOContext(Context context) {
-    this(context, null);
-  }
-
-  private IOContext(boolean readOnce, boolean load, boolean randomAccess) {
+  @SuppressWarnings("incomplete-switch")
+  public IOContext {
+    switch (context) {
+      case MERGE -> Objects.requireNonNull(
+          mergeInfo, "mergeInfo must not be null if context is MERGE");
+      case FLUSH -> Objects.requireNonNull(
+          flushInfo, "flushInfo must not be null if context is FLUSH");
+    }
+    if (load && readOnce) {
+      throw new IllegalArgumentException("load and readOnce are mutually exclusive");
+    }
     if (readOnce && randomAccess) {
-      throw new IllegalArgumentException("cannot be both readOnce and randomAccess");
+      throw new IllegalArgumentException("readOnce and randomAccess are mutually exclusive");
     }
     if (load && randomAccess == false) {
       throw new IllegalArgumentException("cannot be load but not randomAccess");
     }
-    this.context = Context.READ;
-    this.mergeInfo = null;
-    this.readOnce = readOnce;
-    this.load = load;
-    this.randomAccess = randomAccess;
-    this.flushInfo = null;
   }
 
+  private IOContext(boolean readOnce, boolean load, boolean randomAccess) {
+    this(Context.READ, null, null, readOnce, load, randomAccess);
+  }
+
+  private IOContext(Context context) {
+    this(context, null, null, false, false, false);
+  }
+
+  /** Creates an IOContext for flushing. */
+  public IOContext(FlushInfo flushInfo) {
+    this(Context.FLUSH, null, flushInfo, false, false, false);
+  }
+
+  /** Creates an IOContext for merging. */
   public IOContext(MergeInfo mergeInfo) {
-    this(Context.MERGE, mergeInfo);
-  }
-
-  private IOContext(Context context, MergeInfo mergeInfo) {
-    assert context != Context.MERGE || mergeInfo != null
-        : "MergeInfo must not be null if context is MERGE";
-    assert context != Context.FLUSH : "Use IOContext(FlushInfo) to create a FLUSH IOContext";
-    this.context = context;
-    this.readOnce = false;
-    this.load = false;
-    this.randomAccess = false;
-    this.mergeInfo = mergeInfo;
-    this.flushInfo = null;
+    this(Context.MERGE, mergeInfo, null, false, false, false);
   }
 
   /**
-   * This constructor is used to initialize a {@link IOContext} instance with a new value for the
-   * readOnce variable.
-   *
-   * @param ctxt {@link IOContext} object whose information is used to create the new instance
-   *     except the readOnce variable.
-   * @param readOnce The new {@link IOContext} object will use this value for readOnce.
+   * Return a copy of this IOContext with {@link #readOnce} set to {@code true}. The {@link #load}
+   * flag is set to {@code false}.
    */
-  public IOContext(IOContext ctxt, boolean readOnce) {
-    this.context = ctxt.context;
-    this.mergeInfo = ctxt.mergeInfo;
-    this.flushInfo = ctxt.flushInfo;
-    this.readOnce = readOnce;
-    this.randomAccess = ctxt.randomAccess;
-    this.load = false;
-  }
-
-  @Override
-  public int hashCode() {
-    return Objects.hash(context, flushInfo, mergeInfo, readOnce, load, randomAccess);
-  }
-
-  @Override
-  public boolean equals(Object obj) {
-    if (this == obj) return true;
-    if (obj == null) return false;
-    if (getClass() != obj.getClass()) return false;
-    IOContext other = (IOContext) obj;
-    if (context != other.context) return false;
-    if (!Objects.equals(flushInfo, other.flushInfo)) return false;
-    if (!Objects.equals(mergeInfo, other.mergeInfo)) return false;
-    if (readOnce != other.readOnce) return false;
-    if (load != other.load) return false;
-    if (randomAccess != other.randomAccess) return false;
-    return true;
-  }
-
-  @Override
-  public String toString() {
-    return "IOContext [context="
-        + context
-        + ", mergeInfo="
-        + mergeInfo
-        + ", flushInfo="
-        + flushInfo
-        + ", readOnce="
-        + readOnce
-        + ", load="
-        + load
-        + ", randomAccess="
-        + randomAccess
-        + "]";
-  }
+  public IOContext toReadOnce() {
+    return new IOContext(context, mergeInfo, flushInfo, true, false, randomAccess);
+  }
 }
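A sketch of the resulting behavior (illustrative, not from the commit): the compact constructor validates components before they are stored, and the new `toReadOnce()` replaces the removed `IOContext(IOContext ctxt, boolean readOnce)` copy constructor. The canonical constructor call below is shown only for demonstration; normal code uses the predefined constants or the `FlushInfo`/`MergeInfo` constructors.

```java
import org.apache.lucene.store.FlushInfo;
import org.apache.lucene.store.IOContext;

public class IOContextRecordExample {
  public static void main(String[] args) {
    // The compact constructor rejects a MERGE context without MergeInfo:
    try {
      new IOContext(IOContext.Context.MERGE, null, null, false, false, false);
    } catch (NullPointerException e) {
      System.out.println(e.getMessage()); // mergeInfo must not be null if context is MERGE
    }

    // Lucene 9.x: new IOContext(ctx, true); Lucene 10.0: ctx.toReadOnce():
    IOContext flush = new IOContext(new FlushInfo(100, 8192L));
    IOContext readOnce = flush.toReadOnce();
    System.out.println(readOnce.readOnce()); // true
    System.out.println(readOnce.load());     // false (toReadOnce always clears load)
  }
}
```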
@@ -88,7 +88,7 @@ public class MMapDirectory extends FSDirectory {
    * opening them if they use the {@link IOContext#LOAD} I/O context.
    */
   public static final BiPredicate<String, IOContext> BASED_ON_LOAD_IO_CONTEXT =
-      (filename, context) -> context.load;
+      (filename, context) -> context.load();
 
   private BiPredicate<String, IOContext> preload = NO_FILES;
 
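For context, a short usage sketch: the predicate above is meant to be handed to `MMapDirectory#setPreload` (the setter backing the `preload` field shown in the hunk), so that exactly the files opened with the `load()` flag set are mapped eagerly. The path and setup are illustrative:

```java
import java.nio.file.Paths;
import org.apache.lucene.store.MMapDirectory;

public class PreloadExample {
  public static void main(String[] args) throws Exception {
    try (MMapDirectory dir = new MMapDirectory(Paths.get("/tmp/example-index"))) {
      // Preload only files whose IOContext has the load() flag set:
      dir.setPreload(MMapDirectory.BASED_ON_LOAD_IO_CONTEXT);
    }
  }
}
```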
@@ -19,65 +19,8 @@ package org.apache.lucene.store;
 /**
  * A MergeInfo provides information required for a MERGE context. It is used as part of an {@link
  * IOContext} in case of MERGE context.
- */
-public class MergeInfo {
-
-  public final int totalMaxDoc;
-
-  public final long estimatedMergeBytes;
-
-  public final boolean isExternal;
-
-  public final int mergeMaxNumSegments;
-
-  /**
-   * Creates a new {@link MergeInfo} instance from the values required for a MERGE {@link IOContext}
-   * context.
  *
  * <p>These values are only estimates and are not the actual values.
  */
-  public MergeInfo(
-      int totalMaxDoc, long estimatedMergeBytes, boolean isExternal, int mergeMaxNumSegments) {
-    this.totalMaxDoc = totalMaxDoc;
-    this.estimatedMergeBytes = estimatedMergeBytes;
-    this.isExternal = isExternal;
-    this.mergeMaxNumSegments = mergeMaxNumSegments;
-  }
-
-  @Override
-  public int hashCode() {
-    final int prime = 31;
-    int result = 1;
-    result = prime * result + (int) (estimatedMergeBytes ^ (estimatedMergeBytes >>> 32));
-    result = prime * result + (isExternal ? 1231 : 1237);
-    result = prime * result + mergeMaxNumSegments;
-    result = prime * result + totalMaxDoc;
-    return result;
-  }
-
-  @Override
-  public boolean equals(Object obj) {
-    if (this == obj) return true;
-    if (obj == null) return false;
-    if (getClass() != obj.getClass()) return false;
-    MergeInfo other = (MergeInfo) obj;
-    if (estimatedMergeBytes != other.estimatedMergeBytes) return false;
-    if (isExternal != other.isExternal) return false;
-    if (mergeMaxNumSegments != other.mergeMaxNumSegments) return false;
-    if (totalMaxDoc != other.totalMaxDoc) return false;
-    return true;
-  }
-
-  @Override
-  public String toString() {
-    return "MergeInfo [totalMaxDoc="
-        + totalMaxDoc
-        + ", estimatedMergeBytes="
-        + estimatedMergeBytes
-        + ", isExternal="
-        + isExternal
-        + ", mergeMaxNumSegments="
-        + mergeMaxNumSegments
-        + "]";
-  }
-}
+public record MergeInfo(
+    int totalMaxDoc, long estimatedMergeBytes, boolean isExternal, int mergeMaxNumSegments) {}
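Putting the pieces together, a merge-scoped context is built from a `MergeInfo` record, and consumers read the estimates back through accessors. The sketch below (illustrative values, not from the commit) mirrors the guard `SegmentMerger` applies in an earlier hunk:

```java
import org.apache.lucene.store.IOContext;
import org.apache.lucene.store.MergeInfo;

public class MergeContextExample {
  public static void main(String[] args) {
    // Components: totalMaxDoc, estimatedMergeBytes, isExternal, mergeMaxNumSegments
    MergeInfo info = new MergeInfo(50_000, 128L * 1024 * 1024, false, 10);
    IOContext ctx = new IOContext(info);

    // Same check SegmentMerger performs on every incoming context:
    if (ctx.context() != IOContext.Context.MERGE) {
      throw new IllegalArgumentException(
          "IOContext.context should be MERGE; got: " + ctx.context());
    }
    System.out.println(ctx.mergeInfo().estimatedMergeBytes() + " bytes estimated");
  }
}
```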
@@ -223,10 +223,10 @@ public class NRTCachingDirectory extends FilterDirectory implements Accountable {
     // size=" + (merge==null ? 0 : merge.estimatedMergeBytes));
 
     long bytes = 0;
-    if (context.mergeInfo != null) {
-      bytes = context.mergeInfo.estimatedMergeBytes;
-    } else if (context.flushInfo != null) {
-      bytes = context.flushInfo.estimatedSegmentSize;
+    if (context.mergeInfo() != null) {
+      bytes = context.mergeInfo().estimatedMergeBytes();
+    } else if (context.flushInfo() != null) {
+      bytes = context.flushInfo().estimatedSegmentSize();
     } else {
       return false;
     }
@@ -139,13 +139,13 @@ final class PosixNativeAccess extends NativeAccess {
   private Integer mapIOContext(IOContext ctx) {
     // Merging always wins and implies sequential access, because kernel is advised to free pages
     // after use:
-    if (ctx.context == Context.MERGE) {
+    if (ctx.context() == Context.MERGE) {
       return POSIX_MADV_SEQUENTIAL;
     }
-    if (ctx.randomAccess) {
+    if (ctx.randomAccess()) {
       return POSIX_MADV_RANDOM;
     }
-    if (ctx.readOnce) {
+    if (ctx.readOnce()) {
       return POSIX_MADV_SEQUENTIAL;
     }
     return null;
@@ -48,14 +48,14 @@ public final class ByteWritesTrackingDirectoryWrapper extends FilterDirectory {
   @Override
   public IndexOutput createOutput(String name, IOContext ioContext) throws IOException {
     IndexOutput output = in.createOutput(name, ioContext);
-    return createByteTrackingOutput(output, ioContext.context);
+    return createByteTrackingOutput(output, ioContext.context());
   }
 
   @Override
   public IndexOutput createTempOutput(String prefix, String suffix, IOContext ioContext)
       throws IOException {
     IndexOutput output = in.createTempOutput(prefix, suffix, ioContext);
-    return trackTempOutput ? createByteTrackingOutput(output, ioContext.context) : output;
+    return trackTempOutput ? createByteTrackingOutput(output, ioContext.context()) : output;
   }
 
   private IndexOutput createByteTrackingOutput(IndexOutput output, IOContext.Context context) {
@@ -158,8 +158,8 @@ public class DirectIODirectory extends FilterDirectory {
    * requested from delegate directory.
    */
   protected boolean useDirectIO(String name, IOContext context, OptionalLong fileLength) {
-    return context.context == Context.MERGE
-        && context.mergeInfo.estimatedMergeBytes >= minBytesDirect
+    return context.context() == Context.MERGE
+        && context.mergeInfo().estimatedMergeBytes() >= minBytesDirect
         && fileLength.orElse(minBytesDirect) >= minBytesDirect;
   }
 
@@ -146,7 +146,7 @@ public abstract class BaseCompoundFormatTestCase extends BaseIndexFileFormatTestCase {
   // LUCENE-5724: things like NRTCachingDir rely upon IOContext being properly passed down
   public void testPassIOContext() throws IOException {
     final String testfile = "_123.test";
-    final IOContext myContext = new IOContext();
+    final IOContext myContext = IOContext.DEFAULT;
 
     Directory dir =
         new FilterDirectory(newDirectory()) {
@@ -465,7 +465,7 @@ public abstract class BasePostingsFormatTestCase extends BaseIndexFileFormatTestCase {
       public void write(Fields fields, NormsProducer norms) throws IOException {
         fieldsConsumer.write(fields, norms);
 
-        boolean isMerge = state.context.context == IOContext.Context.MERGE;
+        boolean isMerge = state.context.context() == IOContext.Context.MERGE;
 
         // We only use one thread for flushing
         // in this test:
@@ -1778,18 +1778,19 @@ public abstract class LuceneTestCase extends Assert {
   public static IOContext newIOContext(Random random, IOContext oldContext) {
     final int randomNumDocs = random.nextInt(4192);
     final int size = random.nextInt(512) * randomNumDocs;
-    if (oldContext.flushInfo != null) {
+    if (oldContext.flushInfo() != null) {
       // Always return at least the estimatedSegmentSize of
       // the incoming IOContext:
       return new IOContext(
-          new FlushInfo(randomNumDocs, Math.max(oldContext.flushInfo.estimatedSegmentSize, size)));
-    } else if (oldContext.mergeInfo != null) {
+          new FlushInfo(
+              randomNumDocs, Math.max(oldContext.flushInfo().estimatedSegmentSize(), size)));
+    } else if (oldContext.mergeInfo() != null) {
       // Always return at least the estimatedMergeBytes of
       // the incoming IOContext:
       return new IOContext(
           new MergeInfo(
               randomNumDocs,
-              Math.max(oldContext.mergeInfo.estimatedMergeBytes, size),
+              Math.max(oldContext.mergeInfo().estimatedMergeBytes(), size),
               random.nextBoolean(),
               TestUtil.nextInt(random, 1, 100)));
     } else {