mirror of https://github.com/apache/lucene.git
Convert IOContext, MergeInfo, and FlushInfo to record classes (#13205)
This commit is contained in:
parent
209aa05c0c
commit
8c4ec1dbef
@@ -23,8 +23,8 @@ version = "1.0.0-SNAPSHOT"
 group = "org.apache.lucene.tools"
 description = 'Doclet-based javadoc validation'
 
-sourceCompatibility = JavaVersion.VERSION_11
-targetCompatibility = JavaVersion.VERSION_11
+sourceCompatibility = JavaVersion.VERSION_21
+targetCompatibility = JavaVersion.VERSION_21
 
 tasks.withType(JavaCompile) {
   options.compilerArgs += ["--release", targetCompatibility.toString()]
@@ -237,6 +237,7 @@ public class MissingDoclet extends StandardDoclet {
       case CLASS:
       case INTERFACE:
       case ENUM:
+      case RECORD:
       case ANNOTATION_TYPE:
         if (level(element) >= CLASS) {
           checkComment(element);
@@ -87,6 +87,8 @@ API Changes
 * GITHUB#13146, GITHUB#13148: Remove ByteBufferIndexInput and only use MemorySegment APIs
   for MMapDirectory. (Uwe Schindler)
 
+* GITHUB#13205: Convert IOContext, MergeInfo, and FlushInfo to record classes. (Uwe Schindler)
+
 New Features
 ---------------------
 
@@ -151,6 +151,15 @@ may throw `IOException` on index problems, bubbling up unexpectedly to the calle
 `(Reverse)PathHierarchyTokenizer` now produces sequential (instead of overlapping) tokens with accurate
 offsets, making positional queries and highlighters possible for fields tokenized with this tokenizer.
 
+### Some classes converted to record classes (GITHUB#13207)
+
+Some classes with only final fields and no programming logic were converted to `record` classes.
+Those changes are mostly compatible with Lucene 9.x code (constructors, accessor methods), but a
+record's fields are only available through accessor methods. Some code may need to be refactored to
+access the members using method calls instead of field accesses. Affected classes:
+
+- `IOContext`, `MergeInfo`, and `FlushInfo` (GITHUB#13205)
+
 ## Migration from Lucene 9.0 to Lucene 9.1
 
 ### Test framework package migration and module (LUCENE-10301)
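For illustration, a minimal Java sketch of the refactoring this migration note describes, assuming caller code (hypothetical variable names) that read the former public fields:

    // Lucene 9.x, public final fields (no longer compiles against 10.x):
    //   long estimate = ctx.mergeInfo.estimatedMergeBytes;
    // Lucene 10.x, record accessor methods:
    long estimate = ctx.mergeInfo().estimatedMergeBytes();
    boolean merging = ctx.context() == IOContext.Context.MERGE;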
@@ -27,7 +27,6 @@ import org.apache.lucene.index.CorruptIndexException;
 import org.apache.lucene.index.FieldInfo;
 import org.apache.lucene.index.IndexFileNames;
 import org.apache.lucene.index.SegmentReadState;
-import org.apache.lucene.store.IOContext;
 import org.apache.lucene.store.IndexInput;
 import org.apache.lucene.util.Accountable;
 import org.apache.lucene.util.Accountables;
@@ -54,7 +53,7 @@ public class VariableGapTermsIndexReader extends TermsIndexReaderBase {
         state.segmentInfo.name,
         state.segmentSuffix,
         VariableGapTermsIndexWriter.TERMS_INDEX_EXTENSION);
-    final IndexInput in = state.directory.openInput(fileName, new IOContext(state.context, true));
+    final IndexInput in = state.directory.openInput(fileName, state.context.toReadOnce());
     boolean success = false;
 
     try {
@@ -103,7 +103,7 @@ public final class DirectPostingsFormat extends PostingsFormat {
   @Override
   public FieldsProducer fieldsProducer(SegmentReadState state) throws IOException {
     FieldsProducer postings = PostingsFormat.forName("Lucene99").fieldsProducer(state);
-    if (state.context.context != IOContext.Context.MERGE) {
+    if (state.context.context() != IOContext.Context.MERGE) {
       FieldsProducer loadedPostings;
       try {
         postings.checkIntegrity();
@@ -296,7 +296,7 @@ public class ConcurrentMergeScheduler extends MergeScheduler {
     // This Directory is only supposed to be used during merging,
     // so all writes should have MERGE context, else there is a bug
    // somewhere that is failing to pass down the right IOContext:
-    assert context.context == IOContext.Context.MERGE : "got context=" + context.context;
+    assert context.context() == IOContext.Context.MERGE : "got context=" + context.context();
 
     return new RateLimitedIndexOutput(rateLimiter, in.createOutput(name, context));
   }
@@ -63,9 +63,9 @@ final class SegmentMerger {
       IOContext context,
       Executor intraMergeTaskExecutor)
       throws IOException {
-    if (context.context != IOContext.Context.MERGE) {
+    if (context.context() != IOContext.Context.MERGE) {
       throw new IllegalArgumentException(
-          "IOContext.context should be MERGE; got: " + context.context);
+          "IOContext.context should be MERGE; got: " + context.context());
     }
     mergeState = new MergeState(readers, segmentInfo, infoStream, intraMergeTaskExecutor);
     directory = dir;
@@ -369,7 +369,7 @@ public abstract class BufferedIndexInput extends IndexInput implements RandomAcc
 
   /** Returns default buffer sizes for the given {@link IOContext} */
   public static int bufferSize(IOContext context) {
-    switch (context.context) {
+    switch (context.context()) {
       case MERGE:
         return MERGE_BUFFER_SIZE;
       case DEFAULT:
@@ -19,46 +19,7 @@ package org.apache.lucene.store;
 /**
  * A FlushInfo provides information required for a FLUSH context. It is used as part of an {@link
  * IOContext} in case of FLUSH context.
  *
  * <p>These values are only estimates and are not the actual values.
  */
-public class FlushInfo {
-
-  public final int numDocs;
-
-  public final long estimatedSegmentSize;
-
-  /**
-   * Creates a new {@link FlushInfo} instance from the values required for a FLUSH {@link IOContext}
-   * context.
-   *
-   * <p>These values are only estimates and are not the actual values.
-   */
-  public FlushInfo(int numDocs, long estimatedSegmentSize) {
-    this.numDocs = numDocs;
-    this.estimatedSegmentSize = estimatedSegmentSize;
-  }
-
-  @Override
-  public int hashCode() {
-    final int prime = 31;
-    int result = 1;
-    result = prime * result + (int) (estimatedSegmentSize ^ (estimatedSegmentSize >>> 32));
-    result = prime * result + numDocs;
-    return result;
-  }
-
-  @Override
-  public boolean equals(Object obj) {
-    if (this == obj) return true;
-    if (obj == null) return false;
-    if (getClass() != obj.getClass()) return false;
-    FlushInfo other = (FlushInfo) obj;
-    if (estimatedSegmentSize != other.estimatedSegmentSize) return false;
-    if (numDocs != other.numDocs) return false;
-    return true;
-  }
-
-  @Override
-  public String toString() {
-    return "FlushInfo [numDocs=" + numDocs + ", estimatedSegmentSize=" + estimatedSegmentSize + "]";
-  }
-}
+public record FlushInfo(int numDocs, long estimatedSegmentSize) {}
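The one-line record replaces all of the removed boilerplate: the compiler generates the canonical constructor, the numDocs()/estimatedSegmentSize() accessors, and value-based equals/hashCode (the generated toString format differs slightly from the handwritten one). A minimal sketch of the resulting API:

    FlushInfo info = new FlushInfo(128, 4096L);     // canonical constructor, as before
    int docs = info.numDocs();                      // accessor method replaces the public field
    assert info.equals(new FlushInfo(128, 4096L));  // record equals compares component values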
@@ -23,8 +23,26 @@ import java.util.Objects;
  * initialized as null as passed as a parameter to either {@link
  * org.apache.lucene.store.Directory#openInput(String, IOContext)} or {@link
  * org.apache.lucene.store.Directory#createOutput(String, IOContext)}
+ *
+ * @param context An object of a enumerator Context type
+ * @param mergeInfo must be given when {@code context == MERGE}
+ * @param flushInfo must be given when {@code context == FLUSH}
+ * @param readOnce This flag indicates that the file will be opened, then fully read sequentially
+ *     then closed.
+ * @param load This flag is used for files that are a small fraction of the total index size and are
+ *     expected to be heavily accessed in random-access fashion. Some {@link Directory}
+ *     implementations may choose to load such files into physical memory (e.g. Java heap) as a way
+ *     to provide stronger guarantees on query latency.
+ * @param randomAccess This flag indicates that the file will be accessed randomly. If this flag is
+ *     set, then readOnce will be false.
  */
-public class IOContext {
+public record IOContext(
+    Context context,
+    MergeInfo mergeInfo,
+    FlushInfo flushInfo,
+    boolean readOnce,
+    boolean load,
+    boolean randomAccess) {
 
   /**
    * Context is a enumerator which specifies the context in which the Directory is being used for.
@@ -36,30 +54,6 @@ public class IOContext {
     DEFAULT
   };
 
-  /** An object of a enumerator Context type */
-  public final Context context;
-
-  public final MergeInfo mergeInfo;
-
-  public final FlushInfo flushInfo;
-
-  /** This flag indicates that the file will be opened, then fully read sequentially then closed. */
-  public final boolean readOnce;
-
-  /**
-   * This flag indicates that the file will be accessed randomly. If this flag is set, then readOnce
-   * will be false.
-   */
-  public final boolean randomAccess;
-
-  /**
-   * This flag is used for files that are a small fraction of the total index size and are expected
-   * to be heavily accessed in random-access fashion. Some {@link Directory} implementations may
-   * choose to load such files into physical memory (e.g. Java heap) as a way to provide stronger
-   * guarantees on query latency. If this flag is set, then {@link #randomAccess} will be true.
-   */
-  public final boolean load;
-
   public static final IOContext DEFAULT = new IOContext(Context.DEFAULT);
 
   public static final IOContext READONCE = new IOContext(true, false, false);
@@ -70,106 +64,48 @@
 
   public static final IOContext RANDOM = new IOContext(false, false, true);
 
-  public IOContext() {
-    this(false, false, false);
-  }
-
-  public IOContext(FlushInfo flushInfo) {
-    assert flushInfo != null;
-    this.context = Context.FLUSH;
-    this.mergeInfo = null;
-    this.readOnce = false;
-    this.load = false;
-    this.randomAccess = false;
-    this.flushInfo = flushInfo;
-  }
-
-  public IOContext(Context context) {
-    this(context, null);
-  }
-
-  private IOContext(boolean readOnce, boolean load, boolean randomAccess) {
+  @SuppressWarnings("incomplete-switch")
+  public IOContext {
+    switch (context) {
+      case MERGE -> Objects.requireNonNull(
+          mergeInfo, "mergeInfo must not be null if context is MERGE");
+      case FLUSH -> Objects.requireNonNull(
+          flushInfo, "flushInfo must not be null if context is FLUSH");
+    }
+    if (load && readOnce) {
+      throw new IllegalArgumentException("load and readOnce are mutually exclusive");
+    }
     if (readOnce && randomAccess) {
-      throw new IllegalArgumentException("cannot be both readOnce and randomAccess");
+      throw new IllegalArgumentException("readOnce and randomAccess are mutually exclusive");
     }
     if (load && randomAccess == false) {
       throw new IllegalArgumentException("cannot be load but not randomAccess");
     }
-    this.context = Context.READ;
-    this.mergeInfo = null;
-    this.readOnce = readOnce;
-    this.load = load;
-    this.randomAccess = randomAccess;
-    this.flushInfo = null;
   }
 
+  private IOContext(boolean readOnce, boolean load, boolean randomAccess) {
+    this(Context.READ, null, null, readOnce, load, randomAccess);
+  }
+
+  private IOContext(Context context) {
+    this(context, null, null, false, false, false);
+  }
+
+  /** Creates an IOContext for flushing. */
+  public IOContext(FlushInfo flushInfo) {
+    this(Context.FLUSH, null, flushInfo, false, false, false);
+  }
+
+  /** Creates an IOContext for merging. */
   public IOContext(MergeInfo mergeInfo) {
-    this(Context.MERGE, mergeInfo);
-  }
-
-  private IOContext(Context context, MergeInfo mergeInfo) {
-    assert context != Context.MERGE || mergeInfo != null
-        : "MergeInfo must not be null if context is MERGE";
-    assert context != Context.FLUSH : "Use IOContext(FlushInfo) to create a FLUSH IOContext";
-    this.context = context;
-    this.readOnce = false;
-    this.load = false;
-    this.randomAccess = false;
-    this.mergeInfo = mergeInfo;
-    this.flushInfo = null;
+    this(Context.MERGE, mergeInfo, null, false, false, false);
   }
 
   /**
-   * This constructor is used to initialize a {@link IOContext} instance with a new value for the
-   * readOnce variable.
-   *
-   * @param ctxt {@link IOContext} object whose information is used to create the new instance
-   *     except the readOnce variable.
-   * @param readOnce The new {@link IOContext} object will use this value for readOnce.
+   * Return a copy of this IOContext with {@link #readOnce} set to {@code true}. The {@link #load}
+   * flag is set to {@code false}.
    */
-  public IOContext(IOContext ctxt, boolean readOnce) {
-    this.context = ctxt.context;
-    this.mergeInfo = ctxt.mergeInfo;
-    this.flushInfo = ctxt.flushInfo;
-    this.readOnce = readOnce;
-    this.randomAccess = ctxt.randomAccess;
-    this.load = false;
-  }
-
-  @Override
-  public int hashCode() {
-    return Objects.hash(context, flushInfo, mergeInfo, readOnce, load, randomAccess);
-  }
-
-  @Override
-  public boolean equals(Object obj) {
-    if (this == obj) return true;
-    if (obj == null) return false;
-    if (getClass() != obj.getClass()) return false;
-    IOContext other = (IOContext) obj;
-    if (context != other.context) return false;
-    if (!Objects.equals(flushInfo, other.flushInfo)) return false;
-    if (!Objects.equals(mergeInfo, other.mergeInfo)) return false;
-    if (readOnce != other.readOnce) return false;
-    if (load != other.load) return false;
-    if (randomAccess != other.randomAccess) return false;
-    return true;
-  }
-
-  @Override
-  public String toString() {
-    return "IOContext [context="
-        + context
-        + ", mergeInfo="
-        + mergeInfo
-        + ", flushInfo="
-        + flushInfo
-        + ", readOnce="
-        + readOnce
-        + ", load="
-        + load
-        + ", randomAccess="
-        + randomAccess
-        + "]";
+  public IOContext toReadOnce() {
+    return new IOContext(context, mergeInfo, flushInfo, true, false, randomAccess);
   }
 }
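For illustration, a minimal usage sketch of the compact constructor's validation and the new toReadOnce() copy method that replaces the removed IOContext(IOContext, boolean) constructor:

    IOContext ctx = IOContext.DEFAULT.toReadOnce();  // copy with readOnce=true, load=false
    // Invalid component combinations now fail fast in the compact constructor, e.g.:
    //   new IOContext(IOContext.Context.MERGE, null, null, false, false, false)
    //   throws NullPointerException("mergeInfo must not be null if context is MERGE")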
@@ -88,7 +88,7 @@ public class MMapDirectory extends FSDirectory {
    * opening them if they use the {@link IOContext#LOAD} I/O context.
    */
   public static final BiPredicate<String, IOContext> BASED_ON_LOAD_IO_CONTEXT =
-      (filename, context) -> context.load;
+      (filename, context) -> context.load();
 
   private BiPredicate<String, IOContext> preload = NO_FILES;
 
@@ -19,65 +19,8 @@ package org.apache.lucene.store;
 /**
  * A MergeInfo provides information required for a MERGE context. It is used as part of an {@link
  * IOContext} in case of MERGE context.
  *
  * <p>These values are only estimates and are not the actual values.
  */
-public class MergeInfo {
-
-  public final int totalMaxDoc;
-
-  public final long estimatedMergeBytes;
-
-  public final boolean isExternal;
-
-  public final int mergeMaxNumSegments;
-
-  /**
-   * Creates a new {@link MergeInfo} instance from the values required for a MERGE {@link IOContext}
-   * context.
-   *
-   * <p>These values are only estimates and are not the actual values.
-   */
-  public MergeInfo(
-      int totalMaxDoc, long estimatedMergeBytes, boolean isExternal, int mergeMaxNumSegments) {
-    this.totalMaxDoc = totalMaxDoc;
-    this.estimatedMergeBytes = estimatedMergeBytes;
-    this.isExternal = isExternal;
-    this.mergeMaxNumSegments = mergeMaxNumSegments;
-  }
-
-  @Override
-  public int hashCode() {
-    final int prime = 31;
-    int result = 1;
-    result = prime * result + (int) (estimatedMergeBytes ^ (estimatedMergeBytes >>> 32));
-    result = prime * result + (isExternal ? 1231 : 1237);
-    result = prime * result + mergeMaxNumSegments;
-    result = prime * result + totalMaxDoc;
-    return result;
-  }
-
-  @Override
-  public boolean equals(Object obj) {
-    if (this == obj) return true;
-    if (obj == null) return false;
-    if (getClass() != obj.getClass()) return false;
-    MergeInfo other = (MergeInfo) obj;
-    if (estimatedMergeBytes != other.estimatedMergeBytes) return false;
-    if (isExternal != other.isExternal) return false;
-    if (mergeMaxNumSegments != other.mergeMaxNumSegments) return false;
-    if (totalMaxDoc != other.totalMaxDoc) return false;
-    return true;
-  }
-
-  @Override
-  public String toString() {
-    return "MergeInfo [totalMaxDoc="
-        + totalMaxDoc
-        + ", estimatedMergeBytes="
-        + estimatedMergeBytes
-        + ", isExternal="
-        + isExternal
-        + ", mergeMaxNumSegments="
-        + mergeMaxNumSegments
-        + "]";
-  }
-}
+public record MergeInfo(
+    int totalMaxDoc, long estimatedMergeBytes, boolean isExternal, int mergeMaxNumSegments) {}
@@ -223,10 +223,10 @@ public class NRTCachingDirectory extends FilterDirectory implements Accountable
     //   size=" + (merge==null ? 0 : merge.estimatedMergeBytes));
 
     long bytes = 0;
-    if (context.mergeInfo != null) {
-      bytes = context.mergeInfo.estimatedMergeBytes;
-    } else if (context.flushInfo != null) {
-      bytes = context.flushInfo.estimatedSegmentSize;
+    if (context.mergeInfo() != null) {
+      bytes = context.mergeInfo().estimatedMergeBytes();
+    } else if (context.flushInfo() != null) {
+      bytes = context.flushInfo().estimatedSegmentSize();
     } else {
       return false;
     }
@@ -139,13 +139,13 @@ final class PosixNativeAccess extends NativeAccess {
   private Integer mapIOContext(IOContext ctx) {
     // Merging always wins and implies sequential access, because kernel is advised to free pages
     // after use:
-    if (ctx.context == Context.MERGE) {
+    if (ctx.context() == Context.MERGE) {
       return POSIX_MADV_SEQUENTIAL;
     }
-    if (ctx.randomAccess) {
+    if (ctx.randomAccess()) {
       return POSIX_MADV_RANDOM;
     }
-    if (ctx.readOnce) {
+    if (ctx.readOnce()) {
       return POSIX_MADV_SEQUENTIAL;
     }
     return null;
@@ -48,14 +48,14 @@ public final class ByteWritesTrackingDirectoryWrapper extends FilterDirectory {
   @Override
   public IndexOutput createOutput(String name, IOContext ioContext) throws IOException {
     IndexOutput output = in.createOutput(name, ioContext);
-    return createByteTrackingOutput(output, ioContext.context);
+    return createByteTrackingOutput(output, ioContext.context());
   }
 
   @Override
   public IndexOutput createTempOutput(String prefix, String suffix, IOContext ioContext)
       throws IOException {
     IndexOutput output = in.createTempOutput(prefix, suffix, ioContext);
-    return trackTempOutput ? createByteTrackingOutput(output, ioContext.context) : output;
+    return trackTempOutput ? createByteTrackingOutput(output, ioContext.context()) : output;
   }
 
   private IndexOutput createByteTrackingOutput(IndexOutput output, IOContext.Context context) {
@@ -158,8 +158,8 @@ public class DirectIODirectory extends FilterDirectory {
    * requested from delegate directory.
    */
   protected boolean useDirectIO(String name, IOContext context, OptionalLong fileLength) {
-    return context.context == Context.MERGE
-        && context.mergeInfo.estimatedMergeBytes >= minBytesDirect
+    return context.context() == Context.MERGE
+        && context.mergeInfo().estimatedMergeBytes() >= minBytesDirect
         && fileLength.orElse(minBytesDirect) >= minBytesDirect;
   }
 
@@ -146,7 +146,7 @@ public abstract class BaseCompoundFormatTestCase extends BaseIndexFileFormatTest
   // LUCENE-5724: things like NRTCachingDir rely upon IOContext being properly passed down
   public void testPassIOContext() throws IOException {
     final String testfile = "_123.test";
-    final IOContext myContext = new IOContext();
+    final IOContext myContext = IOContext.DEFAULT;
 
     Directory dir =
         new FilterDirectory(newDirectory()) {
@@ -465,7 +465,7 @@ public abstract class BasePostingsFormatTestCase extends BaseIndexFileFormatTest
     public void write(Fields fields, NormsProducer norms) throws IOException {
       fieldsConsumer.write(fields, norms);
 
-      boolean isMerge = state.context.context == IOContext.Context.MERGE;
+      boolean isMerge = state.context.context() == IOContext.Context.MERGE;
 
       // We only use one thread for flushing
       // in this test:
@@ -1778,18 +1778,19 @@ public abstract class LuceneTestCase extends Assert {
   public static IOContext newIOContext(Random random, IOContext oldContext) {
     final int randomNumDocs = random.nextInt(4192);
     final int size = random.nextInt(512) * randomNumDocs;
-    if (oldContext.flushInfo != null) {
+    if (oldContext.flushInfo() != null) {
       // Always return at least the estimatedSegmentSize of
       // the incoming IOContext:
       return new IOContext(
-          new FlushInfo(randomNumDocs, Math.max(oldContext.flushInfo.estimatedSegmentSize, size)));
-    } else if (oldContext.mergeInfo != null) {
+          new FlushInfo(
+              randomNumDocs, Math.max(oldContext.flushInfo().estimatedSegmentSize(), size)));
+    } else if (oldContext.mergeInfo() != null) {
       // Always return at least the estimatedMergeBytes of
       // the incoming IOContext:
       return new IOContext(
           new MergeInfo(
               randomNumDocs,
-              Math.max(oldContext.mergeInfo.estimatedMergeBytes, size),
+              Math.max(oldContext.mergeInfo().estimatedMergeBytes(), size),
               random.nextBoolean(),
               TestUtil.nextInt(random, 1, 100)));
     } else {