mirror of https://github.com/apache/lucene.git
enable error-prone "narrow calculation" check (#11923)
This check finds bugs such as https://github.com/apache/lucene/pull/11905. See https://errorprone.info/bugpattern/NarrowCalculation
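The NarrowCalculation check flags arithmetic that is evaluated in a narrow type such as int and only afterwards widened to long or double, so the intermediate result can overflow or truncate before the widening happens. The sketch below is a hypothetical illustration of that bug class and of the two fix patterns that recur throughout this commit (casting one operand to long, and dividing by a double literal such as 1024.); the names are made up and this is not code from the change itself.

    // Hypothetical illustration only; not part of this commit.
    class NarrowCalculationExample {
      // BUG: int * int overflows before the result is widened to long.
      static long offsetBuggy(int blockIndex, int blockSize) {
        return blockIndex * blockSize; // 40_000 * 60_000 wraps to a negative int
      }

      // FIX: widen one operand first so the multiplication runs in long arithmetic.
      static long offsetFixed(int blockIndex, int blockSize) {
        return blockIndex * (long) blockSize; // 2_400_000_000
      }

      // FIX for the division cases: divide by 1024. (a double) so nothing truncates.
      static double sizeMB(long bytes) {
        return bytes / 1024. / 1024.;
      }

      public static void main(String[] args) {
        System.out.println(offsetBuggy(40_000, 60_000)); // -1894967296
        System.out.println(offsetFixed(40_000, 60_000)); // 2400000000
        System.out.println(sizeMB(3L * 1024 * 1024));    // 3.0
      }
    }

The @SuppressWarnings("NarrowCalculation") annotations added to the test classes below opt those particular tests out of the check, presumably because the narrow arithmetic there is intentional.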
parent e5426dbbd2
commit 1b9d98d6ec

@@ -68,6 +68,7 @@ allprojects { prj ->
options.errorprone.disableWarningsInGeneratedCode = true
options.errorprone.errorproneArgs = [
'-XepAllErrorsAsWarnings', // warnings still fail build by default, but allows usage of -Pjavac.failOnWarnings=false
'-Xep:InlineMeSuggester:OFF', // We don't use this annotation
// test
@@ -142,7 +143,6 @@ allprojects { prj ->
'-Xep:ModifiedButNotUsed:OFF',
'-Xep:MutablePublicArray:OFF',
'-Xep:NarrowingCompoundAssignment:OFF',
'-Xep:NarrowCalculation:OFF',
'-Xep:NonAtomicVolatileUpdate:OFF',
'-Xep:NonCanonicalType:OFF',
'-Xep:ObjectToString:OFF',

@@ -537,7 +537,7 @@ public final class Lucene50CompressingStoredFieldsReader extends StoredFieldsRea
if (bitsPerLength == 0) {
final int length = fieldsStream.readVInt();
for (int i = 0; i < chunkDocs; ++i) {
offsets[1 + i] = (1 + i) * length;
offsets[1 + i] = (1 + i) * (long) length;
}
} else if (bitsPerStoredFields > 31) {
throw new CorruptIndexException("bitsPerLength=" + bitsPerLength, fieldsStream);

@@ -469,8 +469,9 @@ final class IndexedDISI extends DocIdSetIterator {
// NO_MORE_DOCS
final int inRangeBlockIndex =
blockIndex < jumpTableEntryCount ? blockIndex : jumpTableEntryCount - 1;
final int index = jumpTable.readInt(inRangeBlockIndex * Integer.BYTES * 2);
final int offset = jumpTable.readInt(inRangeBlockIndex * Integer.BYTES * 2 + Integer.BYTES);
final int index = jumpTable.readInt(inRangeBlockIndex * (long) Integer.BYTES * 2);
final int offset =
jumpTable.readInt(inRangeBlockIndex * (long) Integer.BYTES * 2 + Integer.BYTES);
this.nextBlockIndex = index - 1; // -1 to compensate for the always-added 1 in readBlockHeader
slice.seek(offset);
readBlockHeader();
@@ -697,7 +698,7 @@ final class IndexedDISI extends DocIdSetIterator {
// Position the counting logic just after the rank point
final int rankAlignedWordIndex = rankIndex << disi.denseRankPower >> 6;
disi.slice.seek(disi.denseBitmapOffset + rankAlignedWordIndex * Long.BYTES);
disi.slice.seek(disi.denseBitmapOffset + rankAlignedWordIndex * (long) Long.BYTES);
long rankWord = disi.slice.readLong();
int denseNOO = rank + Long.bitCount(rankWord);

@@ -53,6 +53,7 @@ public class TestLucene60PointsFormat extends BasePointsFormatTestCase {
return codec;
}
@SuppressWarnings("NarrowCalculation")
public void testEstimatePointCount() throws IOException {
Directory dir = newDirectory();
IndexWriterConfig iwc = newIndexWriterConfig();
@@ -178,6 +179,7 @@ public class TestLucene60PointsFormat extends BasePointsFormatTestCase {
// The tree is always balanced in the N dims case, and leaves are
// not all full so things are a bit different
@SuppressWarnings("NarrowCalculation")
public void testEstimatePointCount2Dims() throws IOException {
Directory dir = newDirectory();
IndexWriter w = new IndexWriter(dir, newIndexWriterConfig());

@@ -101,6 +101,7 @@ public class TestLucene86PointsFormat extends BasePointsFormatTestCase {
super.testMergeStability();
}
@SuppressWarnings("NarrowCalculation")
public void testEstimatePointCount() throws IOException {
Directory dir = newDirectory();
IndexWriterConfig iwc = newIndexWriterConfig();
@@ -226,6 +227,7 @@ public class TestLucene86PointsFormat extends BasePointsFormatTestCase {
// The tree is always balanced in the N dims case, and leaves are
// not all full so things are a bit different
@SuppressWarnings("NarrowCalculation")
public void testEstimatePointCount2Dims() throws IOException {
Directory dir = newDirectory();
IndexWriter w = new IndexWriter(dir, newIndexWriterConfig());

@@ -1244,7 +1244,7 @@ public final class OrdsSegmentTermsEnum extends BaseTermsEnum {
while (low <= high) {
mid = (low + high) >>> 1;
fstReader.setPosition(arc.posArcsStart());
fstReader.skipBytes(arc.bytesPerArc() * mid);
fstReader.skipBytes(arc.bytesPerArc() * (long) mid);
final byte flags = fstReader.readByte();
fr.index.readLabel(fstReader);
final Output minArcOutput;

@@ -668,7 +668,7 @@ final class SimpleTextBKDWriter implements Closeable {
// Indexed by nodeID, but first (root) nodeID is 1. We do 1+ because the lead byte at each
// recursion says which dim we split on.
byte[] splitPackedValues = new byte[Math.toIntExact(numLeaves * (1 + config.bytesPerDim))];
byte[] splitPackedValues = new byte[Math.multiplyExact(numLeaves, 1 + config.bytesPerDim)];
// +1 because leaf count is power of 2 (e.g. 8), and innerNodeCount is power of 2 minus 1 (e.g.
// 7)

@@ -108,7 +108,7 @@ class SimpleTextDocValuesReader extends DocValuesProducer {
assert startsWith(PATTERN);
field.pattern = stripPrefix(PATTERN);
field.dataStartFilePointer = data.getFilePointer();
data.seek(data.getFilePointer() + (1 + field.pattern.length() + 2) * maxDoc);
data.seek(data.getFilePointer() + (1 + field.pattern.length() + 2) * (long) maxDoc);
} else if (dvType == DocValuesType.BINARY) {
readLine();
assert startsWith(MAXLENGTH);
@@ -118,7 +118,8 @@ class SimpleTextDocValuesReader extends DocValuesProducer {
field.pattern = stripPrefix(PATTERN);
field.dataStartFilePointer = data.getFilePointer();
data.seek(
data.getFilePointer() + (9 + field.pattern.length() + field.maxLength + 2) * maxDoc);
data.getFilePointer()
+ (9 + field.pattern.length() + field.maxLength + 2) * (long) maxDoc);
} else if (dvType == DocValuesType.SORTED || dvType == DocValuesType.SORTED_SET) {
readLine();
assert startsWith(NUMVALUES);
@@ -136,7 +137,7 @@ class SimpleTextDocValuesReader extends DocValuesProducer {
data.seek(
data.getFilePointer()
+ (9 + field.pattern.length() + field.maxLength) * field.numValues
+ (1 + field.ordPattern.length()) * maxDoc);
+ (1 + field.ordPattern.length()) * (long) maxDoc);
} else {
throw new AssertionError();
}
@@ -214,7 +215,7 @@ class SimpleTextDocValuesReader extends DocValuesProducer {
throw new IndexOutOfBoundsException(
"docID must be 0 .. " + (maxDoc - 1) + "; got " + docID);
}
in.seek(field.dataStartFilePointer + (1 + field.pattern.length() + 2) * docID);
in.seek(field.dataStartFilePointer + (1 + field.pattern.length() + 2) * (long) docID);
SimpleTextUtil.readLine(in, scratch);
// System.out.println("parsing delta: " + scratch.utf8ToString());
BigDecimal bd;
@@ -262,7 +263,7 @@ class SimpleTextDocValuesReader extends DocValuesProducer {
@Override
public int advance(int target) throws IOException {
for (int i = target; i < maxDoc; ++i) {
in.seek(field.dataStartFilePointer + (1 + field.pattern.length() + 2) * i);
in.seek(field.dataStartFilePointer + (1 + field.pattern.length() + 2) * (long) i);
SimpleTextUtil.readLine(in, scratch); // data
SimpleTextUtil.readLine(in, scratch); // 'T' or 'F'
if (scratch.byteAt(0) == (byte) 'T') {
@@ -275,7 +276,7 @@ class SimpleTextDocValuesReader extends DocValuesProducer {
@Override
boolean advanceExact(int target) throws IOException {
this.doc = target;
in.seek(field.dataStartFilePointer + (1 + field.pattern.length() + 2) * target);
in.seek(field.dataStartFilePointer + (1 + field.pattern.length() + 2) * (long) target);
SimpleTextUtil.readLine(in, scratch); // data
SimpleTextUtil.readLine(in, scratch); // 'T' or 'F'
return scratch.byteAt(0) == (byte) 'T';
@@ -311,7 +312,7 @@ class SimpleTextDocValuesReader extends DocValuesProducer {
}
in.seek(
field.dataStartFilePointer
+ (9 + field.pattern.length() + field.maxLength + 2) * docID);
+ (9 + field.pattern.length() + field.maxLength + 2) * (long) docID);
SimpleTextUtil.readLine(in, scratch);
assert StringHelper.startsWith(scratch.get(), LENGTH);
int len;
@@ -401,7 +402,8 @@ class SimpleTextDocValuesReader extends DocValuesProducer {
public int advance(int target) throws IOException {
for (int i = target; i < maxDoc; ++i) {
in.seek(
field.dataStartFilePointer + (9 + field.pattern.length() + field.maxLength + 2) * i);
field.dataStartFilePointer
+ (9 + field.pattern.length() + field.maxLength + 2) * (long) i);
SimpleTextUtil.readLine(in, scratch);
assert StringHelper.startsWith(scratch.get(), LENGTH);
int len;
@@ -435,7 +437,7 @@ class SimpleTextDocValuesReader extends DocValuesProducer {
this.doc = target;
in.seek(
field.dataStartFilePointer
+ (9 + field.pattern.length() + field.maxLength + 2) * target);
+ (9 + field.pattern.length() + field.maxLength + 2) * (long) target);
SimpleTextUtil.readLine(in, scratch);
assert StringHelper.startsWith(scratch.get(), LENGTH);
int len;
@@ -504,7 +506,7 @@ class SimpleTextDocValuesReader extends DocValuesProducer {
in.seek(
field.dataStartFilePointer
+ field.numValues * (9 + field.pattern.length() + field.maxLength)
+ i * (1 + field.ordPattern.length()));
+ i * (long) (1 + field.ordPattern.length()));
SimpleTextUtil.readLine(in, scratch);
try {
ord = (int) ordDecoder.parse(scratch.get().utf8ToString()).longValue() - 1;
@@ -524,7 +526,7 @@ class SimpleTextDocValuesReader extends DocValuesProducer {
in.seek(
field.dataStartFilePointer
+ field.numValues * (9 + field.pattern.length() + field.maxLength)
+ target * (1 + field.ordPattern.length()));
+ target * (long) (1 + field.ordPattern.length()));
SimpleTextUtil.readLine(in, scratch);
try {
ord = (int) ordDecoder.parse(scratch.get().utf8ToString()).longValue() - 1;
@@ -547,7 +549,9 @@ class SimpleTextDocValuesReader extends DocValuesProducer {
throw new IndexOutOfBoundsException(
"ord must be 0 .. " + (field.numValues - 1) + "; got " + ord);
}
in.seek(field.dataStartFilePointer + ord * (9 + field.pattern.length() + field.maxLength));
in.seek(
field.dataStartFilePointer
+ ord * (long) (9 + field.pattern.length() + field.maxLength));
SimpleTextUtil.readLine(in, scratch);
assert StringHelper.startsWith(scratch.get(), LENGTH)
: "got " + scratch.get().utf8ToString() + " in=" + in;
@@ -688,8 +692,8 @@ class SimpleTextDocValuesReader extends DocValuesProducer {
for (int i = target; i < maxDoc; ++i) {
in.seek(
field.dataStartFilePointer
+ field.numValues * (9 + field.pattern.length() + field.maxLength)
+ i * (1 + field.ordPattern.length()));
+ field.numValues * (long) (9 + field.pattern.length() + field.maxLength)
+ i * (long) (1 + field.ordPattern.length()));
SimpleTextUtil.readLine(in, scratch);
String ordList = scratch.get().utf8ToString().trim();
if (ordList.isEmpty() == false) {
@@ -705,8 +709,8 @@ class SimpleTextDocValuesReader extends DocValuesProducer {
public boolean advanceExact(int target) throws IOException {
in.seek(
field.dataStartFilePointer
+ field.numValues * (9 + field.pattern.length() + field.maxLength)
+ target * (1 + field.ordPattern.length()));
+ field.numValues * (long) (9 + field.pattern.length() + field.maxLength)
+ target * (long) (1 + field.ordPattern.length()));
SimpleTextUtil.readLine(in, scratch);
String ordList = scratch.get().utf8ToString().trim();
doc = target;

@@ -470,8 +470,9 @@ public final class IndexedDISI extends DocIdSetIterator {
// NO_MORE_DOCS
final int inRangeBlockIndex =
blockIndex < jumpTableEntryCount ? blockIndex : jumpTableEntryCount - 1;
final int index = jumpTable.readInt(inRangeBlockIndex * Integer.BYTES * 2);
final int offset = jumpTable.readInt(inRangeBlockIndex * Integer.BYTES * 2 + Integer.BYTES);
final int index = jumpTable.readInt(inRangeBlockIndex * (long) Integer.BYTES * 2);
final int offset =
jumpTable.readInt(inRangeBlockIndex * (long) Integer.BYTES * 2 + Integer.BYTES);
this.nextBlockIndex = index - 1; // -1 to compensate for the always-added 1 in readBlockHeader
slice.seek(offset);
readBlockHeader();
@@ -698,7 +699,7 @@ public final class IndexedDISI extends DocIdSetIterator {
// Position the counting logic just after the rank point
final int rankAlignedWordIndex = rankIndex << disi.denseRankPower >> 6;
disi.slice.seek(disi.denseBitmapOffset + rankAlignedWordIndex * Long.BYTES);
disi.slice.seek(disi.denseBitmapOffset + rankAlignedWordIndex * (long) Long.BYTES);
long rankWord = disi.slice.readLong();
int denseNOO = rank + Long.bitCount(rankWord);

@@ -243,7 +243,7 @@ public final class Lucene90CompressingStoredFieldsWriter extends StoredFieldsWri
lengths[i] = endOffsets[i] - endOffsets[i - 1];
assert lengths[i] >= 0;
}
final boolean sliced = bufferedDocs.size() >= 2 * chunkSize;
final boolean sliced = bufferedDocs.size() >= 2L * chunkSize;
final boolean dirtyChunk = force;
writeHeader(docBase, numBufferedDocs, numStoredFields, lengths, sliced, dirtyChunk);
ByteBuffersDataInput bytebuffers = bufferedDocs.toDataInput();
@@ -729,7 +729,7 @@ public final class Lucene90CompressingStoredFieldsWriter extends StoredFieldsWri
@Override
public long ramBytesUsed() {
return bufferedDocs.ramBytesUsed()
+ numStoredFields.length * Integer.BYTES
+ endOffsets.length * Integer.BYTES;
+ numStoredFields.length * (long) Integer.BYTES
+ endOffsets.length * (long) Integer.BYTES;
}
}

@@ -138,7 +138,7 @@ final class BinaryDocValuesFieldUpdates extends DocValuesFieldUpdates {
+ lengths.ramBytesUsed()
+ RamUsageEstimator.NUM_BYTES_OBJECT_HEADER
+ 2 * Integer.BYTES
+ 3 * RamUsageEstimator.NUM_BYTES_OBJECT_REF
+ 3 * (long) RamUsageEstimator.NUM_BYTES_OBJECT_REF
+ values.bytes().length;
}
}

@@ -190,8 +190,10 @@ public abstract class BufferingKnnVectorsWriter extends KnnVectorsWriter {
if (vectors.size() == 0) return 0;
return docsWithField.ramBytesUsed()
+ vectors.size()
* (RamUsageEstimator.NUM_BYTES_OBJECT_REF + RamUsageEstimator.NUM_BYTES_ARRAY_HEADER)
+ vectors.size() * dim * Float.BYTES;
* (long)
(RamUsageEstimator.NUM_BYTES_OBJECT_REF
+ RamUsageEstimator.NUM_BYTES_ARRAY_HEADER)
+ vectors.size() * (long) dim * Float.BYTES;
}
}

@@ -69,9 +69,9 @@ abstract class DocValuesUpdate {
final long sizeInBytes() {
long sizeInBytes = RAW_SIZE_IN_BYTES;
sizeInBytes += term.field.length() * Character.BYTES;
sizeInBytes += term.field.length() * (long) Character.BYTES;
sizeInBytes += term.bytes.bytes.length;
sizeInBytes += field.length() * Character.BYTES;
sizeInBytes += field.length() * (long) Character.BYTES;
sizeInBytes += valueSizeInBytes();
sizeInBytes += 1; // hasValue
return sizeInBytes;

@@ -85,7 +85,7 @@ final class DocumentsWriterFlushControl implements Accountable, Closeable {
this.perThreadPool = documentsWriter.perThreadPool;
this.flushPolicy = config.getFlushPolicy();
this.config = config;
this.hardMaxBytesPerDWPT = config.getRAMPerThreadHardLimitMB() * 1024 * 1024;
this.hardMaxBytesPerDWPT = config.getRAMPerThreadHardLimitMB() * 1024L * 1024L;
this.documentsWriter = documentsWriter;
}

@@ -620,7 +620,7 @@ final class DocumentsWriterPerThread implements Accountable {
@Override
public long ramBytesUsed() {
assert lock.isHeldByCurrentThread();
return (deleteDocIDs.length * Integer.BYTES)
return (deleteDocIDs.length * (long) Integer.BYTES)
+ pendingUpdates.ramBytesUsed()
+ indexingChain.ramBytesUsed();
}

@@ -82,7 +82,7 @@ final class FieldUpdatesBuffer {
}
private static long sizeOfString(String string) {
return STRING_SHALLOW_SIZE + (string.length() * Character.BYTES);
return STRING_SHALLOW_SIZE + (string.length() * (long) Character.BYTES);
}
FieldUpdatesBuffer(
@@ -130,7 +130,7 @@ final class FieldUpdatesBuffer {
Arrays.fill(array, 1, ord, fields[0]);
}
bytesUsed.addAndGet(
(array.length - fields.length) * RamUsageEstimator.NUM_BYTES_OBJECT_REF);
(array.length - fields.length) * (long) RamUsageEstimator.NUM_BYTES_OBJECT_REF);
fields = array;
}
if (field != fields[0]) { // that's an easy win of not accounting if there is an outlier
@@ -145,7 +145,7 @@ final class FieldUpdatesBuffer {
if (docsUpTo.length == 1) {
Arrays.fill(array, 1, ord, docsUpTo[0]);
}
bytesUsed.addAndGet((array.length - docsUpTo.length) * Integer.BYTES);
bytesUsed.addAndGet((array.length - docsUpTo.length) * (long) Integer.BYTES);
docsUpTo = array;
}
docsUpTo[ord] = docUpTo;
@@ -181,7 +181,7 @@ final class FieldUpdatesBuffer {
if (numericValues.length == 1) {
Arrays.fill(array, 1, ord, numericValues[0]);
}
bytesUsed.addAndGet((array.length - numericValues.length) * Long.BYTES);
bytesUsed.addAndGet((array.length - numericValues.length) * (long) Long.BYTES);
numericValues = array;
}
numericValues[ord] = value;

@@ -112,7 +112,7 @@ final class FrozenBufferedUpdates {
bytesUsed =
(int)
((deleteTerms.ramBytesUsed() + deleteQueries.length * BYTES_PER_DEL_QUERY)
((deleteTerms.ramBytesUsed() + deleteQueries.length * (long) BYTES_PER_DEL_QUERY)
+ updates.fieldUpdatesBytesUsed.get());
numTermDeletes = updates.numTermDeletes.get();

@@ -5295,7 +5295,7 @@ public class IndexWriter
Locale.ROOT,
"merged segment size=%.3f MB vs estimate=%.3f MB",
merge.info.sizeInBytes() / 1024. / 1024.,
merge.estimatedMergeBytes / 1024 / 1024.));
merge.estimatedMergeBytes / 1024. / 1024.));
}
final IndexReaderWarmer mergedSegmentWarmer = config.getMergedSegmentWarmer();

@@ -509,7 +509,7 @@ public abstract class LogMergePolicy extends MergePolicy {
+ " level="
+ infoLevel.level
+ " size="
+ String.format(Locale.ROOT, "%.3f MB", segBytes / 1024 / 1024.)
+ String.format(Locale.ROOT, "%.3f MB", segBytes / 1024. / 1024.)
+ extra,
mergeContext);
}

@@ -803,7 +803,7 @@ public abstract class MergePolicy {
/** Returns the largest size allowed for a compound file segment */
public double getMaxCFSSegmentSizeMB() {
return maxCFSSegmentSize / 1024 / 1024.;
return maxCFSSegmentSize / 1024. / 1024.;
}
/**

@@ -66,7 +66,7 @@ class PointValuesWriter {
if (docIDs.length == numPoints) {
docIDs = ArrayUtil.grow(docIDs, numPoints + 1);
iwBytesUsed.addAndGet((docIDs.length - numPoints) * Integer.BYTES);
iwBytesUsed.addAndGet((docIDs.length - numPoints) * (long) Integer.BYTES);
}
final long bytesRamBytesUsedBefore = bytes.ramBytesUsed();
bytesOut.writeBytes(value.bytes, value.offset, value.length);

@@ -143,7 +143,7 @@ class SortedSetDocValuesWriter extends DocValuesWriter<SortedSetDocValues> {
if (currentUpto == currentValues.length) {
currentValues = ArrayUtil.grow(currentValues, currentValues.length + 1);
iwBytesUsed.addAndGet((currentValues.length - currentUpto) * Integer.BYTES);
iwBytesUsed.addAndGet((currentValues.length - currentUpto) * (long) Integer.BYTES);
}
currentValues[currentUpto] = termID;

@@ -287,7 +287,8 @@ abstract class TermsHashPerField implements Comparable<TermsHashPerField> {
if (perField.postingsArray == null) {
perField.postingsArray = perField.createPostingsArray(2);
perField.newPostingsArray();
bytesUsed.addAndGet(perField.postingsArray.size * perField.postingsArray.bytesPerPosting());
bytesUsed.addAndGet(
perField.postingsArray.size * (long) perField.postingsArray.bytesPerPosting());
}
return perField.postingsArray.textStarts;
}
@@ -298,7 +299,7 @@ abstract class TermsHashPerField implements Comparable<TermsHashPerField> {
final int oldSize = perField.postingsArray.size;
postingsArray = perField.postingsArray = postingsArray.grow();
perField.newPostingsArray();
bytesUsed.addAndGet((postingsArray.bytesPerPosting() * (postingsArray.size - oldSize)));
bytesUsed.addAndGet(postingsArray.bytesPerPosting() * (long) (postingsArray.size - oldSize));
return postingsArray.textStarts;
}

@@ -342,7 +342,7 @@ public class TieredMergePolicy extends MergePolicy {
" seg="
+ segString(mergeContext, Collections.singleton(segSizeDocs.segInfo))
+ " size="
+ String.format(Locale.ROOT, "%.3f", segBytes / 1024 / 1024.)
+ String.format(Locale.ROOT, "%.3f", segBytes / 1024. / 1024.)
+ " MB"
+ extra,
mergeContext);
@@ -962,8 +962,8 @@ public class TieredMergePolicy extends MergePolicy {
public String toString() {
StringBuilder sb = new StringBuilder("[" + getClass().getSimpleName() + ": ");
sb.append("maxMergeAtOnce=").append(maxMergeAtOnce).append(", ");
sb.append("maxMergedSegmentMB=").append(maxMergedSegmentBytes / 1024 / 1024.).append(", ");
sb.append("floorSegmentMB=").append(floorSegmentBytes / 1024 / 1024.).append(", ");
sb.append("maxMergedSegmentMB=").append(maxMergedSegmentBytes / 1024. / 1024.).append(", ");
sb.append("floorSegmentMB=").append(floorSegmentBytes / 1024. / 1024.).append(", ");
sb.append("forceMergeDeletesPctAllowed=").append(forceMergeDeletesPctAllowed).append(", ");
sb.append("segmentsPerTier=").append(segsPerTier).append(", ");
sb.append("maxCFSSegmentSizeMB=").append(getMaxCFSSegmentSizeMB()).append(", ");

@@ -435,7 +435,8 @@ public class LRUQueryCache implements QueryCache, Accountable {
long recomputedRamBytesUsed =
HASHTABLE_RAM_BYTES_PER_ENTRY * cache.size()
+ LINKED_HASHTABLE_RAM_BYTES_PER_ENTRY * uniqueQueries.size();
recomputedRamBytesUsed += mostRecentlyUsedQueries.size() * QUERY_DEFAULT_RAM_BYTES_USED;
recomputedRamBytesUsed +=
mostRecentlyUsedQueries.size() * (long) QUERY_DEFAULT_RAM_BYTES_USED;
for (LeafCache leafCache : cache.values()) {
recomputedRamBytesUsed += HASHTABLE_RAM_BYTES_PER_ENTRY * leafCache.cache.size();
for (CacheAndCount cached : leafCache.cache.values()) {

@@ -97,9 +97,9 @@ public class NRTCachingDirectory extends FilterDirectory implements Accountable
return "NRTCachingDirectory("
+ in
+ "; maxCacheMB="
+ (maxCachedBytes / 1024 / 1024.)
+ (maxCachedBytes / 1024. / 1024.)
+ " maxMergeSizeMB="
+ (maxMergeSizeBytes / 1024 / 1024.)
+ (maxMergeSizeBytes / 1024. / 1024.)
+ ")";
}

@@ -41,7 +41,7 @@ public final class BytesRefArray implements SortableBytesRefArray {
public BytesRefArray(Counter bytesUsed) {
this.pool = new ByteBlockPool(new ByteBlockPool.DirectTrackingAllocator(bytesUsed));
pool.nextBuffer();
bytesUsed.addAndGet(RamUsageEstimator.NUM_BYTES_ARRAY_HEADER * Integer.BYTES);
bytesUsed.addAndGet(RamUsageEstimator.NUM_BYTES_ARRAY_HEADER * (long) Integer.BYTES);
this.bytesUsed = bytesUsed;
}
@@ -66,7 +66,7 @@ public final class BytesRefArray implements SortableBytesRefArray {
if (lastElement >= offsets.length) {
int oldLen = offsets.length;
offsets = ArrayUtil.grow(offsets, offsets.length + 1);
bytesUsed.addAndGet((offsets.length - oldLen) * Integer.BYTES);
bytesUsed.addAndGet((offsets.length - oldLen) * (long) Integer.BYTES);
}
pool.append(bytes);
offsets[lastElement++] = currentOffset;

@@ -85,7 +85,7 @@ public final class BytesRefHash implements Accountable {
bytesStart = bytesStartArray.init();
bytesUsed =
bytesStartArray.bytesUsed() == null ? Counter.newCounter() : bytesStartArray.bytesUsed();
bytesUsed.addAndGet(hashSize * Integer.BYTES);
bytesUsed.addAndGet(hashSize * (long) Integer.BYTES);
}
/**
@@ -195,7 +195,7 @@ public final class BytesRefHash implements Accountable {
newSize /= 2;
}
if (newSize != hashSize) {
bytesUsed.addAndGet(Integer.BYTES * -(hashSize - newSize));
bytesUsed.addAndGet(Integer.BYTES * (long) -(hashSize - newSize));
hashSize = newSize;
ids = new int[hashSize];
Arrays.fill(ids, -1);
@@ -230,7 +230,7 @@ public final class BytesRefHash implements Accountable {
public void close() {
clear(true);
ids = null;
bytesUsed.addAndGet(Integer.BYTES * -hashSize);
bytesUsed.addAndGet(Integer.BYTES * (long) -hashSize);
}
/**
@@ -373,7 +373,7 @@ public final class BytesRefHash implements Accountable {
*/
private void rehash(final int newSize, boolean hashOnData) {
final int newMask = newSize - 1;
bytesUsed.addAndGet(Integer.BYTES * (newSize));
bytesUsed.addAndGet(Integer.BYTES * (long) newSize);
final int[] newHash = new int[newSize];
Arrays.fill(newHash, -1);
for (int i = 0; i < hashSize; i++) {
@@ -414,7 +414,7 @@ public final class BytesRefHash implements Accountable {
}
hashMask = newMask;
bytesUsed.addAndGet(Integer.BYTES * (-ids.length));
bytesUsed.addAndGet(Integer.BYTES * (long) -ids.length);
ids = newHash;
hashSize = newSize;
hashHalfSize = newSize / 2;
@@ -436,7 +436,7 @@ public final class BytesRefHash implements Accountable {
if (ids == null) {
ids = new int[hashSize];
bytesUsed.addAndGet(Integer.BYTES * hashSize);
bytesUsed.addAndGet(Integer.BYTES * (long) hashSize);
}
}

@@ -95,7 +95,7 @@ public final class RecyclingByteBlockAllocator extends ByteBlockPool.Allocator {
for (int i = stop; i < end; i++) {
blocks[i] = null;
}
bytesUsed.addAndGet(-(end - stop) * blockSize);
bytesUsed.addAndGet(-(end - stop) * (long) blockSize);
assert bytesUsed.get() >= 0;
}
@@ -140,7 +140,7 @@ public final class RecyclingByteBlockAllocator extends ByteBlockPool.Allocator {
while (freeBlocks > stop) {
freeByteBlocks[--freeBlocks] = null;
}
bytesUsed.addAndGet(-count * blockSize);
bytesUsed.addAndGet(-count * (long) blockSize);
assert bytesUsed.get() >= 0;
return count;
}

@@ -69,7 +69,7 @@ public final class RecyclingIntBlockAllocator extends Allocator {
@Override
public int[] getIntBlock() {
if (freeBlocks == 0) {
bytesUsed.addAndGet(blockSize * Integer.BYTES);
bytesUsed.addAndGet(blockSize * (long) Integer.BYTES);
return new int[blockSize];
}
final int[] b = freeByteBlocks[--freeBlocks];
@@ -95,7 +95,7 @@ public final class RecyclingIntBlockAllocator extends Allocator {
for (int i = stop; i < end; i++) {
blocks[i] = null;
}
bytesUsed.addAndGet(-(end - stop) * (blockSize * Integer.BYTES));
bytesUsed.addAndGet(-(end - stop) * ((long) blockSize * Integer.BYTES));
assert bytesUsed.get() >= 0;
}
@@ -140,7 +140,7 @@ public final class RecyclingIntBlockAllocator extends Allocator {
while (freeBlocks > stop) {
freeByteBlocks[--freeBlocks] = null;
}
bytesUsed.addAndGet(-count * blockSize * Integer.BYTES);
bytesUsed.addAndGet(-count * (long) blockSize * Integer.BYTES);
assert bytesUsed.get() >= 0;
return count;
}

@@ -358,7 +358,7 @@ public class LevenshteinAutomata {
};
protected int unpack(long[] data, int index, int bitsPerValue) {
final long bitLoc = bitsPerValue * index;
final long bitLoc = bitsPerValue * (long) index;
final int dataLoc = (int) (bitLoc >> 6);
final int bitStart = (int) (bitLoc & 63);
// System.out.println("index=" + index + " dataLoc=" + dataLoc + " bitStart=" + bitStart + "

@@ -868,7 +868,7 @@ public final class FST<T> implements Accountable {
// Expand the arcs in place, backwards.
long srcPos = fstCompiler.bytes.getPosition();
long destPos = startAddress + headerLen + nodeIn.numArcs * maxBytesPerArc;
long destPos = startAddress + headerLen + nodeIn.numArcs * (long) maxBytesPerArc;
assert destPos >= srcPos;
if (destPos > srcPos) {
fstCompiler.bytes.skipBytes((int) (destPos - srcPos));
@@ -1233,7 +1233,7 @@ public final class FST<T> implements Accountable {
// Arcs have fixed length.
if (arc.nodeFlags() == ARCS_FOR_BINARY_SEARCH) {
// Point to next arc, -1 to skip arc flags.
in.setPosition(arc.posArcsStart() - (1 + arc.arcIdx()) * arc.bytesPerArc() - 1);
in.setPosition(arc.posArcsStart() - (1 + arc.arcIdx()) * (long) arc.bytesPerArc() - 1);
} else {
assert arc.nodeFlags() == ARCS_FOR_DIRECT_ADDRESSING;
// Direct addressing node. The label is not stored but rather inferred
@@ -1258,7 +1258,7 @@ public final class FST<T> implements Accountable {
assert arc.bytesPerArc() > 0;
assert arc.nodeFlags() == ARCS_FOR_BINARY_SEARCH;
assert idx >= 0 && idx < arc.numArcs();
in.setPosition(arc.posArcsStart() - idx * arc.bytesPerArc());
in.setPosition(arc.posArcsStart() - idx * (long) arc.bytesPerArc());
arc.arcIdx = idx;
arc.flags = in.readByte();
return readArc(arc, in);
@@ -1285,7 +1285,7 @@ public final class FST<T> implements Accountable {
*/
private Arc<T> readArcByDirectAddressing(
Arc<T> arc, final BytesReader in, int rangeIndex, int presenceIndex) throws IOException {
in.setPosition(arc.posArcsStart() - presenceIndex * arc.bytesPerArc());
in.setPosition(arc.posArcsStart() - presenceIndex * (long) arc.bytesPerArc());
arc.arcIdx = rangeIndex;
arc.presenceIndex = presenceIndex;
arc.flags = in.readByte();
@@ -1314,7 +1314,7 @@ public final class FST<T> implements Accountable {
assert arc.bytesPerArc() > 0;
arc.arcIdx++;
assert arc.arcIdx() >= 0 && arc.arcIdx() < arc.numArcs();
in.setPosition(arc.posArcsStart() - arc.arcIdx() * arc.bytesPerArc());
in.setPosition(arc.posArcsStart() - arc.arcIdx() * (long) arc.bytesPerArc());
arc.flags = in.readByte();
break;
@@ -1377,7 +1377,7 @@ public final class FST<T> implements Accountable {
arc.nodeFlags == ARCS_FOR_DIRECT_ADDRESSING
? BitTable.countBits(arc, in)
: arc.numArcs();
in.setPosition(arc.posArcsStart() - arc.bytesPerArc() * numArcs);
in.setPosition(arc.posArcsStart() - arc.bytesPerArc() * (long) numArcs);
}
}
arc.target = in.getPosition();

@@ -98,6 +98,7 @@ public class TestLucene90PointsFormat extends BasePointsFormatTestCase {
super.testMergeStability();
}
@SuppressWarnings("NarrowCalculation")
public void testEstimatePointCount() throws IOException {
Directory dir = newDirectory();
IndexWriterConfig iwc = newIndexWriterConfig();
@@ -223,6 +224,7 @@ public class TestLucene90PointsFormat extends BasePointsFormatTestCase {
// The tree is always balanced in the N dims case, and leaves are
// not all full so things are a bit different
@SuppressWarnings("NarrowCalculation")
public void testEstimatePointCount2Dims() throws IOException {
Directory dir = newDirectory();
IndexWriter w = new IndexWriter(dir, newIndexWriterConfig());

@@ -468,11 +468,11 @@ public class TestIndexWriterMergePolicy extends LuceneTestCase {
lmp.setMaxCFSSegmentSizeMB(Double.POSITIVE_INFINITY);
assertEquals(
Long.MAX_VALUE / 1024 / 1024., lmp.getMaxCFSSegmentSizeMB(), EPSILON * Long.MAX_VALUE);
Long.MAX_VALUE / 1024. / 1024., lmp.getMaxCFSSegmentSizeMB(), EPSILON * Long.MAX_VALUE);
lmp.setMaxCFSSegmentSizeMB(Long.MAX_VALUE / 1024 / 1024.);
lmp.setMaxCFSSegmentSizeMB(Long.MAX_VALUE / 1024. / 1024.);
assertEquals(
Long.MAX_VALUE / 1024 / 1024., lmp.getMaxCFSSegmentSizeMB(), EPSILON * Long.MAX_VALUE);
Long.MAX_VALUE / 1024. / 1024., lmp.getMaxCFSSegmentSizeMB(), EPSILON * Long.MAX_VALUE);
expectThrows(
IllegalArgumentException.class,

@@ -755,11 +755,11 @@ public class TestTieredMergePolicy extends BaseMergePolicyTestCase {
tmp.setMaxMergedSegmentMB(Double.POSITIVE_INFINITY);
assertEquals(
Long.MAX_VALUE / 1024 / 1024., tmp.getMaxMergedSegmentMB(), EPSILON * Long.MAX_VALUE);
Long.MAX_VALUE / 1024. / 1024., tmp.getMaxMergedSegmentMB(), EPSILON * Long.MAX_VALUE);
tmp.setMaxMergedSegmentMB(Long.MAX_VALUE / 1024 / 1024.);
tmp.setMaxMergedSegmentMB(Long.MAX_VALUE / 1024. / 1024.);
assertEquals(
Long.MAX_VALUE / 1024 / 1024., tmp.getMaxMergedSegmentMB(), EPSILON * Long.MAX_VALUE);
Long.MAX_VALUE / 1024. / 1024., tmp.getMaxMergedSegmentMB(), EPSILON * Long.MAX_VALUE);
expectThrows(
IllegalArgumentException.class,
@@ -771,10 +771,10 @@ public class TestTieredMergePolicy extends BaseMergePolicyTestCase {
assertEquals(2.0, tmp.getFloorSegmentMB(), EPSILON);
tmp.setFloorSegmentMB(Double.POSITIVE_INFINITY);
assertEquals(Long.MAX_VALUE / 1024 / 1024., tmp.getFloorSegmentMB(), EPSILON * Long.MAX_VALUE);
assertEquals(Long.MAX_VALUE / 1024. / 1024., tmp.getFloorSegmentMB(), EPSILON * Long.MAX_VALUE);
tmp.setFloorSegmentMB(Long.MAX_VALUE / 1024 / 1024.);
assertEquals(Long.MAX_VALUE / 1024 / 1024., tmp.getFloorSegmentMB(), EPSILON * Long.MAX_VALUE);
tmp.setFloorSegmentMB(Long.MAX_VALUE / 1024. / 1024.);
assertEquals(Long.MAX_VALUE / 1024. / 1024., tmp.getFloorSegmentMB(), EPSILON * Long.MAX_VALUE);
expectThrows(
IllegalArgumentException.class,
@@ -787,11 +787,11 @@ public class TestTieredMergePolicy extends BaseMergePolicyTestCase {
tmp.setMaxCFSSegmentSizeMB(Double.POSITIVE_INFINITY);
assertEquals(
Long.MAX_VALUE / 1024 / 1024., tmp.getMaxCFSSegmentSizeMB(), EPSILON * Long.MAX_VALUE);
Long.MAX_VALUE / 1024. / 1024., tmp.getMaxCFSSegmentSizeMB(), EPSILON * Long.MAX_VALUE);
tmp.setMaxCFSSegmentSizeMB(Long.MAX_VALUE / 1024 / 1024.);
tmp.setMaxCFSSegmentSizeMB(Long.MAX_VALUE / 1024. / 1024.);
assertEquals(
Long.MAX_VALUE / 1024 / 1024., tmp.getMaxCFSSegmentSizeMB(), EPSILON * Long.MAX_VALUE);
Long.MAX_VALUE / 1024. / 1024., tmp.getMaxCFSSegmentSizeMB(), EPSILON * Long.MAX_VALUE);
expectThrows(
IllegalArgumentException.class,

@@ -87,7 +87,7 @@ public final class TestRateLimiter extends LuceneTestCase {
}
long endNS = System.nanoTime();
double actualMBPerSec =
(totBytes.get() / 1024 / 1024.)
(totBytes.get() / 1024. / 1024.)
/ ((endNS - startNS) / (double) TimeUnit.SECONDS.toNanos(1));
// TODO: this may false trip .... could be we can only assert that it never exceeds the max, so

@@ -36,6 +36,7 @@ public class TestFixedBitSet extends BaseBitSetTestCase<FixedBitSet> {
return set;
}
@SuppressWarnings("NarrowCalculation")
public void testApproximateCardinality() {
// The approximate cardinality works in such a way that it should be pretty accurate on a bitset
// whose bits are uniformly distributed.

@@ -69,7 +69,7 @@ public class RangeFacetsExample implements Closeable {
// "now", 2000 sec before "now", ...:
for (int i = 0; i < 100; i++) {
Document doc = new Document();
long then = nowSec - i * 1000;
long then = nowSec - i * 1000L;
// Add as doc values field, so we can compute range facets:
doc.add(new NumericDocValuesField("timestamp", then));
// Add as numeric field so we can drill-down:
@@ -81,7 +81,7 @@ public class RangeFacetsExample implements Closeable {
// hour) from "now", 7200 sec (2 hours) from "now", ...:
long startTime = 0;
for (int i = 0; i < 168; i++) {
long endTime = (i + 1) * 3600;
long endTime = (i + 1) * 3600L;
// Choose a relatively large number, e,g., "35", to create variation in count for
// the top n children, so that calling getTopChildren(10) can return top 10 children with
// different counts

@@ -65,7 +65,7 @@ public class KnnVectorDict implements Closeable {
long size = vectors.length();
vectors.seek(size - Integer.BYTES);
dimension = vectors.readInt();
if ((size - Integer.BYTES) % (dimension * Float.BYTES) != 0) {
if ((size - Integer.BYTES) % (dimension * (long) Float.BYTES) != 0) {
throw new IllegalStateException(
"vector file size " + size + " is not consonant with the vector dimension " + dimension);
}

@@ -149,7 +149,7 @@ public final class UTF8TaxonomyWriterCache implements TaxonomyWriterCache, Accou
@Override
public synchronized long ramBytesUsed() {
return bytesUsed.get() + pageCount * PAGE_SIZE * Integer.BYTES;
return bytesUsed.get() + pageCount * (long) PAGE_SIZE * Integer.BYTES;
}
@Override

@@ -343,7 +343,7 @@ public final class SearchImpl extends LukeModel implements Search {
if (totalHits.value == 0
|| (totalHits.relation == TotalHits.Relation.EQUAL_TO
&& currentPage * pageSize >= totalHits.value)) {
&& currentPage * (long) pageSize >= totalHits.value)) {
log.warning("No more next search results are available.");
return Optional.empty();
}

@@ -234,8 +234,8 @@ public class MemoryIndex {
final int maxBufferedByteBlocks = (int) ((maxReusedBytes / 2) / ByteBlockPool.BYTE_BLOCK_SIZE);
final int maxBufferedIntBlocks =
(int)
((maxReusedBytes - (maxBufferedByteBlocks * ByteBlockPool.BYTE_BLOCK_SIZE))
/ (IntBlockPool.INT_BLOCK_SIZE * Integer.BYTES));
((maxReusedBytes - (maxBufferedByteBlocks * (long) ByteBlockPool.BYTE_BLOCK_SIZE))
/ (IntBlockPool.INT_BLOCK_SIZE * (long) Integer.BYTES));
assert (maxBufferedByteBlocks * ByteBlockPool.BYTE_BLOCK_SIZE)
+ (maxBufferedIntBlocks * IntBlockPool.INT_BLOCK_SIZE * Integer.BYTES)
<= maxReusedBytes;

@@ -217,7 +217,7 @@ public final class ListOfOutputs<T> extends Outputs<Object> {
bytes += outputs.ramBytesUsed(_output);
}
// 2 * to allow for ArrayList's oversizing:
bytes += 2 * outputList.size() * RamUsageEstimator.NUM_BYTES_OBJECT_REF;
bytes += 2L * outputList.size() * RamUsageEstimator.NUM_BYTES_OBJECT_REF;
} else {
bytes += outputs.ramBytesUsed((T) output);
}

@@ -62,7 +62,7 @@ class PreCopyMergedSegmentWarmer implements IndexReaderWarmer {
Locale.ROOT,
"top: done warm merge " + info + ": took %.3f sec, %.1f MB",
(System.nanoTime() - startNS) / (double) TimeUnit.SECONDS.toNanos(1),
info.sizeInBytes() / 1024 / 1024.));
info.sizeInBytes() / 1024. / 1024.));
primary.finishedMergedFiles.addAll(filesMetaData.keySet());
}
}

@@ -304,7 +304,7 @@ public final class NRTSuggester implements Accountable {
*/
private int getMaxTopNSearcherQueueSize(
int topN, int numDocs, double liveDocsRatio, boolean filterEnabled) {
long maxQueueSize = topN * maxAnalyzedPathsPerOutput;
long maxQueueSize = topN * (long) maxAnalyzedPathsPerOutput;
// liveDocRatio can be at most 1.0 (if no docs were deleted)
assert liveDocsRatio <= 1.0d;
maxQueueSize = (long) (maxQueueSize / liveDocsRatio);

@@ -57,7 +57,7 @@ public class TernaryTreeNode {
mem +=
RamUsageEstimator.shallowSizeOf(token)
+ RamUsageEstimator.NUM_BYTES_ARRAY_HEADER
+ Character.BYTES * token.length();
+ Character.BYTES * (long) token.length();
}
mem += RamUsageEstimator.shallowSizeOf(val);
return mem;

@@ -177,7 +177,7 @@ public abstract class BaseRangeFieldQueryTestCase extends LuceneTestCase {
} else {
for (int m = 0, even = dimensions % 2; m < dimensions * 2; ++m) {
if (x == m) {
int d = (int) Math.floor(m / 2);
int d = (int) Math.floor(m / 2f);
// current could be multivalue but old may not be, so use first box
if (even == 0) { // even is min
ranges[id][0].setMin(d, ranges[oldID][0].getMin(d));

@@ -444,7 +444,7 @@ public abstract class BaseDirectoryTestCase extends LuceneTestCase {
longs[i] = random().nextBoolean() ? Long.MIN_VALUE : Long.MAX_VALUE;
break;
case 2:
longs[i] = (random().nextBoolean() ? -1 : 1) * random().nextInt(1024);
longs[i] = (random().nextBoolean() ? -1 : 1) * (long) random().nextInt(1024);
break;
default:
throw new AssertionError();
@@ -685,8 +685,8 @@ public abstract class BaseDirectoryTestCase extends LuceneTestCase {
o.close();
IndexInput i = dir.openInput("out", newIOContext(random()));
i.seek(2 * bufferLength - 1);
i.seek(3 * bufferLength);
i.seek(2L * bufferLength - 1);
i.seek(3L * bufferLength);
i.seek(bufferLength);
i.readBytes(bytes, 0, 2 * bufferLength);
i.close();
@@ -956,15 +956,15 @@ public abstract class BaseDirectoryTestCase extends LuceneTestCase {
IndexInput input = dir.openInput("longs", newIOContext(random()));
RandomAccessInput slice = input.randomAccessSlice(0, input.length());
for (int i = 0; i < longs.length; i++) {
assertEquals(longs[i], slice.readLong(i * 8));
assertEquals(longs[i], slice.readLong(i * 8L));
}
// subslices
for (int i = 1; i < longs.length; i++) {
long offset = i * 8;
long offset = i * 8L;
RandomAccessInput subslice = input.randomAccessSlice(offset, input.length() - offset);
for (int j = i; j < longs.length; j++) {
assertEquals(longs[j], subslice.readLong((j - i) * 8));
assertEquals(longs[j], subslice.readLong((j - i) * 8L));
}
}
@@ -981,7 +981,7 @@ public abstract class BaseDirectoryTestCase extends LuceneTestCase {
IndexInput padded = dir.openInput(name, newIOContext(random()));
RandomAccessInput whole = padded.randomAccessSlice(i, padded.length() - i);
for (int j = 0; j < longs.length; j++) {
assertEquals(longs[j], whole.readLong(j * 8));
assertEquals(longs[j], whole.readLong(j * 8L));
}
padded.close();
}
@@ -1005,15 +1005,15 @@ public abstract class BaseDirectoryTestCase extends LuceneTestCase {
IndexInput input = dir.openInput("ints", newIOContext(random()));
RandomAccessInput slice = input.randomAccessSlice(0, input.length());
for (int i = 0; i < ints.length; i++) {
assertEquals(ints[i], slice.readInt(i * 4));
assertEquals(ints[i], slice.readInt(i * 4L));
}
// subslices
for (int i = 1; i < ints.length; i++) {
long offset = i * 4;
long offset = i * 4L;
RandomAccessInput subslice = input.randomAccessSlice(offset, input.length() - offset);
for (int j = i; j < ints.length; j++) {
assertEquals(ints[j], subslice.readInt((j - i) * 4));
assertEquals(ints[j], subslice.readInt((j - i) * 4L));
}
}
@@ -1030,7 +1030,7 @@ public abstract class BaseDirectoryTestCase extends LuceneTestCase {
IndexInput padded = dir.openInput(name, newIOContext(random()));
RandomAccessInput whole = padded.randomAccessSlice(i, padded.length() - i);
for (int j = 0; j < ints.length; j++) {
assertEquals(ints[j], whole.readInt(j * 4));
assertEquals(ints[j], whole.readInt(j * 4L));
}
padded.close();
}
@@ -1053,15 +1053,15 @@ public abstract class BaseDirectoryTestCase extends LuceneTestCase {
IndexInput input = dir.openInput("shorts", newIOContext(random()));
RandomAccessInput slice = input.randomAccessSlice(0, input.length());
for (int i = 0; i < shorts.length; i++) {
assertEquals(shorts[i], slice.readShort(i * 2));
assertEquals(shorts[i], slice.readShort(i * 2L));
}
// subslices
for (int i = 1; i < shorts.length; i++) {
long offset = i * 2;
long offset = i * 2L;
RandomAccessInput subslice = input.randomAccessSlice(offset, input.length() - offset);
for (int j = i; j < shorts.length; j++) {
assertEquals(shorts[j], subslice.readShort((j - i) * 2));
assertEquals(shorts[j], subslice.readShort((j - i) * 2L));
}
}
@@ -1078,7 +1078,7 @@ public abstract class BaseDirectoryTestCase extends LuceneTestCase {
IndexInput padded = dir.openInput(name, newIOContext(random()));
RandomAccessInput whole = padded.randomAccessSlice(i, padded.length() - i);
for (int j = 0; j < shorts.length; j++) {
assertEquals(shorts[j], whole.readShort(j * 2));
assertEquals(shorts[j], whole.readShort(j * 2L));
}
padded.close();
}