Fix potential integer overflow issues (#9609)

ApproximateHistogram - overflow seems unlikely
SegmentAnalyzer - unclear if this is an actual issue
GenericIndexedWriter - unclear if this is an actual issue
IncrementalIndexRow and OnheapIncrementalIndex are non-issues because it's very
unlikely for the number of dims to be large enough to hit the overflow
condition
Suneet Saldanha 2020-04-10 11:47:08 -07:00 committed by GitHub
parent 22d3eed80c
commit 332ca19621
5 changed files with 9 additions and 9 deletions
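
All five fixes target the same Java pitfall: arithmetic on two int operands is
evaluated in 32-bit int arithmetic even when the result is assigned to a long,
so a large product can wrap around before it is widened. Widening one operand,
either with a long literal (4L) or an explicit (long) cast, forces the whole
expression into 64-bit arithmetic. A minimal sketch of the pattern, with a
hypothetical value and class name (not part of this commit):

    public class OverflowDemo
    {
      public static void main(String[] args)
      {
        int numWritten = 700_000_000;

        // Both operands are int, so the multiply happens in 32-bit
        // arithmetic and wraps before the widening assignment to long.
        long broken = numWritten * 4;              // -1494967296

        // Widening either operand makes the whole expression 64-bit.
        long fixed = numWritten * 4L;              // 2800000000
        long alsoFixed = ((long) numWritten) * 4;  // 2800000000

        System.out.println(broken);
        System.out.println(fixed);
        System.out.println(alsoFixed);
      }
    }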

@@ -1557,7 +1557,7 @@ public class ApproximateHistogram
       final double s = probabilities[j] * this.count();
       int i = 0;
-      int sum = 0;
+      long sum = 0;
       int k = 1;
       long count;
       while (k <= this.binCount()) {

@@ -207,8 +207,8 @@ public class SegmentAnalyzer
         for (int i = 0; i < cardinality; ++i) {
           String value = bitmapIndex.getValue(i);
           if (value != null) {
-            size += StringUtils.estimatedBinaryLengthAsUTF8(value) * bitmapIndex.getBitmap(bitmapIndex.getIndex(value))
-                .size();
+            size += StringUtils.estimatedBinaryLengthAsUTF8(value) *
+                    ((long) bitmapIndex.getBitmap(bitmapIndex.getIndex(value)).size());
           }
         }
       }

@@ -321,10 +321,10 @@ public class GenericIndexedWriter<T> implements Serializer
     final long numBytesWritten = headerOut.size() + valuesOut.size();
     Preconditions.checkState(
-        headerOut.size() == (numWritten * 4),
+        headerOut.size() == (numWritten * 4L),
         "numWritten[%s] number of rows should have [%s] bytes written to headerOut, had[%s]",
         numWritten,
-        numWritten * 4,
+        numWritten * 4L,
         headerOut.size()
     );
     Preconditions.checkState(
@@ -459,7 +459,7 @@ public class GenericIndexedWriter<T> implements Serializer
     long relativeRefBytes = 0;
     long relativeNumBytes;
     try (SmooshedWriter smooshChannel = smoosher
-        .addWithSmooshedWriter(generateHeaderFileName(filenameBase), numWritten * Integer.BYTES)) {
+        .addWithSmooshedWriter(generateHeaderFileName(filenameBase), ((long) numWritten) * Integer.BYTES)) {
       // following block converts long header indexes into int header indexes.
       for (int pos = 0; pos < numWritten; pos++) {

@@ -126,7 +126,7 @@ public final class IncrementalIndexRow
    */
   public long estimateBytesInMemory()
   {
-    long sizeInBytes = Long.BYTES + Integer.BYTES * dims.length + Long.BYTES + Long.BYTES;
+    long sizeInBytes = Long.BYTES + ((long) Integer.BYTES) * dims.length + Long.BYTES + Long.BYTES;
     sizeInBytes += dimsKeySize;
     return sizeInBytes;
   }

@@ -105,10 +105,10 @@ public class OnheapIncrementalIndex extends IncrementalIndex<Aggregator>
    */
   private static long getMaxBytesPerRowForAggregators(IncrementalIndexSchema incrementalIndexSchema)
   {
-    long maxAggregatorIntermediateSize = Integer.BYTES * incrementalIndexSchema.getMetrics().length;
+    long maxAggregatorIntermediateSize = ((long) Integer.BYTES) * incrementalIndexSchema.getMetrics().length;
     maxAggregatorIntermediateSize += Arrays.stream(incrementalIndexSchema.getMetrics())
                                            .mapToLong(aggregator -> aggregator.getMaxIntermediateSizeWithNulls()
-                                                                    + Long.BYTES * 2)
+                                                                    + Long.BYTES * 2L)
                                            .sum();
     return maxAggregatorIntermediateSize;
   }