mirror of https://github.com/apache/druid.git
Fix potential integer overflow issues (#9609)
ApproximateHistogram - seems unlikely to be an actual issue.
SegmentAnalyzer - unclear if this is an actual issue.
GenericIndexedWriter - unclear if this is an actual issue.
IncrementalIndexRow and OnheapIncrementalIndex - non-issues, because it is very unlikely for the number of dims to be large enough to hit the overflow condition.
parent 22d3eed80c
commit 332ca19621
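The common thread in the hunks below: Java evaluates arithmetic on two int operands in 32-bit arithmetic, so a product can wrap around before it is assigned to (or compared with) a long. Widening one operand, with a 4L literal or a (long) cast, forces the whole expression into 64-bit arithmetic. A minimal sketch of the pattern, using a made-up numWritten value rather than anything from Druid:

public class OverflowSketch
{
  public static void main(String[] args)
  {
    int numWritten = 600_000_000;

    // int * int wraps around before the result is widened to long
    long wrong = numWritten * 4;   // -1894967296
    // one long operand forces the multiplication into 64-bit arithmetic
    long right = numWritten * 4L;  // 2400000000

    System.out.println(wrong + " vs " + right);
  }
}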
@@ -1557,7 +1557,7 @@ public class ApproximateHistogram
       final double s = probabilities[j] * this.count();

       int i = 0;
-      int sum = 0;
+      long sum = 0;
       int k = 1;
       long count;
       while (k <= this.binCount()) {
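This hunk widens an accumulator rather than a multiplication. A general Java point worth noting, not Druid-specific: with an int sum and a long count, the compound assignment sum += count still compiles, because compound assignments insert an implicit narrowing cast (JLS 15.26.2), so the accumulator wraps silently instead of failing to build. A minimal sketch with made-up values; whether real histogram bin counts ever sum this high is exactly what the commit message flags as "seems unlikely":

public class NarrowingSketch
{
  public static void main(String[] args)
  {
    int sum = 0;
    long count = 3_000_000_000L;
    sum += count;       // compiles via an implicit (int) cast; sum is now -1294967296
    System.out.println(sum);

    long wideSum = 0;
    wideSum += count;   // widening the accumulator, as the fix does, avoids the wrap
    System.out.println(wideSum);
  }
}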
@@ -207,8 +207,8 @@ public class SegmentAnalyzer
       for (int i = 0; i < cardinality; ++i) {
         String value = bitmapIndex.getValue(i);
         if (value != null) {
-          size += StringUtils.estimatedBinaryLengthAsUTF8(value) * bitmapIndex.getBitmap(bitmapIndex.getIndex(value))
-              .size();
+          size += StringUtils.estimatedBinaryLengthAsUTF8(value) *
+                  ((long) bitmapIndex.getBitmap(bitmapIndex.getIndex(value)).size());
         }
       }
     }
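Here the target variable size is already a long, but that alone does not help: the int product on the right-hand side wraps before the compound assignment widens it, which is why the fix casts one factor. A minimal sketch with made-up sizes, not Druid's actual bitmap classes:

public class ProductSketch
{
  public static void main(String[] args)
  {
    long size = 0;
    int bytesPerValue = 1_000;    // stand-in for the estimated UTF-8 length of a value
    int bitmapSize = 3_000_000;   // stand-in for the bitmap's row count

    size += bytesPerValue * bitmapSize;           // int product wraps to -1294967296
    System.out.println(size);

    size = 0;
    size += bytesPerValue * ((long) bitmapSize);  // 3000000000, as intended
    System.out.println(size);
  }
}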
@@ -321,10 +321,10 @@ public class GenericIndexedWriter<T> implements Serializer
     final long numBytesWritten = headerOut.size() + valuesOut.size();

     Preconditions.checkState(
-        headerOut.size() == (numWritten * 4),
+        headerOut.size() == (numWritten * 4L),
         "numWritten[%s] number of rows should have [%s] bytes written to headerOut, had[%s]",
         numWritten,
-        numWritten * 4,
+        numWritten * 4L,
         headerOut.size()
     );
     Preconditions.checkState(
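Note that this hunk needed the 4L suffix in two places: the comparison itself and the %s argument interpolated into the failure message. With the int multiply, a failed check would also have reported the already-wrapped expected byte count, making the error harder to interpret.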
@@ -459,7 +459,7 @@ public class GenericIndexedWriter<T> implements Serializer
     long relativeRefBytes = 0;
     long relativeNumBytes;
     try (SmooshedWriter smooshChannel = smoosher
-        .addWithSmooshedWriter(generateHeaderFileName(filenameBase), numWritten * Integer.BYTES)) {
+        .addWithSmooshedWriter(generateHeaderFileName(filenameBase), ((long) numWritten) * Integer.BYTES)) {

       // following block converts long header indexes into int header indexes.
       for (int pos = 0; pos < numWritten; pos++) {
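Assuming the writer's size parameter is declared long, as the cast suggests, the parameter type still cannot rescue the argument: numWritten * Integer.BYTES is evaluated as an int before it is widened at the call boundary. A minimal sketch with a hypothetical reserve method, not the actual Smoosh API:

public class CallBoundarySketch
{
  // hypothetical stand-in for a method that takes a 64-bit size
  static void reserve(long bytes)
  {
    System.out.println("reserving " + bytes + " bytes");
  }

  public static void main(String[] args)
  {
    int numWritten = 700_000_000;
    reserve(numWritten * Integer.BYTES);           // wrapped: reserving -1494967296 bytes
    reserve(((long) numWritten) * Integer.BYTES);  // reserving 2800000000 bytes
  }
}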
@@ -126,7 +126,7 @@ public final class IncrementalIndexRow
    */
   public long estimateBytesInMemory()
   {
-    long sizeInBytes = Long.BYTES + Integer.BYTES * dims.length + Long.BYTES + Long.BYTES;
+    long sizeInBytes = Long.BYTES + ((long) Integer.BYTES) * dims.length + Long.BYTES + Long.BYTES;
     sizeInBytes += dimsKeySize;
     return sizeInBytes;
   }
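As the commit message says, this cast is defensive: for Integer.BYTES * dims.length to wrap, a single row would need more than Integer.MAX_VALUE / 4 = 2147483647 / 4 = 536,870,911 dimensions.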
@@ -105,10 +105,10 @@ public class OnheapIncrementalIndex extends IncrementalIndex<Aggregator>
    */
   private static long getMaxBytesPerRowForAggregators(IncrementalIndexSchema incrementalIndexSchema)
   {
-    long maxAggregatorIntermediateSize = Integer.BYTES * incrementalIndexSchema.getMetrics().length;
+    long maxAggregatorIntermediateSize = ((long) Integer.BYTES) * incrementalIndexSchema.getMetrics().length;
     maxAggregatorIntermediateSize += Arrays.stream(incrementalIndexSchema.getMetrics())
                                            .mapToLong(aggregator -> aggregator.getMaxIntermediateSizeWithNulls()
-                                                                     + Long.BYTES * 2)
+                                                                     + Long.BYTES * 2L)
                                            .sum();
     return maxAggregatorIntermediateSize;
   }
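The Long.BYTES * 2 term is just 16 and cannot overflow on its own; the 2L suffix matters only in that it forces the whole lambda body into 64-bit arithmetic before mapToLong sees it, in case the aggregator's size method returns an int. Hitting that overflow would require an intermediate size near 2 GB for a single aggregator, which supports the commit message's assessment that these are non-issues in practice.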