Improve error message for integer overflow in compaction task (#7131)

* improve error message for integer overflow in compaction task

* fix build
Jihoon Son, 2019-02-27 19:07:37 -08:00; committed by Benedict Jin
parent 6b232d8195
commit cacdc83cad
3 changed files with 42 additions and 1 deletion

Numbers.java

@@ -92,6 +92,14 @@ public final class Numbers
     }
   }
+
+  public static int toIntExact(long value, String error)
+  {
+    if ((int) value != value) {
+      throw new ArithmeticException(error);
+    }
+    return (int) value;
+  }
   private Numbers()
   {
   }
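
For reference, a minimal usage sketch of the new helper compared with the JDK's Math.toIntExact; the variable names and values below are illustrative and not part of the commit:

    // A value that does not fit in an int (illustrative only)
    long tooBig = 10L * Integer.MAX_VALUE;

    // JDK helper: would throw ArithmeticException("integer overflow") with no further context
    // int n = Math.toIntExact(tooBig);

    // New helper: same range check, but the caller supplies a descriptive message
    int n = Numbers.toIntExact(tooBig, "value[" + tooBig + "] is out of integer value range");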

CompactionTask.java

@@ -54,8 +54,10 @@ import org.apache.druid.indexing.firehose.IngestSegmentFirehoseFactory;
 import org.apache.druid.java.util.common.IAE;
 import org.apache.druid.java.util.common.ISE;
 import org.apache.druid.java.util.common.JodaUtils;
+import org.apache.druid.java.util.common.Numbers;
 import org.apache.druid.java.util.common.Pair;
 import org.apache.druid.java.util.common.RE;
+import org.apache.druid.java.util.common.StringUtils;
 import org.apache.druid.java.util.common.granularity.Granularities;
 import org.apache.druid.java.util.common.granularity.Granularity;
 import org.apache.druid.java.util.common.granularity.GranularityType;
@@ -845,7 +847,16 @@ public class CompactionTask extends AbstractTask
       }
       final double avgRowsPerByte = totalNumRows / (double) totalSizeBytes;
-      final int maxRowsPerSegment = Math.toIntExact(Math.round(avgRowsPerByte * nonNullTargetCompactionSizeBytes));
+      final long maxRowsPerSegmentLong = Math.round(avgRowsPerByte * nonNullTargetCompactionSizeBytes);
+      final int maxRowsPerSegment = Numbers.toIntExact(
+          maxRowsPerSegmentLong,
+          StringUtils.format(
+              "Estimated maxRowsPerSegment[%s] is out of integer value range. "
+              + "Please consider reducing targetCompactionSizeBytes[%s].",
+              maxRowsPerSegmentLong,
+              targetCompactionSizeBytes
+          )
+      );
       Preconditions.checkState(maxRowsPerSegment > 0, "Negative maxRowsPerSegment[%s]", maxRowsPerSegment);
       log.info(
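
To see why this estimate can overflow, here is a rough worked example (the 0.1 rows-per-byte density is an assumption for illustration, not a value from the commit): with targetCompactionSizeBytes set to Long.MAX_VALUE, the rounded product is on the order of 9.2e17, far beyond Integer.MAX_VALUE (2147483647), so the old Math.toIntExact call failed with a bare "integer overflow" while the new code reports both the estimate and the setting to reduce.

    // Illustrative numbers only; avgRowsPerByte = 0.1 is assumed
    double avgRowsPerByte = 0.1;
    long targetCompactionSizeBytes = Long.MAX_VALUE;
    long estimate = Math.round(avgRowsPerByte * targetCompactionSizeBytes); // roughly 9.2e17
    // estimate > Integer.MAX_VALUE, so Numbers.toIntExact throws an ArithmeticException
    // carrying the formatted "Estimated maxRowsPerSegment[...]" message above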

CompactionTaskTest.java

@@ -62,6 +62,7 @@ import org.apache.druid.jackson.DefaultObjectMapper;
 import org.apache.druid.java.util.common.IAE;
 import org.apache.druid.java.util.common.ISE;
 import org.apache.druid.java.util.common.Intervals;
+import org.apache.druid.java.util.common.Pair;
 import org.apache.druid.java.util.common.StringUtils;
 import org.apache.druid.java.util.common.granularity.Granularities;
 import org.apache.druid.java.util.common.granularity.Granularity;
@@ -1135,6 +1136,27 @@ public class CompactionTaskTest
         .build();
   }
+
+  @Test
+  public void testHugeTargetCompactionSize()
+  {
+    final PartitionConfigurationManager manager = new PartitionConfigurationManager(Long.MAX_VALUE, TUNING_CONFIG);
+    final TestIndexIO indexIO = (TestIndexIO) toolbox.getIndexIO();
+    final Map<File, QueryableIndex> queryableIndexMap = indexIO.getQueryableIndexMap();
+    final List<Pair<QueryableIndex, DataSegment>> segments = new ArrayList<>();
+    for (Entry<DataSegment, File> entry : segmentMap.entrySet()) {
+      final DataSegment segment = entry.getKey();
+      final File file = entry.getValue();
+      segments.add(Pair.of(Preconditions.checkNotNull(queryableIndexMap.get(file)), segment));
+    }
+
+    expectedException.expect(ArithmeticException.class);
+    expectedException.expectMessage(
+        CoreMatchers.startsWith("Estimated maxRowsPerSegment[922337203685477632] is out of integer value range.")
+    );
+
+    manager.computeTuningConfig(segments);
+  }
+
   private static List<DimensionsSpec> getExpectedDimensionsSpecForAutoGeneration(boolean keepSegmentGranularity)
   {
     if (keepSegmentGranularity) {
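
For completeness, a minimal sketch of a direct unit test for the new helper; this test is not part of the commit, and the message text is illustrative:

    @Test
    public void testToIntExactWithCustomMessage()
    {
      try {
        // Long.MAX_VALUE cannot fit in an int, so the custom-message path is exercised
        Numbers.toIntExact(Long.MAX_VALUE, "value is too large");
        Assert.fail("expected ArithmeticException");
      }
      catch (ArithmeticException e) {
        Assert.assertEquals("value is too large", e.getMessage());
      }
    }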