mirror of https://github.com/apache/druid.git
Skip compaction for datasources with partial-eternity segments (#15542)
This PR builds on #13304 to skip compaction for datasources with segments whose interval start or end coincides with the endpoints of the ETERNITY interval. This prevents an issue similar to #13208, where the Coordinator iterates over an enormous number of intervals when trying to compact an interval with an infinite start or end.
parent 8735d023a1
commit 91ca8e73d6
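The fix hinges on comparing a segment interval's endpoints against Druid's ETERNITY interval instead of testing for full equality. Below is a minimal sketch of that condition using the Intervals and DateTimes utilities that appear in the diff; the class wrapper and variable names are illustrative only, not part of the patch.

import org.apache.druid.java.util.common.DateTimes;
import org.apache.druid.java.util.common.Intervals;
import org.joda.time.Interval;

public class PartialEternitySketch
{
  public static void main(String[] args)
  {
    // A segment covering "the first half of eternity": infinite start, finite end.
    Interval segmentInterval = new Interval(DateTimes.MIN, DateTimes.of("2024-01-01"));

    // The old check (full equality with ETERNITY) misses this interval...
    boolean fullEternity = Intervals.ETERNITY.equals(segmentInterval); // false

    // ...while the endpoint check this patch introduces catches it,
    // so compaction for the datasource is skipped.
    boolean partialEternity =
        Intervals.ETERNITY.getStart().equals(segmentInterval.getStart())
        || Intervals.ETERNITY.getEnd().equals(segmentInterval.getEnd()); // true

    System.out.println(fullEternity + " " + partialEternity);
  }
}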
@@ -110,9 +110,10 @@ public class NewestSegmentFirstIterator implements CompactionSegmentIterator
       // For example, if the original is interval of 2020-01-28/2020-02-03 with WEEK granularity
       // and the configuredSegmentGranularity is MONTH, the segment will be split to two segments
       // of 2020-01/2020-02 and 2020-02/2020-03.
-      if (Intervals.ETERNITY.equals(segment.getInterval())) {
+      if (Intervals.ETERNITY.getStart().equals(segment.getInterval().getStart())
+          || Intervals.ETERNITY.getEnd().equals(segment.getInterval().getEnd())) {
         // This is to prevent the coordinator from crashing as raised in https://github.com/apache/druid/issues/13208
-        log.warn("Cannot compact datasource[%s] with ALL granularity", dataSource);
+        log.warn("Cannot compact datasource[%s] containing segments with partial-ETERNITY intervals", dataSource);
         return;
       }
       for (Interval interval : configuredSegmentGranularity.getIterable(segment.getInterval())) {
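The comment in this hunk describes how getIterable() buckets a segment interval by the configured granularity; the guard exists because the same bucketing applied to a partial-ETERNITY interval would have to walk an astronomically long range bucket by bucket. A sketch of both cases, assuming Druid's Granularities utility (the class wrapper is illustrative):

import org.apache.druid.java.util.common.Intervals;
import org.apache.druid.java.util.common.granularity.Granularities;
import org.joda.time.Interval;

public class GranularityIterationSketch
{
  public static void main(String[] args)
  {
    // The finite example from the code comment: a WEEK-aligned interval mapped to
    // MONTH granularity splits into the two month buckets 2020-01/2020-02 and 2020-02/2020-03.
    for (Interval bucket : Granularities.MONTH.getIterable(Intervals.of("2020-01-28/2020-02-03"))) {
      System.out.println(bucket);
    }

    // A partial-ETERNITY interval would instead yield month buckets starting around
    // the year -146136543. Iterating them is what overwhelmed the Coordinator in
    // #13208, which is why the new check returns before this loop is ever reached.
  }
}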
@@ -429,8 +430,9 @@ public class NewestSegmentFirstIterator implements CompactionSegmentIterator
     final List<Interval> searchIntervals = new ArrayList<>();

     for (Interval lookupInterval : filteredInterval) {
-      if (Intervals.ETERNITY.equals(lookupInterval)) {
-        log.warn("Cannot compact datasource[%s] since interval is ETERNITY.", dataSourceName);
+      if (Intervals.ETERNITY.getStart().equals(lookupInterval.getStart())
+          || Intervals.ETERNITY.getEnd().equals(lookupInterval.getEnd())) {
+        log.warn("Cannot compact datasource[%s] since interval[%s] coincides with ETERNITY.", dataSourceName, lookupInterval);
         return Collections.emptyList();
       }
       final List<DataSegment> segments = timeline
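Both call sites now repeat the same two-endpoint comparison. A hypothetical helper, not part of this patch, makes the shared condition explicit:

import org.apache.druid.java.util.common.Intervals;
import org.joda.time.Interval;

public final class EternityChecks
{
  private EternityChecks() {}

  // Hypothetical utility (not in the Druid codebase): true when the interval's
  // start or end coincides with an ETERNITY endpoint, i.e. it is fully or
  // partially infinite and therefore unsafe to enumerate by granularity.
  public static boolean touchesEternity(Interval interval)
  {
    return Intervals.ETERNITY.getStart().equals(interval.getStart())
           || Intervals.ETERNITY.getEnd().equals(interval.getEnd());
  }
}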
@@ -1636,6 +1636,70 @@ public class NewestSegmentFirstPolicyTest
     Assert.assertFalse(iterator.hasNext());
   }

+  @Test
+  public void testSkipFirstHalfEternityToDefault()
+  {
+    CompactionSegmentIterator iterator = policy.reset(
+        ImmutableMap.of(DATA_SOURCE,
+                        createCompactionConfig(10000,
+                                               new Period("P0D"),
+                                               null
+                        )
+        ),
+        ImmutableMap.of(
+            DATA_SOURCE,
+            SegmentTimeline.forSegments(ImmutableSet.of(
+                                            new DataSegment(
+                                                DATA_SOURCE,
+                                                new Interval(DateTimes.MIN, DateTimes.of("2024-01-01")),
+                                                "0",
+                                                new HashMap<>(),
+                                                new ArrayList<>(),
+                                                new ArrayList<>(),
+                                                new NumberedShardSpec(0, 0),
+                                                0,
+                                                100)
+                                        )
+            )
+        ),
+        Collections.emptyMap()
+    );
+
+    Assert.assertFalse(iterator.hasNext());
+  }
+
+  @Test
+  public void testSkipSecondHalfOfEternityToDefault()
+  {
+    CompactionSegmentIterator iterator = policy.reset(
+        ImmutableMap.of(DATA_SOURCE,
+                        createCompactionConfig(10000,
+                                               new Period("P0D"),
+                                               null
+                        )
+        ),
+        ImmutableMap.of(
+            DATA_SOURCE,
+            SegmentTimeline.forSegments(ImmutableSet.of(
+                                            new DataSegment(
+                                                DATA_SOURCE,
+                                                new Interval(DateTimes.of("2024-01-01"), DateTimes.MAX),
+                                                "0",
+                                                new HashMap<>(),
+                                                new ArrayList<>(),
+                                                new ArrayList<>(),
+                                                new NumberedShardSpec(0, 0),
+                                                0,
+                                                100)
+                                        )
+            )
+        ),
+        Collections.emptyMap()
+    );
+
+    Assert.assertFalse(iterator.hasNext());
+  }
+
   @Test
   public void testSkipAllToAllGranularity()
   {