SegmentMetadataQuery: Fix default interval handling. (#5489)

* SegmentMetadataQuery: Fix default interval handling.

PR #4131 introduced a new copy builder for segmentMetadata queries that did
not retain the value of usingDefaultInterval, so the flag was silently
dropped on copy and default-interval handling stopped working as expected.
Instead of restricting itself to the default one-week history when no
intervals are provided, the segmentMetadata query would query _all_
segments, incurring an unexpected performance hit.

This patch fixes the bug and adds a test for the copy builder.

* Intervals
Gian Merlino 2018-03-15 10:05:46 -07:00 committed by GitHub
parent 7d1163b0d9
commit 16b81fcd53
3 changed files with 49 additions and 1 deletion
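
For context, here is a minimal sketch of the failure mode. It uses only
builder and accessor methods that appear in the diffs below; the dataSource
name is illustrative.

    // Building a segmentMetadata query without explicit intervals flags it
    // as using the default interval (a trailing one-week history window).
    SegmentMetadataQuery original = Druids.newSegmentMetadataQueryBuilder()
                                          .dataSource("foo")
                                          .build();

    // Before this patch, the copy builder rebuilt the query with the flag
    // hard-coded to false...
    SegmentMetadataQuery copy = Druids.SegmentMetadataQueryBuilder.copy(original).build();

    // ...so original.isUsingDefaultInterval() was true while
    // copy.isUsingDefaultInterval() was false, and filterSegments() no
    // longer trimmed the copied query to the default one-week history.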

Druids.java

@@ -591,6 +591,7 @@ public class Druids
     private EnumSet<SegmentMetadataQuery.AnalysisType> analysisTypes;
     private Boolean merge;
     private Boolean lenientAggregatorMerge;
+    private Boolean usingDefaultInterval;
     private Map<String, Object> context;

     public SegmentMetadataQueryBuilder()
@@ -601,6 +602,7 @@ public class Druids
       analysisTypes = null;
       merge = null;
       lenientAggregatorMerge = null;
+      usingDefaultInterval = null;
       context = null;
     }
@@ -613,7 +615,7 @@ public class Druids
           merge,
           context,
           analysisTypes,
-          false,
+          usingDefaultInterval,
           lenientAggregatorMerge
       );
     }
@@ -627,6 +629,7 @@ public class Druids
           .analysisTypes(query.getAnalysisTypes())
           .merge(query.isMerge())
           .lenientAggregatorMerge(query.isLenientAggregatorMerge())
+          .usingDefaultInterval(query.isUsingDefaultInterval())
           .context(query.getContext());
     }
@@ -696,6 +699,12 @@ public class Druids
       return this;
     }

+    public SegmentMetadataQueryBuilder usingDefaultInterval(boolean usingDefaultInterval)
+    {
+      this.usingDefaultInterval = usingDefaultInterval;
+      return this;
+    }
+
     public SegmentMetadataQueryBuilder context(Map<String, Object> c)
     {
       context = c;
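
With the flag stored on the builder, passed through build(), and copied in
copy(), the value now survives a round-trip. A quick sketch, using only the
methods from the diff above (the dataSource name is illustrative):

    SegmentMetadataQuery query = Druids.newSegmentMetadataQueryBuilder()
                                       .dataSource("foo")
                                       .usingDefaultInterval(true)
                                       .build();

    // copy() now carries the flag over via query.isUsingDefaultInterval(),
    // so the copied query keeps its default-interval behavior.
    SegmentMetadataQuery copy = Druids.SegmentMetadataQueryBuilder.copy(query).build();
    Assert.assertTrue(copy.isUsingDefaultInterval());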

SegmentMetadataQueryQueryToolChestTest.java

@@ -27,6 +27,7 @@ import com.google.common.collect.Maps;
 import io.druid.jackson.DefaultObjectMapper;
 import io.druid.java.util.common.Intervals;
 import io.druid.query.CacheStrategy;
+import io.druid.query.Druids;
 import io.druid.query.TableDataSource;
 import io.druid.query.aggregation.AggregatorFactory;
 import io.druid.query.aggregation.DoubleMaxAggregatorFactory;
@@ -38,10 +39,14 @@ import io.druid.query.metadata.metadata.SegmentAnalysis;
 import io.druid.query.metadata.metadata.SegmentMetadataQuery;
 import io.druid.query.spec.LegacySegmentSpec;
 import io.druid.segment.column.ValueType;
+import io.druid.timeline.LogicalSegment;
+import org.joda.time.Period;
 import org.junit.Assert;
 import org.junit.Test;

+import java.util.List;
 import java.util.Map;
+import java.util.stream.Collectors;

 public class SegmentMetadataQueryQueryToolChestTest
 {
@@ -271,6 +276,37 @@ public class SegmentMetadataQueryQueryToolChestTest
     );
   }

+  @Test
+  public void testFilterSegments()
+  {
+    final SegmentMetadataQueryConfig config = new SegmentMetadataQueryConfig();
+    final SegmentMetadataQueryQueryToolChest toolChest = new SegmentMetadataQueryQueryToolChest(config);
+
+    final List<LogicalSegment> filteredSegments = toolChest.filterSegments(
+        Druids.newSegmentMetadataQueryBuilder().dataSource("foo").merge(true).build(),
+        ImmutableList
+            .of(
+                "2000-01-01/P1D",
+                "2000-01-04/P1D",
+                "2000-01-09/P1D",
+                "2000-01-09/P1D"
+            )
+            .stream()
+            .map(interval -> (LogicalSegment) () -> Intervals.of(interval))
+            .collect(Collectors.toList())
+    );
+
+    Assert.assertEquals(Period.weeks(1), config.getDefaultHistory());
+    Assert.assertEquals(
+        ImmutableList.of(
+            Intervals.of("2000-01-04/P1D"),
+            Intervals.of("2000-01-09/P1D"),
+            Intervals.of("2000-01-09/P1D")
+        ),
+        filteredSegments.stream().map(LogicalSegment::getInterval).collect(Collectors.toList())
+    );
+  }
+
   @SuppressWarnings("ArgumentParameterSwap")
   @Test
   public void testMergeRollup()
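
The expected result follows from the one-week default history: the test
query sets no intervals, so it uses the default interval, and the latest
input segment ends at 2000-01-10. filterSegments() therefore keeps only
segments overlapping the trailing week starting 2000-01-03; the
2000-01-01/P1D segment ends before that cutoff and is dropped, while the
other three are retained.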

SegmentMetadataQueryTest.java

@@ -895,6 +895,9 @@ public class SegmentMetadataQueryTest
     // test serialize and deserialize
     Assert.assertEquals(query, MAPPER.readValue(MAPPER.writeValueAsString(query), Query.class));
+
+    // test copy
+    Assert.assertEquals(query, Druids.SegmentMetadataQueryBuilder.copy((SegmentMetadataQuery) query).build());
   }

   @Test
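
Per the commit message, this assertion is the new test for the copy
builder. It runs a copy round-trip next to the existing
serialize/deserialize check, so a copy builder that silently drops a field
(as the one from #4131 did with usingDefaultInterval) would now fail the
suite.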