Allow queries with no aggregators. (#3216)

This is actually reasonable for groupBy or lexicographic topN queries that are
being used to do a "COUNT DISTINCT" kind of query. No aggregators are
needed for that query, and including a dummy aggregator wastes 8 bytes
per row.

It's kind of silly for timeseries, but why not.
This commit is contained in:
Gian Merlino 2016-07-06 08:08:54 -07:00 committed by Nishant
parent bfa5c05aaa
commit fdc7e88a7d
11 changed files with 111 additions and 9 deletions

View File

@ -67,7 +67,7 @@ There are 11 main parts to a groupBy query:
|having|See [Having](../querying/having.html).|no|
|granularity|Defines the granularity of the query. See [Granularities](../querying/granularities.html)|yes|
|filter|See [Filters](../querying/filters.html)|no|
|aggregations|See [Aggregations](../querying/aggregations.html)|yes|
|aggregations|See [Aggregations](../querying/aggregations.html)|no|
|postAggregations|See [Post Aggregations](../querying/post-aggregations.html)|no|
|intervals|A JSON Object representing ISO-8601 Intervals. This defines the time ranges to run the query over.|yes|
|context|An additional JSON Object which can be used to specify certain flags.|no|

View File

@ -54,7 +54,7 @@ There are 7 main parts to a timeseries query:
|intervals|A JSON Object representing ISO-8601 Intervals. This defines the time ranges to run the query over.|yes|
|granularity|Defines the granularity to bucket query results. See [Granularities](../querying/granularities.html)|yes|
|filter|See [Filters](../querying/filters.html)|no|
|aggregations|See [Aggregations](../querying/aggregations.html)|yes|
|aggregations|See [Aggregations](../querying/aggregations.html)|no|
|postAggregations|See [Post Aggregations](../querying/post-aggregations.html)|no|
|context|See [Context](../querying/query-context.html)|no|

View File

@ -79,7 +79,7 @@ There are 11 parts to a topN query.
|intervals|A JSON Object representing ISO-8601 Intervals. This defines the time ranges to run the query over.|yes|
|granularity|Defines the granularity to bucket query results. See [Granularities](../querying/granularities.html)|yes|
|filter|See [Filters](../querying/filters.html)|no|
|aggregations|See [Aggregations](../querying/aggregations.html)|yes|
|aggregations|See [Aggregations](../querying/aggregations.html)|no|
|postAggregations|See [Post Aggregations](../querying/post-aggregations.html)|no|
|dimension|A String or JSON object defining the dimension that you want the top taken for. For more info, see [DimensionSpecs](../querying/dimensionspecs.html)|yes|
|threshold|An integer defining the N in the topN (i.e. how many results you want in the top list)|yes|

View File

@ -37,7 +37,6 @@ public class Queries
)
{
Preconditions.checkNotNull(aggFactories, "aggregations cannot be null");
Preconditions.checkArgument(aggFactories.size() > 0, "Must have at least one AggregatorFactory");
final Set<String> aggNames = Sets.newHashSet();
for (AggregatorFactory aggFactory : aggFactories) {

View File

@ -98,13 +98,12 @@ public class GroupByQuery extends BaseQuery<Row>
for (DimensionSpec spec : this.dimensions) {
Preconditions.checkArgument(spec != null, "dimensions has null DimensionSpec");
}
this.aggregatorSpecs = aggregatorSpecs;
this.aggregatorSpecs = aggregatorSpecs == null ? ImmutableList.<AggregatorFactory>of() : aggregatorSpecs;
this.postAggregatorSpecs = postAggregatorSpecs == null ? ImmutableList.<PostAggregator>of() : postAggregatorSpecs;
this.havingSpec = havingSpec;
this.limitSpec = (limitSpec == null) ? new NoopLimitSpec() : limitSpec;
Preconditions.checkNotNull(this.granularity, "Must specify a granularity");
Preconditions.checkNotNull(this.aggregatorSpecs, "Must specify at least one aggregator");
Queries.verifyAggregations(this.aggregatorSpecs, this.postAggregatorSpecs);
Function<Sequence<Row>, Sequence<Row>> postProcFn =

View File

@ -62,7 +62,7 @@ public class TimeseriesQuery extends BaseQuery<Result<TimeseriesResultValue>>
super(dataSource, querySegmentSpec, descending, context);
this.dimFilter = dimFilter;
this.granularity = granularity;
this.aggregatorSpecs = aggregatorSpecs;
this.aggregatorSpecs = aggregatorSpecs == null ? ImmutableList.<AggregatorFactory>of() : aggregatorSpecs;
this.postAggregatorSpecs = postAggregatorSpecs == null ? ImmutableList.<PostAggregator>of() : postAggregatorSpecs;
Queries.verifyAggregations(this.aggregatorSpecs, this.postAggregatorSpecs);

View File

@ -97,7 +97,7 @@ public class PooledTopNAlgorithm
numBytesPerRecord += aggregatorSizes[i];
}
final int numValuesPerPass = numBytesToWorkWith / numBytesPerRecord;
final int numValuesPerPass = numBytesPerRecord > 0 ? numBytesToWorkWith / numBytesPerRecord : cardinality;
return PooledTopNParams.builder()
.withDimSelector(dimSelector)

View File

@ -73,7 +73,7 @@ public class TopNQuery extends BaseQuery<Result<TopNResultValue>>
this.dimFilter = dimFilter;
this.granularity = granularity;
this.aggregatorSpecs = aggregatorSpecs;
this.aggregatorSpecs = aggregatorSpecs == null ? ImmutableList.<AggregatorFactory>of() : aggregatorSpecs;
this.postAggregatorSpecs = postAggregatorSpecs == null ? ImmutableList.<PostAggregator>of() : postAggregatorSpecs;
Preconditions.checkNotNull(dimensionSpec, "dimensionSpec can't be null");

View File

@ -333,6 +333,43 @@ public class GroupByQueryRunnerTest
TestHelper.assertExpectedObjects(expectedResults, results, "");
}
@Test
public void testGroupByNoAggregators()
{
  // A groupBy with no aggregators is legal; it effectively enumerates the
  // distinct dimension values in each granularity bucket.
  GroupByQuery query = GroupByQuery
      .builder()
      .setDataSource(QueryRunnerTestHelper.dataSource)
      .setQuerySegmentSpec(QueryRunnerTestHelper.firstToThird)
      .setDimensions(Lists.<DimensionSpec>newArrayList(new DefaultDimensionSpec("quality", "alias")))
      .setGranularity(QueryRunnerTestHelper.dayGran)
      .build();

  // Expect one row per (day, quality) pair, ordered by day then by quality.
  final String[] days = {"2011-04-01", "2011-04-02"};
  final String[] qualities = {
      "automotive", "business", "entertainment", "health", "mezzanine",
      "news", "premium", "technology", "travel"
  };
  final List<Row> expectedResults = Lists.newArrayList();
  for (String day : days) {
    for (String quality : qualities) {
      expectedResults.add(GroupByQueryRunnerTestHelper.createExpectedRow(day, "alias", quality));
    }
  }

  Iterable<Row> results = GroupByQueryRunnerTestHelper.runQuery(factory, runner, query);
  TestHelper.assertExpectedObjects(expectedResults, results, "");
}
@Test
public void testMultiValueDimension()
{

View File

@ -183,6 +183,40 @@ public class TimeseriesQueryRunnerTest
Assert.assertEquals(lastResult.toString(), expectedLast, lastResult.getTimestamp());
}
@Test
public void testTimeseriesNoAggregators()
{
  // A timeseries with no aggregators still buckets and orders rows; each
  // bucket's value map should simply be empty.
  final QueryGranularity gran = QueryGranularities.DAY;
  final TimeseriesQuery query = Druids.newTimeseriesQueryBuilder()
      .dataSource(QueryRunnerTestHelper.dataSource)
      .granularity(gran)
      .intervals(QueryRunnerTestHelper.fullOnInterval)
      .descending(descending)
      .build();

  final Iterable<Result<TimeseriesResultValue>> results = Sequences.toList(
      runner.run(query, CONTEXT),
      Lists.<Result<TimeseriesResultValue>>newArrayList()
  );

  // The final timestamp depends on scan direction.
  final DateTime boundary;
  if (descending) {
    boundary = QueryRunnerTestHelper.earliest;
  } else {
    boundary = QueryRunnerTestHelper.last;
  }

  Result finalResult = null;
  for (Result<TimeseriesResultValue> result : results) {
    final DateTime timestamp = result.getTimestamp();
    final boolean pastBoundary =
        descending ? timestamp.isBefore(boundary) : timestamp.isAfter(boundary);
    Assert.assertFalse(
        String.format("Timestamp[%s] > expectedLast[%s]", timestamp, boundary),
        pastBoundary
    );
    // With no aggregators, each result row carries an empty value map.
    Assert.assertEquals(ImmutableMap.of(), result.getValue().getBaseObject());
    finalResult = result;
  }
  Assert.assertEquals(finalResult.toString(), boundary, finalResult.getTimestamp());
}
@Test
public void testFullOnTimeseriesMaxMin()
{

View File

@ -1339,6 +1339,39 @@ public class TopNQueryRunnerTest
assertExpectedResults(expectedResults, query);
}
@Test
public void testTopNLexicographicNoAggregators()
{
  // A lexicographic topN with no aggregators returns rows that contain only
  // the dimension value, in lexicographic order.
  final TopNQuery query = new TopNQueryBuilder()
      .dataSource(QueryRunnerTestHelper.dataSource)
      .granularity(QueryRunnerTestHelper.allGran)
      .dimension(QueryRunnerTestHelper.marketDimension)
      .metric(new LexicographicTopNMetricSpec(""))
      .threshold(4)
      .intervals(QueryRunnerTestHelper.firstToThird)
      .build();

  // Each expected row holds nothing but the market dimension value.
  final Map<String, Object> spotRow =
      ImmutableMap.<String, Object>of(QueryRunnerTestHelper.marketDimension, "spot");
  final Map<String, Object> totalMarketRow =
      ImmutableMap.<String, Object>of(QueryRunnerTestHelper.marketDimension, "total_market");
  final Map<String, Object> upfrontRow =
      ImmutableMap.<String, Object>of(QueryRunnerTestHelper.marketDimension, "upfront");

  final List<Result<TopNResultValue>> expectedResults = Arrays.asList(
      new Result<>(
          new DateTime("2011-04-01T00:00:00.000Z"),
          new TopNResultValue(Arrays.<Map<String, Object>>asList(spotRow, totalMarketRow, upfrontRow))
      )
  );

  assertExpectedResults(expectedResults, query);
}
@Test
public void testTopNLexicographicWithPreviousStop()
{