SQL: Support for descending timeseries. (#3790)

This commit is contained in:
Gian Merlino 2016-12-19 11:19:15 -08:00 committed by Fangjin Yang
parent f576a0ff14
commit ebb4952f3f
3 changed files with 49 additions and 4 deletions

View File

@@ -269,8 +269,7 @@ public class DruidQueryBuilder
limitSpec == null || limitSpec.getColumns().isEmpty()
|| (limitSpec.getLimit() == Integer.MAX_VALUE
&& limitSpec.getColumns().size() == 1
&& limitSpec.getColumns().get(0).getDimension().equals(dimensionSpec.getOutputName())
&& limitSpec.getColumns().get(0).getDirection() == OrderByColumnSpec.Direction.ASCENDING);
&& limitSpec.getColumns().get(0).getDimension().equals(dimensionSpec.getOutputName()));
if (sortingOnTime) {
return ExtractionFns.toQueryGranularity(dimensionSpec.getExtractionFn());

View File

@@ -38,6 +38,7 @@ import io.druid.query.Result;
import io.druid.query.dimension.DimensionSpec;
import io.druid.query.groupby.GroupByQuery;
import io.druid.query.groupby.having.DimFilterHavingSpec;
import io.druid.query.groupby.orderby.DefaultLimitSpec;
import io.druid.query.groupby.orderby.OrderByColumnSpec;
import io.druid.query.select.EventHolder;
import io.druid.query.select.PagingSpec;
@@ -217,13 +218,26 @@ public class QueryMaker
final Row.RowBuilder rowBuilder = Row.newBuilder(fieldList.size());
final Filtration filtration = Filtration.create(queryBuilder.getFilter()).optimize(druidTable);
final boolean descending;
if (queryBuilder.getLimitSpec() != null) {
final DefaultLimitSpec limitSpec = queryBuilder.getLimitSpec();
// Sanity checks; these preconditions should be assured by DruidQueryBuilder.accumulate.
Preconditions.checkState(limitSpec.getColumns().size() == 1);
Preconditions.checkState(limitSpec.getColumns().get(0).getDimension().equals(timeOutputName));
descending = limitSpec.getColumns().get(0).getDirection() == OrderByColumnSpec.Direction.DESCENDING;
} else {
descending = false;
}
final Map<String, Object> context = Maps.newHashMap();
context.put("skipEmptyBuckets", true);
final TimeseriesQuery query = new TimeseriesQuery(
druidTable.getDataSource(),
filtration.getQuerySegmentSpec(),
false,
descending,
filtration.getDimFilter(),
queryGranularity,
queryBuilder.getGrouping().getAggregatorFactories(),

View File

@@ -1950,7 +1950,12 @@ public class CalciteQueryTest
public void testTimeseries() throws Exception
{
testQuery(
"SELECT gran, SUM(cnt) FROM (SELECT floor(__time TO month) AS gran, cnt FROM druid.foo) AS x GROUP BY gran ORDER BY gran",
"SELECT gran, SUM(cnt) FROM (\n"
+ " SELECT floor(__time TO month) AS gran,\n"
+ " cnt FROM druid.foo\n"
+ ") AS x\n"
+ "GROUP BY gran\n"
+ "ORDER BY gran",
ImmutableList.<Query>of(
Druids.newTimeseriesQueryBuilder()
.dataSource(CalciteTests.DATASOURCE)
@@ -1967,6 +1972,33 @@ public class CalciteQueryTest
);
}
@Test
public void testTimeseriesDescending() throws Exception
{
  // A GROUP BY on floor(__time TO month) with ORDER BY ... DESC should be
  // planned as a Druid timeseries query running in descending time order,
  // rather than a groupBy with a limit spec.
  final String sql = "SELECT gran, SUM(cnt) FROM (\n"
                     + " SELECT floor(__time TO month) AS gran,\n"
                     + " cnt FROM druid.foo\n"
                     + ") AS x\n"
                     + "GROUP BY gran\n"
                     + "ORDER BY gran DESC";

  // Expected native query: descending=true carries the ORDER BY direction.
  final Query expectedQuery = Druids.newTimeseriesQueryBuilder()
                                    .dataSource(CalciteTests.DATASOURCE)
                                    .intervals(QSS(Filtration.eternity()))
                                    .granularity(QueryGranularities.MONTH)
                                    .aggregators(AGGS(new LongSumAggregatorFactory("a0", "cnt")))
                                    .descending(true)
                                    .context(TIMESERIES_CONTEXT)
                                    .build();

  // Rows come back newest-first since the query is descending.
  final ImmutableList<Object[]> expectedResults = ImmutableList.of(
      new Object[]{T("2001-01-01"), 3L},
      new Object[]{T("2000-01-01"), 3L}
  );

  testQuery(sql, ImmutableList.<Query>of(expectedQuery), expectedResults);
}
@Test
public void testGroupByExtractYear() throws Exception
{