This commit is contained in:
Justin Borromeo 2019-03-18 14:00:50 -07:00
parent 7bfa77d3c1
commit 2528a56142
13 changed files with 46 additions and 46 deletions

View File

@ -114,7 +114,7 @@ public class ScanBenchmark
private int limit;
@Param({"NONE", "DESCENDING", "ASCENDING"})
private static ScanQuery.TimeOrder timeOrdering;
private static ScanQuery.Order ordering;
private static final Logger log = new Logger(ScanBenchmark.class);
private static final ObjectMapper JSON_MAPPER;
@ -182,7 +182,7 @@ public class ScanBenchmark
return Druids.newScanQueryBuilder()
.dataSource("blah")
.intervals(intervalSpec)
.timeOrder(timeOrdering);
.timeOrder(ordering);
}
private static Druids.ScanQueryBuilder basicB(final BenchmarkSchemaInfo basicSchema)
@ -203,7 +203,7 @@ public class ScanBenchmark
.filters(filter)
.dataSource("blah")
.intervals(intervalSpec)
.timeOrder(timeOrdering);
.timeOrder(ordering);
}
private static Druids.ScanQueryBuilder basicC(final BenchmarkSchemaInfo basicSchema)
@ -216,7 +216,7 @@ public class ScanBenchmark
.filters(new SelectorDimFilter(dimName, "3", StrlenExtractionFn.instance()))
.intervals(intervalSpec)
.dataSource("blah")
.timeOrder(timeOrdering);
.timeOrder(ordering);
}
private static Druids.ScanQueryBuilder basicD(final BenchmarkSchemaInfo basicSchema)
@ -231,7 +231,7 @@ public class ScanBenchmark
.filters(new BoundDimFilter(dimName, "100", "10000", true, true, true, null, null))
.intervals(intervalSpec)
.dataSource("blah")
.timeOrder(timeOrdering);
.timeOrder(ordering);
}
@Setup

View File

@ -61,7 +61,7 @@ The following are the main parameters for Scan queries:
|columns|A String array of dimensions and metrics to scan. If left empty, all dimensions and metrics are returned.|no|
|batchSize|How many rows buffered before return to client. Default is `20480`|no|
|limit|How many rows to return. If not specified, all rows will be returned.|no|
|timeOrder|The ordering of returned rows based on timestamp. "ascending", "descending", and "none" (default) are supported. Currently, "ascending" and "descending" are only supported for queries where the limit is less than `druid.query.scan.maxRowsTimeOrderedInMemory`. Scan queries that are either legacy mode or have a limit greater than `druid.query.scan.maxRowsTimeOrderedInMemory` will not be time-ordered and default to a timeOrder of "none".|none|
|order|The ordering of returned rows based on timestamp. "ascending", "descending", and "none" (default) are supported. Currently, "ascending" and "descending" are only supported for queries where the limit is less than `druid.query.scan.maxRowsTimeOrderedInMemory`. Scan queries that are either legacy mode or have a limit greater than `druid.query.scan.maxRowsTimeOrderedInMemory` will not be time-ordered and default to an order of "none".|none|
|legacy|Return results consistent with the legacy "scan-query" contrib extension. Defaults to the value set by `druid.query.scan.legacy`, which in turn defaults to false. See [Legacy mode](#legacy-mode) for details.|no|
|context|An additional JSON Object which can be used to specify certain flags.|no|

View File

@ -924,7 +924,7 @@ public class Druids
private DimFilter dimFilter;
private List<String> columns;
private Boolean legacy;
private ScanQuery.TimeOrder timeOrder;
private ScanQuery.Order order;
public ScanQueryBuilder()
{
@ -938,7 +938,7 @@ public class Druids
dimFilter = null;
columns = new ArrayList<>();
legacy = null;
timeOrder = null;
order = null;
}
public ScanQuery build()
@ -950,7 +950,7 @@ public class Druids
resultFormat,
batchSize,
limit,
timeOrder,
order,
dimFilter,
columns,
legacy,
@ -971,7 +971,7 @@ public class Druids
.columns(query.getColumns())
.legacy(query.isLegacy())
.context(query.getContext())
.timeOrder(query.getTimeOrder());
.timeOrder(query.getOrder());
}
public ScanQueryBuilder dataSource(String ds)
@ -1051,9 +1051,9 @@ public class Druids
return this;
}
public ScanQueryBuilder timeOrder(ScanQuery.TimeOrder timeOrder)
public ScanQueryBuilder timeOrder(ScanQuery.Order order)
{
this.timeOrder = timeOrder;
this.order = order;
return this;
}
}

View File

@ -78,7 +78,7 @@ public class ScanQuery extends BaseQuery<ScanResultValue>
}
}
public enum TimeOrder
public enum Order
{
ASCENDING,
DESCENDING,
@ -92,7 +92,7 @@ public class ScanQuery extends BaseQuery<ScanResultValue>
}
@JsonCreator
public static TimeOrder fromString(String name)
public static Order fromString(String name)
{
return valueOf(StringUtils.toUpperCase(name));
}
@ -111,7 +111,7 @@ public class ScanQuery extends BaseQuery<ScanResultValue>
private final DimFilter dimFilter;
private final List<String> columns;
private final Boolean legacy;
private final TimeOrder timeOrder;
private final Order order;
@JsonCreator
public ScanQuery(
@ -121,7 +121,7 @@ public class ScanQuery extends BaseQuery<ScanResultValue>
@JsonProperty("resultFormat") ResultFormat resultFormat,
@JsonProperty("batchSize") int batchSize,
@JsonProperty("limit") long limit,
@JsonProperty("timeOrder") TimeOrder timeOrder,
@JsonProperty("order") Order order,
@JsonProperty("filter") DimFilter dimFilter,
@JsonProperty("columns") List<String> columns,
@JsonProperty("legacy") Boolean legacy,
@ -138,7 +138,7 @@ public class ScanQuery extends BaseQuery<ScanResultValue>
this.dimFilter = dimFilter;
this.columns = columns;
this.legacy = legacy;
this.timeOrder = timeOrder == null ? TimeOrder.NONE : timeOrder;
this.order = order == null ? Order.NONE : order;
}
@JsonProperty
@ -166,9 +166,9 @@ public class ScanQuery extends BaseQuery<ScanResultValue>
}
@JsonProperty
public TimeOrder getTimeOrder()
public Order getOrder()
{
return timeOrder;
return order;
}
@Override

View File

@ -68,7 +68,7 @@ public class ScanQueryEngine
if (responseContext.get(ScanQueryRunnerFactory.CTX_COUNT) != null) {
long count = (long) responseContext.get(ScanQueryRunnerFactory.CTX_COUNT);
if (count >= query.getLimit() && query.getTimeOrder().equals(ScanQuery.TimeOrder.NONE)) {
if (count >= query.getLimit() && query.getOrder().equals(ScanQuery.Order.NONE)) {
return Sequences.empty();
}
}
@ -131,8 +131,8 @@ public class ScanQueryEngine
intervals.get(0),
query.getVirtualColumns(),
Granularities.ALL,
query.getTimeOrder().equals(ScanQuery.TimeOrder.DESCENDING) ||
(query.getTimeOrder().equals(ScanQuery.TimeOrder.NONE) && query.isDescending()),
query.getOrder().equals(ScanQuery.Order.DESCENDING) ||
(query.getOrder().equals(ScanQuery.Order.NONE) && query.isDescending()),
null
)
.map(cursor -> new BaseSequence<>(
@ -264,7 +264,7 @@ public class ScanQueryEngine
*/
private long calculateLimit(ScanQuery query, Map<String, Object> responseContext)
{
if (query.getTimeOrder().equals(ScanQuery.TimeOrder.NONE)) {
if (query.getOrder().equals(ScanQuery.Order.NONE)) {
return query.getLimit() - (long) responseContext.get(ScanQueryRunnerFactory.CTX_COUNT);
}
return query.getLimit();

View File

@ -98,7 +98,7 @@ public class ScanQueryLimitRowIterator implements CloseableIterator<ScanResultVa
}
// We want to perform batching if we are not time-ordering or are at the outer level if we are re time-ordering
if (query.getTimeOrder() == ScanQuery.TimeOrder.NONE ||
if (query.getOrder() == ScanQuery.Order.NONE ||
!query.getContextBoolean(ScanQuery.CTX_KEY_OUTERMOST, true)) {
ScanResultValue batch = yielder.get();
List events = (List) batch.getEvents();

View File

@ -92,7 +92,7 @@ public class ScanQueryRunnerFactory implements QueryRunnerFactory<ScanResultValu
// See the comment of CTX_TIMEOUT_AT.
final long timeoutAt = System.currentTimeMillis() + QueryContexts.getTimeout(queryPlus.getQuery());
responseContext.put(CTX_TIMEOUT_AT, timeoutAt);
if (query.getTimeOrder().equals(ScanQuery.TimeOrder.NONE)) {
if (query.getOrder().equals(ScanQuery.Order.NONE)) {
// Use normal strategy
return Sequences.concat(
Sequences.map(

View File

@ -46,7 +46,7 @@ public class ScanResultValueTimestampComparator implements Comparator<ScanResult
comparison = Longs.compare(
o1.getFirstEventTimestamp(scanQuery.getResultFormat()),
o2.getFirstEventTimestamp(scanQuery.getResultFormat()));
if (scanQuery.getTimeOrder().equals(ScanQuery.TimeOrder.DESCENDING)) {
if (scanQuery.getOrder().equals(ScanQuery.Order.DESCENDING)) {
return comparison;
}
return comparison * -1;

View File

@ -103,7 +103,7 @@ public class ScanQueryLimitRowIteratorTest
{
ScanQuery query = Druids.newScanQueryBuilder()
.limit(limit)
.timeOrder(ScanQuery.TimeOrder.NONE)
.timeOrder(ScanQuery.Order.NONE)
.dataSource("some datasource")
.batchSize(batchSize)
.intervals(QueryRunnerTestHelper.fullOnIntervalSpec)
@ -144,7 +144,7 @@ public class ScanQueryLimitRowIteratorTest
{
ScanQuery query = Druids.newScanQueryBuilder()
.limit(limit)
.timeOrder(ScanQuery.TimeOrder.DESCENDING)
.timeOrder(ScanQuery.Order.DESCENDING)
.dataSource("some datasource")
.batchSize(batchSize)
.intervals(QueryRunnerTestHelper.fullOnIntervalSpec)
@ -183,7 +183,7 @@ public class ScanQueryLimitRowIteratorTest
{
ScanQuery query = Druids.newScanQueryBuilder()
.limit(limit)
.timeOrder(ScanQuery.TimeOrder.DESCENDING)
.timeOrder(ScanQuery.Order.DESCENDING)
.dataSource("some datasource")
.batchSize(batchSize)
.intervals(QueryRunnerTestHelper.fullOnIntervalSpec)

View File

@ -56,14 +56,14 @@ public class ScanQueryRunnerFactoryTest
final int batchSize,
final long limit,
final ScanQuery.ResultFormat resultFormat,
final ScanQuery.TimeOrder timeOrder
final ScanQuery.Order order
)
{
this.numElements = numElements;
this.query = Druids.newScanQueryBuilder()
.batchSize(batchSize)
.limit(limit)
.timeOrder(timeOrder)
.timeOrder(order)
.intervals(QueryRunnerTestHelper.fullOnIntervalSpec)
.dataSource("some datasource")
.resultFormat(resultFormat)
@ -81,9 +81,9 @@ public class ScanQueryRunnerFactoryTest
ScanQuery.ResultFormat.RESULT_FORMAT_LIST,
ScanQuery.ResultFormat.RESULT_FORMAT_COMPACTED_LIST
);
List<ScanQuery.TimeOrder> timeOrder = ImmutableList.of(
ScanQuery.TimeOrder.ASCENDING,
ScanQuery.TimeOrder.DESCENDING
List<ScanQuery.Order> order = ImmutableList.of(
ScanQuery.Order.ASCENDING,
ScanQuery.Order.DESCENDING
);
return QueryRunnerTestHelper.cartesian(
@ -91,7 +91,7 @@ public class ScanQueryRunnerFactoryTest
batchSizes,
limits,
resultFormats,
timeOrder
order
);
}
@ -112,7 +112,7 @@ public class ScanQueryRunnerFactoryTest
} else if (o1 < o2) {
retVal = -1;
}
if (query.getTimeOrder().equals(ScanQuery.TimeOrder.DESCENDING)) {
if (query.getOrder().equals(ScanQuery.Order.DESCENDING)) {
return retVal * -1;
}
return retVal;
@ -138,7 +138,7 @@ public class ScanQueryRunnerFactoryTest
// check ordering is correct
for (int i = 1; i < output.size(); i++) {
if (query.getTimeOrder().equals(ScanQuery.TimeOrder.DESCENDING)) {
if (query.getOrder().equals(ScanQuery.Order.DESCENDING)) {
Assert.assertTrue(output.get(i).getFirstEventTimestamp(resultFormat) <
output.get(i - 1).getFirstEventTimestamp(resultFormat));
} else {

View File

@ -526,7 +526,7 @@ public class ScanQueryRunnerTest
.filters(new SelectorDimFilter(QueryRunnerTestHelper.marketDimension, "spot", null))
.columns(QueryRunnerTestHelper.qualityDimension, QueryRunnerTestHelper.indexMetric)
.limit(limit)
.timeOrder(ScanQuery.TimeOrder.ASCENDING)
.timeOrder(ScanQuery.Order.ASCENDING)
.context(ImmutableMap.of(ScanQuery.CTX_KEY_OUTERMOST, false))
.build();
@ -585,7 +585,7 @@ public class ScanQueryRunnerTest
.filters(new SelectorDimFilter(QueryRunnerTestHelper.marketDimension, "spot", null))
.columns(QueryRunnerTestHelper.qualityDimension, QueryRunnerTestHelper.indexMetric)
.limit(limit)
.timeOrder(ScanQuery.TimeOrder.DESCENDING)
.timeOrder(ScanQuery.Order.DESCENDING)
.build();
HashMap<String, Object> context = new HashMap<>();
@ -668,7 +668,7 @@ public class ScanQueryRunnerTest
.filters(new SelectorDimFilter(QueryRunnerTestHelper.marketDimension, "spot", null))
.columns(QueryRunnerTestHelper.qualityDimension, QueryRunnerTestHelper.indexMetric)
.resultFormat(ScanQuery.ResultFormat.RESULT_FORMAT_COMPACTED_LIST)
.timeOrder(ScanQuery.TimeOrder.ASCENDING)
.timeOrder(ScanQuery.Order.ASCENDING)
.limit(limit)
.build();
@ -729,7 +729,7 @@ public class ScanQueryRunnerTest
.filters(new SelectorDimFilter(QueryRunnerTestHelper.marketDimension, "spot", null))
.columns(QueryRunnerTestHelper.qualityDimension, QueryRunnerTestHelper.indexMetric)
.resultFormat(ScanQuery.ResultFormat.RESULT_FORMAT_COMPACTED_LIST)
.timeOrder(ScanQuery.TimeOrder.DESCENDING)
.timeOrder(ScanQuery.Order.DESCENDING)
.context(ImmutableMap.of(ScanQuery.CTX_KEY_OUTERMOST, false))
.limit(limit)
.build();

View File

@ -68,7 +68,7 @@ public class ScanQuerySpecTest
ScanQuery.ResultFormat.RESULT_FORMAT_LIST,
0,
3,
ScanQuery.TimeOrder.NONE,
ScanQuery.Order.NONE,
null,
Arrays.asList("market", "quality", "index"),
null,

View File

@ -52,7 +52,7 @@ public class ScanResultValueTimestampComparatorTest
public void comparisonDescendingListTest()
{
ScanQuery query = Druids.newScanQueryBuilder()
.timeOrder(ScanQuery.TimeOrder.DESCENDING)
.timeOrder(ScanQuery.Order.DESCENDING)
.resultFormat(ScanQuery.ResultFormat.RESULT_FORMAT_LIST)
.dataSource("some src")
.intervals(intervalSpec)
@ -89,7 +89,7 @@ public class ScanResultValueTimestampComparatorTest
public void comparisonAscendingListTest()
{
ScanQuery query = Druids.newScanQueryBuilder()
.timeOrder(ScanQuery.TimeOrder.ASCENDING)
.timeOrder(ScanQuery.Order.ASCENDING)
.resultFormat(ScanQuery.ResultFormat.RESULT_FORMAT_LIST)
.dataSource("some src")
.intervals(intervalSpec)
@ -126,7 +126,7 @@ public class ScanResultValueTimestampComparatorTest
public void comparisonDescendingCompactedListTest()
{
ScanQuery query = Druids.newScanQueryBuilder()
.timeOrder(ScanQuery.TimeOrder.DESCENDING)
.timeOrder(ScanQuery.Order.DESCENDING)
.resultFormat(ScanQuery.ResultFormat.RESULT_FORMAT_COMPACTED_LIST)
.dataSource("some src")
.intervals(intervalSpec)
@ -161,7 +161,7 @@ public class ScanResultValueTimestampComparatorTest
public void comparisonAscendingCompactedListTest()
{
ScanQuery query = Druids.newScanQueryBuilder()
.timeOrder(ScanQuery.TimeOrder.ASCENDING)
.timeOrder(ScanQuery.Order.ASCENDING)
.resultFormat(ScanQuery.ResultFormat.RESULT_FORMAT_COMPACTED_LIST)
.dataSource("some src")
.intervals(intervalSpec)