mirror of https://github.com/apache/druid.git

Finish rename

commit 4823dab895
parent 2528a56142
@@ -182,7 +182,7 @@ public class ScanBenchmark
     return Druids.newScanQueryBuilder()
         .dataSource("blah")
         .intervals(intervalSpec)
-        .timeOrder(ordering);
+        .order(ordering);
   }

   private static Druids.ScanQueryBuilder basicB(final BenchmarkSchemaInfo basicSchema)
@@ -203,7 +203,7 @@ public class ScanBenchmark
         .filters(filter)
         .dataSource("blah")
         .intervals(intervalSpec)
-        .timeOrder(ordering);
+        .order(ordering);
   }

   private static Druids.ScanQueryBuilder basicC(final BenchmarkSchemaInfo basicSchema)
@@ -216,7 +216,7 @@ public class ScanBenchmark
         .filters(new SelectorDimFilter(dimName, "3", StrlenExtractionFn.instance()))
         .intervals(intervalSpec)
         .dataSource("blah")
-        .timeOrder(ordering);
+        .order(ordering);
   }

   private static Druids.ScanQueryBuilder basicD(final BenchmarkSchemaInfo basicSchema)
@@ -231,7 +231,7 @@ public class ScanBenchmark
         .filters(new BoundDimFilter(dimName, "100", "10000", true, true, true, null, null))
         .intervals(intervalSpec)
         .dataSource("blah")
-        .timeOrder(ordering);
+        .order(ordering);
   }

   @Setup

@@ -61,7 +61,7 @@ The following are the main parameters for Scan queries:
 |columns|A String array of dimensions and metrics to scan. If left empty, all dimensions and metrics are returned.|no|
 |batchSize|How many rows buffered before return to client. Default is `20480`|no|
 |limit|How many rows to return. If not specified, all rows will be returned.|no|
-|order|The ordering of returned rows based on timestamp. "ascending", "descending", and "none" (default) are supported. Currently, "ascending" and "descending" are only supported for queries where the limit is less than `druid.query.scan.maxRowsTimeOrderedInMemory`. Scan queries that are either legacy mode or have a limit greater than `druid.query.scan.maxRowsTimeOrderedInMemory` will not be time-ordered and default to a order of "none".|none|
+|order|The ordering of returned rows based on timestamp. "ascending", "descending", and "none" (default) are supported. Currently, "ascending" and "descending" are only supported for queries where the limit is less than `druid.query.scan.maxRowsOrderedInMemory`. Scan queries that are either legacy mode or have a limit greater than `druid.query.scan.maxRowsOrderedInMemory` will not be time-ordered and default to a order of "none".|none|
 |legacy|Return results consistent with the legacy "scan-query" contrib extension. Defaults to the value set by `druid.query.scan.legacy`, which in turn defaults to false. See [Legacy mode](#legacy-mode) for details.|no|
 |context|An additional JSON Object which can be used to specify certain flags.|no|

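For reference, here is how the renamed `order` parameter surfaces through the native query builder that this diff also renames. This is a minimal sketch assuming the post-rename API; the datasource name, interval, columns, and limit are placeholders, not values from this commit:

```java
import java.util.Collections;

import org.apache.druid.java.util.common.Intervals;
import org.apache.druid.query.Druids;
import org.apache.druid.query.scan.ScanQuery;
import org.apache.druid.query.spec.MultipleIntervalSegmentSpec;

public class OrderedScanExample
{
  public static ScanQuery newestFirstScan()
  {
    // Renamed parameter in action: "order" (formerly "timeOrder") asks for
    // descending timestamp order; it only applies within the row/segment
    // bounds described in the surrounding documentation.
    return Druids.newScanQueryBuilder()
                 .dataSource("wikipedia")                             // placeholder datasource
                 .intervals(new MultipleIntervalSegmentSpec(
                     Collections.singletonList(Intervals.of("2015-09-12/2015-09-13"))))
                 .columns("__time", "page", "added")                  // placeholder columns
                 .limit(100)                                          // keep under maxRowsQueuedForOrdering
                 .order(ScanQuery.Order.DESCENDING)
                 .build();
  }
}
```

The equivalent JSON query simply uses an `"order"` key (for example `"order": "descending"`) where the old `"timeOrder"` key used to appear, as the serialization test later in this diff shows.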
@@ -157,8 +157,8 @@ The format of the result when resultFormat equals `compactedList`:

 The Scan query currently supports ordering based on timestamp for non-legacy queries. Note that using time ordering
 will yield results that do not indicate which segment rows are from (`segmentId` will show up as `null`). Furthermore,
-time ordering is only supported where the result set limit is less than `druid.query.scan.maxRowsQueuedForTimeOrdering`
-rows **or** fewer than `druid.query.scan.maxSegmentsTimeOrderedInMemory` segments are scanned per Historical. The
+time ordering is only supported where the result set limit is less than `druid.query.scan.maxRowsQueuedForOrdering`
+rows **or** fewer than `druid.query.scan.maxSegmentsOrderedInMemory` segments are scanned per Historical. The
 reasoning behind these limitations is that the implementation of time ordering uses two strategies that can consume too
 much heap memory if left unbounded. These strategies (listed below) are chosen on a per-Historical basis depending on
 query result set limit and the number of segments being scanned.
@@ -167,17 +167,17 @@ query result set limit and the number of segments being scanned.
 queue which is ordered by timestamp. For every row above the result set limit, the row with the earliest (if descending)
 or latest (if ascending) timestamp will be dequeued. After every row has been processed, the sorted contents of the
 priority queue are streamed back to the Broker(s) in batches. Attempting to load too many rows into memory runs the
-risk of Historical nodes running out of memory. The `druid.query.scan.maxRowsQueuedForTimeOrdering` property protects
+risk of Historical nodes running out of memory. The `druid.query.scan.maxRowsQueuedForOrdering` property protects
 from this by limiting the number of rows in the query result set when time ordering is used.

 2. N-Way Merge: Each segment on a Historical is opened in parallel. Since each segment's rows are already
 time-ordered, an n-way merge can be performed on the results from each segment. This approach doesn't persist the entire
 result set in memory (like the Priority Queue) as it streams back batches as they are returned from the merge function.
 However, attempting to query too many segments could also result in high memory usage due to the need to open
-decompression and decoding buffers for each. The `druid.query.scan.maxSegmentsTimeOrderedInMemory` limit protects
+decompression and decoding buffers for each. The `druid.query.scan.maxSegmentsOrderedInMemory` limit protects
 from this by capping the number of segments opened per historical when time ordering is used.

-Both `druid.query.scan.maxRowsQueuedForTimeOrdering` and `druid.query.scan.maxSegmentsTimeOrderedInMemory` are
+Both `druid.query.scan.maxRowsQueuedForOrdering` and `druid.query.scan.maxSegmentsOrderedInMemory` are
 configurable and can be tuned based on hardware specs and number of dimensions being queried.

 ## Legacy mode
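As a rough illustration of the priority-queue strategy described in the documentation above, the following self-contained sketch shows the core idea of bounding heap use with the row limit. It is not Druid code; it only demonstrates why the limit caps memory while still producing the newest `limit` timestamps in descending order:

```java
import java.util.ArrayList;
import java.util.Comparator;
import java.util.List;
import java.util.PriorityQueue;

// Illustrative sketch of the bounded priority-queue idea (descending order case).
public class BoundedTimeOrderSketch
{
  public static List<Long> newestTimestamps(Iterable<Long> timestamps, int limit)
  {
    // Min-heap: the earliest timestamp sits on top, so it is the one evicted
    // once more than "limit" rows have been offered. Heap size never exceeds
    // the limit, which is what bounds heap memory.
    PriorityQueue<Long> heap = new PriorityQueue<>(Comparator.naturalOrder());
    for (long ts : timestamps) {
      heap.offer(ts);
      if (heap.size() > limit) {
        heap.poll(); // drop the earliest row
      }
    }
    // Drain and sort newest-first before streaming results back.
    List<Long> result = new ArrayList<>(heap.size());
    while (!heap.isEmpty()) {
      result.add(heap.poll());
    }
    result.sort(Comparator.reverseOrder());
    return result;
  }
}
```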
@@ -199,6 +199,6 @@ is complete.

 |property|description|values|default|
 |--------|-----------|------|-------|
-|druid.query.scan.maxRowsQueuedForTimeOrdering|The maximum number of rows returned when time ordering is used|An integer in [0, 2147483647]|100000|
-|druid.query.scan.maxSegmentsTimeOrderedInMemory|The maximum number of segments scanned per historical when time ordering is used|An integer in [0, 2147483647]|50|
+|druid.query.scan.maxRowsQueuedForOrdering|The maximum number of rows returned when time ordering is used|An integer in [0, 2147483647]|100000|
+|druid.query.scan.maxSegmentsOrderedInMemory|The maximum number of segments scanned per historical when time ordering is used|An integer in [0, 2147483647]|50|
 |druid.query.scan.legacy|Whether legacy mode should be turned on for Scan queries|true or false|false|

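These renamed properties bind to the `ScanQueryConfig` fields changed later in this diff. The sketch below, modeled on the `ScanQueryConfigTest` serde test further down, shows the new property names mapping onto the renamed getters; the plain `ObjectMapper` and the specific values are assumptions for illustration (Druid normally injects these from the `druid.query.scan.*` runtime properties):

```java
import com.fasterxml.jackson.databind.ObjectMapper;
import com.google.common.collect.ImmutableMap;
import org.apache.druid.query.scan.ScanQueryConfig;

public class ScanQueryConfigBindingSketch
{
  public static void main(String[] args)
  {
    ObjectMapper mapper = new ObjectMapper(); // assumed plain mapper for the sketch

    ScanQueryConfig config = mapper.convertValue(
        ImmutableMap.of(
            "maxRowsQueuedForOrdering", "150000",   // formerly maxRowsQueuedForTimeOrdering
            "maxSegmentsOrderedInMemory", "60"      // formerly maxSegmentsTimeOrderedInMemory
        ),
        ScanQueryConfig.class
    );

    System.out.println(config.getMaxRowsQueuedForOrdering());    // 150000
    System.out.println(config.getMaxSegmentsOrderedInMemory());  // 60
  }
}
```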
@@ -971,7 +971,7 @@ public class Druids
           .columns(query.getColumns())
           .legacy(query.isLegacy())
           .context(query.getContext())
-          .timeOrder(query.getOrder());
+          .order(query.getOrder());
    }

    public ScanQueryBuilder dataSource(String ds)
@@ -1051,7 +1051,7 @@ public class Druids
      return this;
    }

-    public ScanQueryBuilder timeOrder(ScanQuery.Order order)
+    public ScanQueryBuilder order(ScanQuery.Order order)
    {
      this.order = order;
      return this;

@@ -41,19 +41,19 @@ public class ScanQueryConfig
   }

   @JsonProperty
-  private int maxRowsQueuedForTimeOrdering = 100000;
+  private int maxRowsQueuedForOrdering = 100000;

-  public int getMaxRowsQueuedForTimeOrdering()
+  public int getMaxRowsQueuedForOrdering()
   {
-    return maxRowsQueuedForTimeOrdering;
+    return maxRowsQueuedForOrdering;
   }

   @JsonProperty
-  private int maxSegmentsTimeOrderedInMemory = 50;
+  private int maxSegmentsOrderedInMemory = 50;

-  public int getMaxSegmentsTimeOrderedInMemory()
+  public int getMaxSegmentsOrderedInMemory()
   {
-    return maxSegmentsTimeOrderedInMemory;
+    return maxSegmentsOrderedInMemory;
   }

   @Override

@@ -100,7 +100,7 @@ public class ScanQueryRunnerFactory implements QueryRunnerFactory<ScanResultValu
                 input -> input.run(queryPlus, responseContext)
             )
         );
-      } else if (query.getLimit() <= scanQueryConfig.getMaxRowsQueuedForTimeOrdering()) {
+      } else if (query.getLimit() <= scanQueryConfig.getMaxRowsQueuedForOrdering()) {
         // Use priority queue strategy
         return sortAndLimitScanResultValues(
             Sequences.concat(Sequences.map(
@@ -109,7 +109,7 @@ public class ScanQueryRunnerFactory implements QueryRunnerFactory<ScanResultValu
             )),
             query
         );
-      } else if (numSegments <= scanQueryConfig.getMaxSegmentsTimeOrderedInMemory()) {
+      } else if (numSegments <= scanQueryConfig.getMaxSegmentsOrderedInMemory()) {
         // Use n-way merge strategy
         final Sequence<ScanResultValue> unbatched =
             Sequences.map(
@@ -137,8 +137,8 @@ public class ScanQueryRunnerFactory implements QueryRunnerFactory<ScanResultValu
             + " %,d segments or lower the row limit below %,d.",
             numSegments,
             query.getLimit(),
-            scanQueryConfig.getMaxSegmentsTimeOrderedInMemory(),
-            scanQueryConfig.getMaxRowsQueuedForTimeOrdering()
+            scanQueryConfig.getMaxSegmentsOrderedInMemory(),
+            scanQueryConfig.getMaxRowsQueuedForOrdering()
         );
       };
     }

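Taken together, the three hunks above are where the renamed getters drive behavior: the runner factory chooses between no ordering, the priority-queue strategy, the n-way merge, or rejecting the query. The following is a paraphrased sketch of that decision only; the real method builds and returns result `Sequence`s, and its exact first condition is not visible in this diff:

```java
import org.apache.druid.query.scan.ScanQuery;

// Paraphrase of the strategy selection seen in the hunks above (illustrative only).
public class OrderingStrategySketch
{
  public static String chooseStrategy(
      ScanQuery.Order order,
      long limit,
      int numSegments,
      int maxRowsQueuedForOrdering,
      int maxSegmentsOrderedInMemory
  )
  {
    if (order == ScanQuery.Order.NONE) {
      return "no ordering: concatenate per-segment results as they arrive";
    } else if (limit <= maxRowsQueuedForOrdering) {
      return "priority queue: sort the bounded result set on the Historical";
    } else if (numSegments <= maxSegmentsOrderedInMemory) {
      return "n-way merge: merge already time-ordered segment streams";
    } else {
      return "reject: raise the configured limits or lower the query limit";
    }
  }
}
```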
@@ -31,15 +31,15 @@ public class ScanQueryConfigTest

   private final ImmutableMap<String, String> CONFIG_MAP = ImmutableMap
       .<String, String>builder()
-      .put("maxSegmentsTimeOrderedInMemory", "1")
-      .put("maxRowsQueuedForTimeOrdering", "1")
+      .put("maxSegmentsOrderedInMemory", "1")
+      .put("maxRowsQueuedForOrdering", "1")
       .put("legacy", "true")
       .build();

   private final ImmutableMap<String, String> CONFIG_MAP2 = ImmutableMap
       .<String, String>builder()
       .put("legacy", "false")
-      .put("maxSegmentsTimeOrderedInMemory", "42")
+      .put("maxSegmentsOrderedInMemory", "42")
       .build();

   private final ImmutableMap<String, String> CONFIG_MAP_EMPTY = ImmutableMap
@@ -50,18 +50,18 @@ public class ScanQueryConfigTest
   public void testSerde()
   {
     final ScanQueryConfig config = MAPPER.convertValue(CONFIG_MAP, ScanQueryConfig.class);
-    Assert.assertEquals(1, config.getMaxRowsQueuedForTimeOrdering());
-    Assert.assertEquals(1, config.getMaxSegmentsTimeOrderedInMemory());
+    Assert.assertEquals(1, config.getMaxRowsQueuedForOrdering());
+    Assert.assertEquals(1, config.getMaxSegmentsOrderedInMemory());
     Assert.assertTrue(config.isLegacy());

     final ScanQueryConfig config2 = MAPPER.convertValue(CONFIG_MAP2, ScanQueryConfig.class);
-    Assert.assertEquals(100000, config2.getMaxRowsQueuedForTimeOrdering());
-    Assert.assertEquals(42, config2.getMaxSegmentsTimeOrderedInMemory());
+    Assert.assertEquals(100000, config2.getMaxRowsQueuedForOrdering());
+    Assert.assertEquals(42, config2.getMaxSegmentsOrderedInMemory());
     Assert.assertFalse(config2.isLegacy());

     final ScanQueryConfig config3 = MAPPER.convertValue(CONFIG_MAP_EMPTY, ScanQueryConfig.class);
-    Assert.assertEquals(100000, config3.getMaxRowsQueuedForTimeOrdering());
-    Assert.assertEquals(50, config3.getMaxSegmentsTimeOrderedInMemory());
+    Assert.assertEquals(100000, config3.getMaxRowsQueuedForOrdering());
+    Assert.assertEquals(50, config3.getMaxSegmentsOrderedInMemory());
     Assert.assertFalse(config3.isLegacy());
   }
 }

@@ -99,11 +99,11 @@ public class ScanQueryLimitRowIteratorTest
    * Expect no batching to occur and limit to be applied
    */
   @Test
-  public void testNonTimeOrderedScan()
+  public void testNonOrderedScan()
   {
     ScanQuery query = Druids.newScanQueryBuilder()
         .limit(limit)
-        .timeOrder(ScanQuery.Order.NONE)
+        .order(ScanQuery.Order.NONE)
         .dataSource("some datasource")
         .batchSize(batchSize)
         .intervals(QueryRunnerTestHelper.fullOnIntervalSpec)
@@ -140,11 +140,11 @@ public class ScanQueryLimitRowIteratorTest
    * is a sequence of single-event ScanResultValues.
    */
   @Test
-  public void testBrokerTimeOrderedScan()
+  public void testBrokerOrderedScan()
   {
     ScanQuery query = Druids.newScanQueryBuilder()
         .limit(limit)
-        .timeOrder(ScanQuery.Order.DESCENDING)
+        .order(ScanQuery.Order.DESCENDING)
         .dataSource("some datasource")
         .batchSize(batchSize)
         .intervals(QueryRunnerTestHelper.fullOnIntervalSpec)
@@ -179,11 +179,11 @@ public class ScanQueryLimitRowIteratorTest
    * (unbatching and sorting occurs in ScanQueryRunnerFactory#mergeRunners()).
    */
   @Test
-  public void testHistoricalTimeOrderedScan()
+  public void testHistoricalOrderedScan()
   {
     ScanQuery query = Druids.newScanQueryBuilder()
         .limit(limit)
-        .timeOrder(ScanQuery.Order.DESCENDING)
+        .order(ScanQuery.Order.DESCENDING)
         .dataSource("some datasource")
         .batchSize(batchSize)
         .intervals(QueryRunnerTestHelper.fullOnIntervalSpec)

@@ -63,7 +63,7 @@ public class ScanQueryRunnerFactoryTest
     this.query = Druids.newScanQueryBuilder()
         .batchSize(batchSize)
         .limit(limit)
-        .timeOrder(order)
+        .order(order)
         .intervals(QueryRunnerTestHelper.fullOnIntervalSpec)
         .dataSource("some datasource")
         .resultFormat(resultFormat)

@@ -526,7 +526,7 @@ public class ScanQueryRunnerTest
         .filters(new SelectorDimFilter(QueryRunnerTestHelper.marketDimension, "spot", null))
         .columns(QueryRunnerTestHelper.qualityDimension, QueryRunnerTestHelper.indexMetric)
         .limit(limit)
-        .timeOrder(ScanQuery.Order.ASCENDING)
+        .order(ScanQuery.Order.ASCENDING)
         .context(ImmutableMap.of(ScanQuery.CTX_KEY_OUTERMOST, false))
         .build();

@@ -585,7 +585,7 @@ public class ScanQueryRunnerTest
         .filters(new SelectorDimFilter(QueryRunnerTestHelper.marketDimension, "spot", null))
         .columns(QueryRunnerTestHelper.qualityDimension, QueryRunnerTestHelper.indexMetric)
         .limit(limit)
-        .timeOrder(ScanQuery.Order.DESCENDING)
+        .order(ScanQuery.Order.DESCENDING)
         .build();

     HashMap<String, Object> context = new HashMap<>();
@@ -668,7 +668,7 @@ public class ScanQueryRunnerTest
         .filters(new SelectorDimFilter(QueryRunnerTestHelper.marketDimension, "spot", null))
         .columns(QueryRunnerTestHelper.qualityDimension, QueryRunnerTestHelper.indexMetric)
         .resultFormat(ScanQuery.ResultFormat.RESULT_FORMAT_COMPACTED_LIST)
-        .timeOrder(ScanQuery.Order.ASCENDING)
+        .order(ScanQuery.Order.ASCENDING)
         .limit(limit)
         .build();

@@ -729,7 +729,7 @@ public class ScanQueryRunnerTest
         .filters(new SelectorDimFilter(QueryRunnerTestHelper.marketDimension, "spot", null))
         .columns(QueryRunnerTestHelper.qualityDimension, QueryRunnerTestHelper.indexMetric)
         .resultFormat(ScanQuery.ResultFormat.RESULT_FORMAT_COMPACTED_LIST)
-        .timeOrder(ScanQuery.Order.DESCENDING)
+        .order(ScanQuery.Order.DESCENDING)
         .context(ImmutableMap.of(ScanQuery.CTX_KEY_OUTERMOST, false))
         .limit(limit)
         .build();

@@ -53,7 +53,7 @@ public class ScanQuerySpecTest
         + "\"resultFormat\":\"list\","
         + "\"batchSize\":20480,"
         + "\"limit\":3,"
-        + "\"timeOrder\":\"none\","
+        + "\"order\":\"none\","
         + "\"filter\":null,"
         + "\"columns\":[\"market\",\"quality\",\"index\"],"
         + "\"legacy\":null,"

@@ -52,7 +52,7 @@ public class ScanResultValueTimestampComparatorTest
   public void comparisonDescendingListTest()
   {
     ScanQuery query = Druids.newScanQueryBuilder()
-        .timeOrder(ScanQuery.Order.DESCENDING)
+        .order(ScanQuery.Order.DESCENDING)
         .resultFormat(ScanQuery.ResultFormat.RESULT_FORMAT_LIST)
         .dataSource("some src")
         .intervals(intervalSpec)
@@ -89,7 +89,7 @@ public class ScanResultValueTimestampComparatorTest
   public void comparisonAscendingListTest()
   {
     ScanQuery query = Druids.newScanQueryBuilder()
-        .timeOrder(ScanQuery.Order.ASCENDING)
+        .order(ScanQuery.Order.ASCENDING)
         .resultFormat(ScanQuery.ResultFormat.RESULT_FORMAT_LIST)
         .dataSource("some src")
         .intervals(intervalSpec)
@@ -126,7 +126,7 @@ public class ScanResultValueTimestampComparatorTest
   public void comparisonDescendingCompactedListTest()
   {
     ScanQuery query = Druids.newScanQueryBuilder()
-        .timeOrder(ScanQuery.Order.DESCENDING)
+        .order(ScanQuery.Order.DESCENDING)
         .resultFormat(ScanQuery.ResultFormat.RESULT_FORMAT_COMPACTED_LIST)
         .dataSource("some src")
         .intervals(intervalSpec)
@@ -161,7 +161,7 @@ public class ScanResultValueTimestampComparatorTest
   public void comparisonAscendingCompactedListTest()
   {
     ScanQuery query = Druids.newScanQueryBuilder()
-        .timeOrder(ScanQuery.Order.ASCENDING)
+        .order(ScanQuery.Order.ASCENDING)
         .resultFormat(ScanQuery.ResultFormat.RESULT_FORMAT_COMPACTED_LIST)
         .dataSource("some src")
         .intervals(intervalSpec)

@@ -511,7 +511,7 @@ public class CalciteQueryTest extends BaseCalciteQueryTest
         ImmutableList.of(),
         ImmutableList.of(
             new Object[]{
-                "DruidQueryRel(query=[{\"queryType\":\"scan\",\"dataSource\":{\"type\":\"table\",\"name\":\"foo\"},\"intervals\":{\"type\":\"intervals\",\"intervals\":[\"-146136543-09-08T08:23:32.096Z/146140482-04-24T15:36:27.903Z\"]},\"virtualColumns\":[],\"resultFormat\":\"compactedList\",\"batchSize\":20480,\"limit\":9223372036854775807,\"timeOrder\":\"none\",\"filter\":null,\"columns\":[\"__time\",\"cnt\",\"dim1\",\"dim2\",\"dim3\",\"m1\",\"m2\",\"unique_dim1\"],\"legacy\":false,\"context\":{\"defaultTimeout\":300000,\"maxScatterGatherBytes\":9223372036854775807,\"sqlCurrentTimestamp\":\"2000-01-01T00:00:00Z\",\"sqlQueryId\":\"dummy\"},\"descending\":false,\"granularity\":{\"type\":\"all\"}}], signature=[{__time:LONG, cnt:LONG, dim1:STRING, dim2:STRING, dim3:STRING, m1:FLOAT, m2:DOUBLE, unique_dim1:COMPLEX}])\n"
+                "DruidQueryRel(query=[{\"queryType\":\"scan\",\"dataSource\":{\"type\":\"table\",\"name\":\"foo\"},\"intervals\":{\"type\":\"intervals\",\"intervals\":[\"-146136543-09-08T08:23:32.096Z/146140482-04-24T15:36:27.903Z\"]},\"virtualColumns\":[],\"resultFormat\":\"compactedList\",\"batchSize\":20480,\"limit\":9223372036854775807,\"order\":\"none\",\"filter\":null,\"columns\":[\"__time\",\"cnt\",\"dim1\",\"dim2\",\"dim3\",\"m1\",\"m2\",\"unique_dim1\"],\"legacy\":false,\"context\":{\"defaultTimeout\":300000,\"maxScatterGatherBytes\":9223372036854775807,\"sqlCurrentTimestamp\":\"2000-01-01T00:00:00Z\",\"sqlQueryId\":\"dummy\"},\"descending\":false,\"granularity\":{\"type\":\"all\"}}], signature=[{__time:LONG, cnt:LONG, dim1:STRING, dim2:STRING, dim3:STRING, m1:FLOAT, m2:DOUBLE, unique_dim1:COMPLEX}])\n"
             }
         )
     );
@@ -767,10 +767,10 @@ public class CalciteQueryTest extends BaseCalciteQueryTest
     String emptyStringEq = NullHandling.replaceWithDefault() ? null : "\"\"";
     final String explanation =
         "BindableJoin(condition=[=($0, $2)], joinType=[inner])\n"
-        + " DruidQueryRel(query=[{\"queryType\":\"scan\",\"dataSource\":{\"type\":\"table\",\"name\":\"foo\"},\"intervals\":{\"type\":\"intervals\",\"intervals\":[\"-146136543-09-08T08:23:32.096Z/146140482-04-24T15:36:27.903Z\"]},\"virtualColumns\":[],\"resultFormat\":\"compactedList\",\"batchSize\":20480,\"limit\":9223372036854775807,\"timeOrder\":\"none\",\"filter\":{\"type\":\"not\",\"field\":{\"type\":\"selector\",\"dimension\":\"dim1\",\"value\":"
+        + " DruidQueryRel(query=[{\"queryType\":\"scan\",\"dataSource\":{\"type\":\"table\",\"name\":\"foo\"},\"intervals\":{\"type\":\"intervals\",\"intervals\":[\"-146136543-09-08T08:23:32.096Z/146140482-04-24T15:36:27.903Z\"]},\"virtualColumns\":[],\"resultFormat\":\"compactedList\",\"batchSize\":20480,\"limit\":9223372036854775807,\"order\":\"none\",\"filter\":{\"type\":\"not\",\"field\":{\"type\":\"selector\",\"dimension\":\"dim1\",\"value\":"
         + emptyStringEq
         + ",\"extractionFn\":null}},\"columns\":[\"dim1\"],\"legacy\":false,\"context\":{\"defaultTimeout\":300000,\"maxScatterGatherBytes\":9223372036854775807,\"sqlCurrentTimestamp\":\"2000-01-01T00:00:00Z\",\"sqlQueryId\":\"dummy\"},\"descending\":false,\"granularity\":{\"type\":\"all\"}}], signature=[{dim1:STRING}])\n"
-        + " DruidQueryRel(query=[{\"queryType\":\"scan\",\"dataSource\":{\"type\":\"table\",\"name\":\"foo\"},\"intervals\":{\"type\":\"intervals\",\"intervals\":[\"-146136543-09-08T08:23:32.096Z/146140482-04-24T15:36:27.903Z\"]},\"virtualColumns\":[],\"resultFormat\":\"compactedList\",\"batchSize\":20480,\"limit\":9223372036854775807,\"timeOrder\":\"none\",\"filter\":null,\"columns\":[\"dim1\",\"dim2\"],\"legacy\":false,\"context\":{\"defaultTimeout\":300000,\"maxScatterGatherBytes\":9223372036854775807,\"sqlCurrentTimestamp\":\"2000-01-01T00:00:00Z\",\"sqlQueryId\":\"dummy\"},\"descending\":false,\"granularity\":{\"type\":\"all\"}}], signature=[{dim1:STRING, dim2:STRING}])\n";
+        + " DruidQueryRel(query=[{\"queryType\":\"scan\",\"dataSource\":{\"type\":\"table\",\"name\":\"foo\"},\"intervals\":{\"type\":\"intervals\",\"intervals\":[\"-146136543-09-08T08:23:32.096Z/146140482-04-24T15:36:27.903Z\"]},\"virtualColumns\":[],\"resultFormat\":\"compactedList\",\"batchSize\":20480,\"limit\":9223372036854775807,\"order\":\"none\",\"filter\":null,\"columns\":[\"dim1\",\"dim2\"],\"legacy\":false,\"context\":{\"defaultTimeout\":300000,\"maxScatterGatherBytes\":9223372036854775807,\"sqlCurrentTimestamp\":\"2000-01-01T00:00:00Z\",\"sqlQueryId\":\"dummy\"},\"descending\":false,\"granularity\":{\"type\":\"all\"}}], signature=[{dim1:STRING, dim2:STRING}])\n";

     testQuery(
         PLANNER_CONFIG_FALLBACK,
@@ -6669,7 +6669,7 @@ public class CalciteQueryTest extends BaseCalciteQueryTest
         + " BindableFilter(condition=[OR(=($0, 'xxx'), CAST(AND(IS NOT NULL($4), <>($2, 0), IS NOT NULL($1))):BOOLEAN)])\n"
         + " BindableJoin(condition=[=($1, $3)], joinType=[left])\n"
         + " BindableJoin(condition=[true], joinType=[inner])\n"
-        + " DruidQueryRel(query=[{\"queryType\":\"scan\",\"dataSource\":{\"type\":\"table\",\"name\":\"foo\"},\"intervals\":{\"type\":\"intervals\",\"intervals\":[\"-146136543-09-08T08:23:32.096Z/146140482-04-24T15:36:27.903Z\"]},\"virtualColumns\":[],\"resultFormat\":\"compactedList\",\"batchSize\":20480,\"limit\":9223372036854775807,\"timeOrder\":\"none\",\"filter\":null,\"columns\":[\"dim1\",\"dim2\"],\"legacy\":false,\"context\":{\"defaultTimeout\":300000,\"maxScatterGatherBytes\":9223372036854775807,\"sqlCurrentTimestamp\":\"2000-01-01T00:00:00Z\",\"sqlQueryId\":\"dummy\"},\"descending\":false,\"granularity\":{\"type\":\"all\"}}], signature=[{dim1:STRING, dim2:STRING}])\n"
+        + " DruidQueryRel(query=[{\"queryType\":\"scan\",\"dataSource\":{\"type\":\"table\",\"name\":\"foo\"},\"intervals\":{\"type\":\"intervals\",\"intervals\":[\"-146136543-09-08T08:23:32.096Z/146140482-04-24T15:36:27.903Z\"]},\"virtualColumns\":[],\"resultFormat\":\"compactedList\",\"batchSize\":20480,\"limit\":9223372036854775807,\"order\":\"none\",\"filter\":null,\"columns\":[\"dim1\",\"dim2\"],\"legacy\":false,\"context\":{\"defaultTimeout\":300000,\"maxScatterGatherBytes\":9223372036854775807,\"sqlCurrentTimestamp\":\"2000-01-01T00:00:00Z\",\"sqlQueryId\":\"dummy\"},\"descending\":false,\"granularity\":{\"type\":\"all\"}}], signature=[{dim1:STRING, dim2:STRING}])\n"
         + " DruidQueryRel(query=[{\"queryType\":\"timeseries\",\"dataSource\":{\"type\":\"table\",\"name\":\"foo\"},\"intervals\":{\"type\":\"intervals\",\"intervals\":[\"-146136543-09-08T08:23:32.096Z/146140482-04-24T15:36:27.903Z\"]},\"descending\":false,\"virtualColumns\":[],\"filter\":{\"type\":\"like\",\"dimension\":\"dim1\",\"pattern\":\"%bc\",\"escape\":null,\"extractionFn\":null},\"granularity\":{\"type\":\"all\"},\"aggregations\":[{\"type\":\"count\",\"name\":\"a0\"}],\"postAggregations\":[],\"limit\":2147483647,\"context\":{\"defaultTimeout\":300000,\"maxScatterGatherBytes\":9223372036854775807,\"skipEmptyBuckets\":true,\"sqlCurrentTimestamp\":\"2000-01-01T00:00:00Z\",\"sqlQueryId\":\"dummy\"}}], signature=[{a0:LONG}])\n"
         + " DruidQueryRel(query=[{\"queryType\":\"groupBy\",\"dataSource\":{\"type\":\"table\",\"name\":\"foo\"},\"intervals\":{\"type\":\"intervals\",\"intervals\":[\"-146136543-09-08T08:23:32.096Z/146140482-04-24T15:36:27.903Z\"]},\"virtualColumns\":[{\"type\":\"expression\",\"name\":\"v0\",\"expression\":\"1\",\"outputType\":\"LONG\"}],\"filter\":{\"type\":\"like\",\"dimension\":\"dim1\",\"pattern\":\"%bc\",\"escape\":null,\"extractionFn\":null},\"granularity\":{\"type\":\"all\"},\"dimensions\":[{\"type\":\"default\",\"dimension\":\"dim1\",\"outputName\":\"d0\",\"outputType\":\"STRING\"},{\"type\":\"default\",\"dimension\":\"v0\",\"outputName\":\"v0\",\"outputType\":\"LONG\"}],\"aggregations\":[],\"postAggregations\":[],\"having\":null,\"limitSpec\":{\"type\":\"NoopLimitSpec\"},\"context\":{\"defaultTimeout\":300000,\"maxScatterGatherBytes\":9223372036854775807,\"sqlCurrentTimestamp\":\"2000-01-01T00:00:00Z\",\"sqlQueryId\":\"dummy\"},\"descending\":false}], signature=[{d0:STRING, v0:LONG}])\n";
