explicit outputType for ExpressionPostAggregator, better documentation for the differences between arrays and mvds (#15245)

* better documentation for the differences between arrays and mvds
* add outputType to ExpressionPostAggregator to make docs true
* add output coercion if outputType is defined on ExpressionPostAgg
* updated post-aggregations.md to be consistent with aggregations.md and filters.md and use tables
Clint Wylie 2023-11-02 00:31:37 -07:00 committed by GitHub
parent 22443ab87e
commit d261587f4a
33 changed files with 1120 additions and 423 deletions


@ -192,18 +192,13 @@ To perform ingestion with rollup:
2. Set [`finalizeAggregations: false`](reference.md#context-parameters) in your context. This causes aggregation
functions to write their internal state to the generated segments, instead of the finalized end result, and enables
further aggregation at query time.
3. See [ARRAY types](../querying/arrays.md#sql-based-ingestion-with-rollup) for information about ingesting `ARRAY` columns.
4. See [multi-value dimensions](../querying/multi-value-dimensions.md#sql-based-ingestion-with-rollup) for information about ingesting multi-value VARCHAR columns.
When you do all of these things, Druid understands that you intend to do an ingestion with rollup, and it writes
rollup-related metadata into the generated segments. Other applications can then use [`segmentMetadata`
queries](../querying/segmentmetadataquery.md) to retrieve rollup-related information.
The following [aggregation functions](../querying/sql-aggregations.md) are supported for rollup at ingestion time:
`COUNT` (but switch to `SUM` at query time), `SUM`, `MIN`, `MAX`, `EARLIEST` and `EARLIEST_BY` ([string only](known-issues.md#select-statement)),
`LATEST` and `LATEST_BY` ([string only](known-issues.md#select-statement)), `APPROX_COUNT_DISTINCT`, `APPROX_COUNT_DISTINCT_BUILTIN`,


@ -79,7 +79,7 @@ CLUSTERED BY channel
## INSERT with rollup
This example inserts data into a table named `kttm_rollup` and performs data rollup. This example implements the recommendations described in [Rollup](./concepts.md#rollup).
<details><summary>Show the query</summary>
@ -91,7 +91,7 @@ SELECT * FROM TABLE(
EXTERN(
'{"type":"http","uris":["https://static.imply.io/example-data/kttm-v2/kttm-v2-2019-08-25.json.gz"]}',
'{"type":"json"}',
'[{"name":"timestamp","type":"string"},{"name":"agent_category","type":"string"},{"name":"agent_type","type":"string"},{"name":"browser","type":"string"},{"name":"browser_version","type":"string"},{"name":"city","type":"string"},{"name":"continent","type":"string"},{"name":"country","type":"string"},{"name":"version","type":"string"},{"name":"event_type","type":"string"},{"name":"event_subtype","type":"string"},{"name":"loaded_image","type":"string"},{"name":"adblock_list","type":"string"},{"name":"forwarded_for","type":"string"},{"name":"language","type":"string"},{"name":"number","type":"long"},{"name":"os","type":"string"},{"name":"path","type":"string"},{"name":"platform","type":"string"},{"name":"referrer","type":"string"},{"name":"referrer_host","type":"string"},{"name":"region","type":"string"},{"name":"remote_address","type":"string"},{"name":"screen","type":"string"},{"name":"session","type":"string"},{"name":"session_length","type":"long"},{"name":"timezone","type":"string"},{"name":"timezone_offset","type":"long"},{"name":"window","type":"string"}]'
'[{"name":"timestamp","type":"string"},{"name":"agent_category","type":"string"},{"name":"agent_type","type":"string"},{"name":"browser","type":"string"},{"name":"browser_version","type":"string"},{"name":"city","type":"string"},{"name":"continent","type":"string"},{"name":"country","type":"string"},{"name":"version","type":"string"},{"name":"event_type","type":"string"},{"name":"event_subtype","type":"string"},{"name":"loaded_image","type":"string"},{"name":"adblock_list","type":"string"},{"name":"forwarded_for","type":"string"},{"name":"number","type":"long"},{"name":"os","type":"string"},{"name":"path","type":"string"},{"name":"platform","type":"string"},{"name":"referrer","type":"string"},{"name":"referrer_host","type":"string"},{"name":"region","type":"string"},{"name":"remote_address","type":"string"},{"name":"screen","type":"string"},{"name":"session","type":"string"},{"name":"session_length","type":"long"},{"name":"timezone","type":"string"},{"name":"timezone_offset","type":"long"},{"name":"window","type":"string"}]'
)
))
@ -101,8 +101,7 @@ SELECT
agent_category,
agent_type,
browser,
browser_version,
os,
city,
country,
@ -113,11 +112,10 @@ SELECT
APPROX_COUNT_DISTINCT_DS_HLL(event_type) AS unique_event_types
FROM kttm_data
WHERE os = 'iOS'
GROUP BY 1, 2, 3, 4, 5, 6, 7, 8, 9, 10
PARTITIONED BY HOUR
CLUSTERED BY browser, session
```
</details>
## INSERT for reindexing an existing datasource


@ -232,23 +232,25 @@ If you're using the web console, you can specify the context parameters through
The following table lists the context parameters for the MSQ task engine:
| Parameter | Description | Default value |
|---|---|---|
| `maxNumTasks` | SELECT, INSERT, REPLACE<br /><br />The maximum total number of tasks to launch, including the controller task. The lowest possible value for this setting is 2: one controller and one worker. All tasks must be able to launch simultaneously. If they cannot, the query returns a `TaskStartTimeout` error code after approximately 10 minutes.<br /><br />May also be provided as `numTasks`. If both are present, `maxNumTasks` takes priority. | 2 |
| `taskAssignment` | SELECT, INSERT, REPLACE<br /><br />Determines how many tasks to use. Possible values include: <ul><li>`max`: Uses as many tasks as possible, up to `maxNumTasks`.</li><li>`auto`: When file sizes can be determined through directory listing (for example: local files, S3, GCS, HDFS) uses as few tasks as possible without exceeding 512 MiB or 10,000 files per task, unless exceeding these limits is necessary to stay within `maxNumTasks`. When calculating the size of files, the weighted size is used, which considers the file format and compression format used if any. When file sizes cannot be determined through directory listing (for example: http), behaves the same as `max`.</li></ul> | `max` |
| `finalizeAggregations` | SELECT, INSERT, REPLACE<br /><br />Determines the type of aggregation to return. If true, Druid finalizes the results of complex aggregations that directly appear in query results. If false, Druid returns the aggregation's intermediate type rather than finalized type. This parameter is useful during ingestion, where it enables storing sketches directly in Druid tables. For more information about aggregations, see [SQL aggregation functions](../querying/sql-aggregations.md). | true |
| `arrayIngestMode` | INSERT, REPLACE<br /><br /> Controls how ARRAY type values are stored in Druid segments. When set to `array` (recommended for SQL compliance), Druid stores all ARRAY typed values in [ARRAY typed columns](../querying/arrays.md), and supports storing both VARCHAR and numeric typed arrays. When set to `mvd` (the default, for backwards compatibility), Druid only supports VARCHAR typed arrays, and stores them as [multi-value string columns](../querying/multi-value-dimensions.md). When set to `none`, Druid throws an exception when trying to store any type of array. `none` is most useful when set in the system default query context (`druid.query.default.context.arrayIngestMode=none`) to help migrate from `mvd` mode to `array` mode, since it forces query writers to make an explicit choice between ARRAY and multi-value VARCHAR typed columns. | `mvd` (for backwards compatibility; `array` is recommended for SQL compliance) |
| `sqlJoinAlgorithm` | SELECT, INSERT, REPLACE<br /><br />Algorithm to use for JOIN. Use `broadcast` (the default) for broadcast hash join or `sortMerge` for sort-merge join. Affects all JOIN operations in the query. This is a hint to the MSQ engine and the actual joins in the query may proceed in a different way than specified. See [Joins](#joins) for more details. | `broadcast` |
| `rowsInMemory` | INSERT or REPLACE<br /><br />Maximum number of rows to store in memory at once before flushing to disk during the segment generation process. Ignored for non-INSERT queries. In most cases, use the default value. You may need to override the default if you run into one of the [known issues](./known-issues.md) around memory usage. | 100,000 |
| `segmentSortOrder` | INSERT or REPLACE<br /><br />Normally, Druid sorts rows in individual segments using `__time` first, followed by the [CLUSTERED BY](#clustered-by) clause. When you set `segmentSortOrder`, Druid sorts rows in segments using this column list first, followed by the CLUSTERED BY order.<br /><br />You provide the column list as comma-separated values or as a JSON array in string form. If your query includes `__time`, then this list must begin with `__time`. For example, consider an INSERT query that uses `CLUSTERED BY country` and has `segmentSortOrder` set to `__time,city`. Within each time chunk, Druid assigns rows to segments based on `country`, and then within each of those segments, Druid sorts those rows by `__time` first, then `city`, then `country`. | empty list |
| `maxParseExceptions`| SELECT, INSERT, REPLACE<br /><br />Maximum number of parse exceptions that are ignored while executing the query before it stops with `TooManyWarningsFault`. To ignore all the parse exceptions, set the value to -1. | 0 |
| `rowsPerSegment` | INSERT or REPLACE<br /><br />The number of rows per segment to target. The actual number of rows per segment may be somewhat higher or lower than this number. In most cases, use the default. For general information about sizing rows per segment, see [Segment Size Optimization](../operations/segment-optimization.md). | 3,000,000 |
| `indexSpec` | INSERT or REPLACE<br /><br />An [`indexSpec`](../ingestion/ingestion-spec.md#indexspec) to use when generating segments. May be a JSON string or object. See [Front coding](../ingestion/ingestion-spec.md#front-coding) for details on configuring an `indexSpec` with front coding. | See [`indexSpec`](../ingestion/ingestion-spec.md#indexspec). |
| `durableShuffleStorage` | SELECT, INSERT, REPLACE <br /><br />Whether to use durable storage for shuffle mesh. To use this feature, configure durable storage at the server level by setting `druid.msq.intermediate.storage.enable=true`. If these properties are not configured, any query with the context variable `durableShuffleStorage=true` fails with a configuration error. | `false` |
| `faultTolerance` | SELECT, INSERT, REPLACE<br /><br /> Whether to turn on fault tolerance mode or not. Failed workers are retried based on [Limits](#limits). Cannot be used when `durableShuffleStorage` is explicitly set to false. | `false` |
| `selectDestination` | SELECT<br /><br /> Controls where the final result of the SELECT query is written. <br />Use `taskReport` (the default) to write select results to the task report. <b>This is not scalable, since the task report size grows very large for large results.</b> <br/>Use `durableStorage` to write results to a durable storage location. <b>For large result sets, it's recommended to use `durableStorage`.</b> To configure durable storage, see the [durable storage](#durable-storage) section. | `taskReport` |
| `waitUntilSegmentsLoad` | INSERT, REPLACE<br /><br /> If set, the ingest query waits for the generated segments to be loaded before exiting; otherwise, the ingest query exits without waiting. The task and live reports contain information about the status of loading segments if this flag is set. This ensures that any queries made after the ingestion exits will include results from the ingestion. The drawback is that the controller task will stall until the segments are loaded. | `false` |
| `includeSegmentSource` | SELECT, INSERT, REPLACE<br /><br /> Controls the sources, which will be queried for results in addition to the segments present on deep storage. Can be `NONE` or `REALTIME`. If this value is `NONE`, only non-realtime (published and used) segments will be downloaded from deep storage. If this value is `REALTIME`, results will also be included from realtime tasks. | `NONE` |
| `rowsPerPage` | SELECT<br /><br />The number of rows per page to target. The actual number of rows per page may be somewhat higher or lower than this number. In most cases, use the default.<br /> This property comes into effect only when `selectDestination` is set to `durableStorage` | 100000 |
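For example, a sketch of a payload for the SQL task API (`/druid/v2/sql/task`) that sets several of these parameters on an ingestion query; the query text and parameter values here are illustrative only:
```json
{
  "query": "INSERT INTO target_table SELECT * FROM source_table PARTITIONED BY DAY",
  "context": {
    "maxNumTasks": 4,
    "finalizeAggregations": false,
    "arrayIngestMode": "array",
    "rowsPerSegment": 3000000
  }
}
```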
## Joins

docs/querying/arrays.md Normal file

@ -0,0 +1,253 @@
---
id: arrays
title: "Arrays"
---
<!--
~ Licensed to the Apache Software Foundation (ASF) under one
~ or more contributor license agreements. See the NOTICE file
~ distributed with this work for additional information
~ regarding copyright ownership. The ASF licenses this file
~ to you under the Apache License, Version 2.0 (the
~ "License"); you may not use this file except in compliance
~ with the License. You may obtain a copy of the License at
~
~ http://www.apache.org/licenses/LICENSE-2.0
~
~ Unless required by applicable law or agreed to in writing,
~ software distributed under the License is distributed on an
~ "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
~ KIND, either express or implied. See the License for the
~ specific language governing permissions and limitations
~ under the License.
-->
Apache Druid supports SQL standard `ARRAY` typed columns for `VARCHAR`, `BIGINT`, and `DOUBLE` types (native types `ARRAY<STRING>`, `ARRAY<LONG>`, and `ARRAY<DOUBLE>`). Other more complicated ARRAY types must be stored in [nested columns](nested-columns.md). Druid `ARRAY` types are distinct from [multi-value dimensions](multi-value-dimensions.md), which have significantly different behavior than standard arrays.
This document describes inserting, filtering, and grouping behavior for `ARRAY` typed columns.
Refer to the [Druid SQL data type documentation](sql-data-types.md#arrays) and [SQL array function reference](sql-array-functions.md) for additional details
about the functions available to use with ARRAY columns and types in SQL.
The following sections describe inserting, filtering, and grouping behavior based on the following example data, which includes 3 array typed columns:
```json lines
{"timestamp": "2023-01-01T00:00:00", "label": "row1", "arrayString": ["a", "b"], "arrayLong":[1, null,3], "arrayDouble":[1.1, 2.2, null]}
{"timestamp": "2023-01-01T00:00:00", "label": "row2", "arrayString": [null, "b"], "arrayLong":null, "arrayDouble":[999, null, 5.5]}
{"timestamp": "2023-01-01T00:00:00", "label": "row3", "arrayString": [], "arrayLong":[1, 2, 3], "arrayDouble":[null, 2.2, 1.1]}
{"timestamp": "2023-01-01T00:00:00", "label": "row4", "arrayString": ["a", "b"], "arrayLong":[1, 2, 3], "arrayDouble":[]}
{"timestamp": "2023-01-01T00:00:00", "label": "row5", "arrayString": null, "arrayLong":[], "arrayDouble":null}
```
## Ingesting arrays
### Native batch and streaming ingestion
When using native [batch](../ingestion/native-batch.md) or streaming ingestion such as with [Apache Kafka](../development/extensions-core/kafka-ingestion.md), arrays can be ingested using the [`"auto"`](../ingestion/ingestion-spec.md#dimension-objects) type dimension schema which is shared with [type-aware schema discovery](../ingestion/schema-design.md#type-aware-schema-discovery).
When ingesting from TSV or CSV data, you can specify the array delimiters using the `listDelimiter` field in the `inputFormat`. JSON data must be formatted as a JSON array to be ingested as an array type. JSON data does not require `inputFormat` configuration.
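For instance, a minimal sketch of a CSV `inputFormat` in which values within a field are separated by `|` and ingested as an array; the delimiter choice and header setting are illustrative:
```json
{
  "type": "csv",
  "findColumnsFromHeader": true,
  "listDelimiter": "|"
}
```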
The following shows an example `dimensionsSpec` for native ingestion of the data used in this document:
```
"dimensions": [
{
"type": "auto",
"name": "label"
},
{
"type": "auto",
"name": "arrayString"
},
{
"type": "auto",
"name": "arrayLong"
},
{
"type": "auto",
"name": "arrayDouble"
}
],
```
### SQL-based ingestion
Arrays can also be inserted with [SQL-based ingestion](../multi-stage-query/index.md) when you include a query context parameter [`"arrayIngestMode":"array"`](../multi-stage-query/reference.md#context-parameters).
For example, to insert the data used in this document:
```sql
REPLACE INTO "array_example" OVERWRITE ALL
WITH "ext" AS (
SELECT *
FROM TABLE(
EXTERN(
'{"type":"inline","data":"{\"timestamp\": \"2023-01-01T00:00:00\", \"label\": \"row1\", \"arrayString\": [\"a\", \"b\"], \"arrayLong\":[1, null,3], \"arrayDouble\":[1.1, 2.2, null]}\n{\"timestamp\": \"2023-01-01T00:00:00\", \"label\": \"row2\", \"arrayString\": [null, \"b\"], \"arrayLong\":null, \"arrayDouble\":[999, null, 5.5]}\n{\"timestamp\": \"2023-01-01T00:00:00\", \"label\": \"row3\", \"arrayString\": [], \"arrayLong\":[1, 2, 3], \"arrayDouble\":[null, 2.2, 1.1]} \n{\"timestamp\": \"2023-01-01T00:00:00\", \"label\": \"row4\", \"arrayString\": [\"a\", \"b\"], \"arrayLong\":[1, 2, 3], \"arrayDouble\":[]}\n{\"timestamp\": \"2023-01-01T00:00:00\", \"label\": \"row5\", \"arrayString\": null, \"arrayLong\":[], \"arrayDouble\":null}"}',
'{"type":"json"}',
'[{"name":"timestamp", "type":"STRING"},{"name":"label", "type":"STRING"},{"name":"arrayString", "type":"ARRAY<STRING>"},{"name":"arrayLong", "type":"ARRAY<LONG>"},{"name":"arrayDouble", "type":"ARRAY<DOUBLE>"}]'
)
)
)
SELECT
TIME_PARSE("timestamp") AS "__time",
"label",
"arrayString",
"arrayLong",
"arrayDouble"
FROM "ext"
PARTITIONED BY DAY
```
### SQL-based ingestion with rollup
These input arrays can also be grouped for rollup:
```sql
REPLACE INTO "array_example_rollup" OVERWRITE ALL
WITH "ext" AS (
SELECT *
FROM TABLE(
EXTERN(
'{"type":"inline","data":"{\"timestamp\": \"2023-01-01T00:00:00\", \"label\": \"row1\", \"arrayString\": [\"a\", \"b\"], \"arrayLong\":[1, null,3], \"arrayDouble\":[1.1, 2.2, null]}\n{\"timestamp\": \"2023-01-01T00:00:00\", \"label\": \"row2\", \"arrayString\": [null, \"b\"], \"arrayLong\":null, \"arrayDouble\":[999, null, 5.5]}\n{\"timestamp\": \"2023-01-01T00:00:00\", \"label\": \"row3\", \"arrayString\": [], \"arrayLong\":[1, 2, 3], \"arrayDouble\":[null, 2.2, 1.1]} \n{\"timestamp\": \"2023-01-01T00:00:00\", \"label\": \"row4\", \"arrayString\": [\"a\", \"b\"], \"arrayLong\":[1, 2, 3], \"arrayDouble\":[]}\n{\"timestamp\": \"2023-01-01T00:00:00\", \"label\": \"row5\", \"arrayString\": null, \"arrayLong\":[], \"arrayDouble\":null}"}',
'{"type":"json"}',
'[{"name":"timestamp", "type":"STRING"},{"name":"label", "type":"STRING"},{"name":"arrayString", "type":"ARRAY<STRING>"},{"name":"arrayLong", "type":"ARRAY<LONG>"},{"name":"arrayDouble", "type":"ARRAY<DOUBLE>"}]'
)
)
)
SELECT
TIME_PARSE("timestamp") AS "__time",
"label",
"arrayString",
"arrayLong",
"arrayDouble",
COUNT(*) as "count"
FROM "ext"
GROUP BY 1,2,3,4,5
PARTITIONED BY DAY
```
## Querying arrays
### Filtering
All query types, as well as [filtered aggregators](aggregations.md#filtered-aggregator), can filter on array typed columns. Filters follow these rules for array types:
- All filters match against the entire array value for the row
- Native value filters like [equality](filters.md#equality-filter) and [range](filters.md#range-filter) match on entire array values, as do SQL constructs that plan into these native filters
- The [`IS NULL`](filters.md#null-filter) filter will match rows where the entire array value is null
- [Array specific functions](sql-array-functions.md) like `ARRAY_CONTAINS` and `ARRAY_OVERLAP` follow the behavior specified by those functions
- All other filters do not directly support ARRAY types and will result in a query error
#### Example: equality
```sql
SELECT *
FROM "array_example"
WHERE arrayLong = ARRAY[1,2,3]
```
```json lines
{"__time":"2023-01-01T00:00:00.000Z","label":"row3","arrayString":"[]","arrayLong":"[1,2,3]","arrayDouble":"[null,2.2,1.1]"}
{"__time":"2023-01-01T00:00:00.000Z","label":"row4","arrayString":"[\"a\",\"b\"]","arrayLong":"[1,2,3]","arrayDouble":"[]"}
```
#### Example: null
```sql
SELECT *
FROM "array_example"
WHERE arrayLong IS NULL
```
```json lines
{"__time":"2023-01-01T00:00:00.000Z","label":"row2","arrayString":"[null,\"b\"]","arrayLong":null,"arrayDouble":"[999.0,null,5.5]"}
```
#### Example: range
```sql
SELECT *
FROM "array_example"
WHERE arrayString >= ARRAY['a','b']
```
```json lines
{"__time":"2023-01-01T00:00:00.000Z","label":"row1","arrayString":"[\"a\",\"b\"]","arrayLong":"[1,null,3]","arrayDouble":"[1.1,2.2,null]"}
{"__time":"2023-01-01T00:00:00.000Z","label":"row4","arrayString":"[\"a\",\"b\"]","arrayLong":"[1,2,3]","arrayDouble":"[]"}
```
#### Example: ARRAY_CONTAINS
```sql
SELECT *
FROM "array_example"
WHERE ARRAY_CONTAINS(arrayString, 'a')
```
```json lines
{"__time":"2023-01-01T00:00:00.000Z","label":"row1","arrayString":"[\"a\",\"b\"]","arrayLong":"[1,null,3]","arrayDouble":"[1.1,2.2,null]"}
{"__time":"2023-01-01T00:00:00.000Z","label":"row4","arrayString":"[\"a\",\"b\"]","arrayLong":"[1,2,3]","arrayDouble":"[]"}
```
### Grouping
When grouping on an array with SQL or a native [groupBy query](groupbyquery.md), grouping follows standard SQL behavior and groups on the entire array as a single value. The [`UNNEST`](sql.md#unnest) function allows grouping on the individual array elements.
#### Example: SQL grouping query with no filtering
```sql
SELECT label, arrayString
FROM "array_example"
GROUP BY 1,2
```
results in:
```json lines
{"label":"row1","arrayString":"[\"a\",\"b\"]"}
{"label":"row2","arrayString":"[null,\"b\"]"}
{"label":"row3","arrayString":"[]"}
{"label":"row4","arrayString":"[\"a\",\"b\"]"}
{"label":"row5","arrayString":null}
```
#### Example: SQL grouping query with a filter
```sql
SELECT label, arrayString
FROM "array_example"
WHERE arrayLong = ARRAY[1,2,3]
GROUP BY 1,2
```
results:
```json lines
{"label":"row3","arrayString":"[]"}
{"label":"row4","arrayString":"[\"a\",\"b\"]"}
```
#### Example: UNNEST
```sql
SELECT label, strings
FROM "array_example" CROSS JOIN UNNEST(arrayString) as u(strings)
GROUP BY 1,2
```
results:
```json lines
{"label":"row1","strings":"a"}
{"label":"row1","strings":"b"}
{"label":"row2","strings":null}
{"label":"row2","strings":"b"}
{"label":"row4","strings":"a"}
{"label":"row4","strings":"b"}
```
## Differences between arrays and multi-value dimensions
Avoid confusing string arrays with [multi-value dimensions](multi-value-dimensions.md). Arrays and multi-value dimensions are stored in different column types, and query behavior is different. You can use the functions `MV_TO_ARRAY` and `ARRAY_TO_MV` to convert between the two if needed. In general, we recommend using arrays whenever possible, since they are a newer and more powerful feature and have SQL compliant behavior.
Use care during ingestion to ensure you get the type you want.
To get arrays when performing an ingestion using JSON ingestion specs, such as [native batch](../ingestion/native-batch.md) or streaming ingestion such as with [Apache Kafka](../development/extensions-core/kafka-ingestion.md), use dimension type `auto` or enable `useSchemaDiscovery`. When performing a [SQL-based ingestion](../multi-stage-query/index.md), write a query that generates arrays and set the context parameter `"arrayIngestMode": "array"`. Arrays may contain strings or numbers.
To get multi-value dimensions when performing an ingestion using JSON ingestion specs, use dimension type `string` and do not enable `useSchemaDiscovery`. When performing a [SQL-based ingestion](../multi-stage-query/index.md), wrap arrays in [`ARRAY_TO_MV`](multi-value-dimensions.md#sql-based-ingestion), which ensures you get multi-value dimensions in any `arrayIngestMode`. Multi-value dimensions can only contain strings.
You can tell which type you have by checking the `INFORMATION_SCHEMA.COLUMNS` table, using a query like:
```sql
SELECT COLUMN_NAME, DATA_TYPE
FROM INFORMATION_SCHEMA.COLUMNS
WHERE TABLE_NAME = 'mytable'
```
Arrays are type `ARRAY`, multi-value strings are type `VARCHAR`.
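If you need to bridge the two at query time, the following sketch uses the conversion functions mentioned above with the example data from this page:
```sql
SELECT
  "label",
  ARRAY_TO_MV("arrayString") AS "arrayStringAsMvd" -- present the ARRAY<STRING> column as a multi-value VARCHAR
FROM "array_example"
```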


@ -30,21 +30,37 @@ array of values instead of a single value, such as the `tags` values in the foll
{"timestamp": "2011-01-12T00:00:00.000Z", "tags": ["t1","t2","t3"]}
```
It is important to be aware that multi-value dimensions are distinct from [array types](arrays.md). While array types behave like standard SQL arrays, multi-value dimensions do not. This document describes the behavior of multi-value dimensions, and some additional details can be found in the [SQL data type documentation](sql-data-types.md#multi-value-strings-behavior).
This document describes inserting, filtering, and grouping behavior for multi-value dimensions. For information about the internal representation of multi-value dimensions, see
[segments documentation](../design/segments.md#multi-value-columns). Examples in this document
are in the form of both [SQL](sql.md) and [native Druid queries](querying.md). Refer to the [Druid SQL documentation](sql-multivalue-string-functions.md) for details
about the functions available for using multi-value string dimensions in SQL.
## Overview
The following sections describe inserting, filtering, and grouping behavior based on the following example data, which includes a multi-value dimension, `tags`.
```json lines
{"timestamp": "2011-01-12T00:00:00.000Z", "label": "row1", "tags": ["t1","t2","t3"]}
{"timestamp": "2011-01-13T00:00:00.000Z", "label": "row2", "tags": ["t3","t4","t5"]}
{"timestamp": "2011-01-14T00:00:00.000Z", "label": "row3", "tags": ["t5","t6","t7"]}
{"timestamp": "2011-01-14T00:00:00.000Z", "label": "row4", "tags": []}
```
## Ingestion
### Native batch and streaming ingestion
When using native [batch](../ingestion/native-batch.md) or streaming ingestion such as with [Apache Kafka](../development/extensions-core/kafka-ingestion.md), the Druid web console data loader can detect multi-value dimensions and configure the `dimensionsSpec` accordingly.
For TSV or CSV data, you can specify the multi-value delimiters using the `listDelimiter` field in the `inputFormat`. JSON data must be formatted as a JSON array to be ingested as a multi-value dimension. JSON data does not require `inputFormat` configuration.
The following shows an example `dimensionsSpec` for native ingestion of the data used in this document:
```
"dimensions": [
{
"type": "string",
"name": "label"
},
{
"type": "string",
"name": "tags",
@ -61,21 +77,82 @@ By default, Druid sorts values in multi-value dimensions. This behavior is contr
See [Dimension Objects](../ingestion/ingestion-spec.md#dimension-objects) for information on configuring multi-value handling.
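For example, a sketch of a `string` dimension that preserves the original value order instead of sorting; `ARRAY` is one of the supported `multiValueHandling` modes, alongside the default `SORTED_ARRAY` and `SORTED_SET`:
```json
{
  "type": "string",
  "name": "tags",
  "multiValueHandling": "ARRAY",
  "createBitmapIndex": true
}
```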
### SQL-based ingestion
Multi-value dimensions can also be inserted with [SQL-based ingestion](../multi-stage-query/index.md). The functions `MV_TO_ARRAY` and `ARRAY_TO_MV` can assist in converting `VARCHAR` to `VARCHAR ARRAY` and `VARCHAR ARRAY` into `VARCHAR` respectively. `multiValueHandling` is not available when using the multi-stage query engine to insert data.
For example, to insert the data used in this document:
```sql
REPLACE INTO "mvd_example" OVERWRITE ALL
WITH "ext" AS (
SELECT *
FROM TABLE(
EXTERN(
'{"type":"inline","data":"{\"timestamp\": \"2011-01-12T00:00:00.000Z\", \"label\": \"row1\", \"tags\": [\"t1\",\"t2\",\"t3\"]}\n{\"timestamp\": \"2011-01-13T00:00:00.000Z\", \"label\": \"row2\", \"tags\": [\"t3\",\"t4\",\"t5\"]}\n{\"timestamp\": \"2011-01-14T00:00:00.000Z\", \"label\": \"row3\", \"tags\": [\"t5\",\"t6\",\"t7\"]}\n{\"timestamp\": \"2011-01-14T00:00:00.000Z\", \"label\": \"row4\", \"tags\": []}"}',
'{"type":"json"}',
'[{"name":"timestamp", "type":"STRING"},{"name":"label", "type":"STRING"},{"name":"tags", "type":"ARRAY<STRING>"}]'
)
)
)
SELECT
TIME_PARSE("timestamp") AS "__time",
"label",
ARRAY_TO_MV("tags") AS "tags"
FROM "ext"
PARTITIONED BY DAY
```
### SQL-based ingestion with rollup
These input arrays can also be grouped prior to converting into a multi-value dimension:
```sql
REPLACE INTO "mvd_example_rollup" OVERWRITE ALL
WITH "ext" AS (
SELECT *
FROM TABLE(
EXTERN(
'{"type":"inline","data":"{\"timestamp\": \"2011-01-12T00:00:00.000Z\", \"label\": \"row1\", \"tags\": [\"t1\",\"t2\",\"t3\"]}\n{\"timestamp\": \"2011-01-13T00:00:00.000Z\", \"label\": \"row2\", \"tags\": [\"t3\",\"t4\",\"t5\"]}\n{\"timestamp\": \"2011-01-14T00:00:00.000Z\", \"label\": \"row3\", \"tags\": [\"t5\",\"t6\",\"t7\"]}\n{\"timestamp\": \"2011-01-14T00:00:00.000Z\", \"label\": \"row4\", \"tags\": []}"}',
'{"type":"json"}',
'[{"name":"timestamp", "type":"STRING"},{"name":"label", "type":"STRING"},{"name":"tags", "type":"ARRAY<STRING>"}]'
)
)
)
SELECT
TIME_PARSE("timestamp") AS "__time",
"label",
ARRAY_TO_MV("tags") AS "tags",
COUNT(*) AS "count"
FROM "ext"
GROUP BY 1, 2, "tags"
PARTITIONED BY DAY
```
Notice that `ARRAY_TO_MV` is not present in the `GROUP BY` clause since we only wish to coerce the type _after_ grouping.
The `EXTERN` input can also declare the `tags` type as `VARCHAR`, which is how a query on a Druid table containing a multi-value dimension would specify the type of the `tags` column. In that case, you must first coerce `tags` with `MV_TO_ARRAY`, since the multi-stage query engine only supports grouping on multi-value dimensions as arrays, and then coerce the arrays back into `VARCHAR` in the `SELECT` part of the statement with `ARRAY_TO_MV`.
```sql
REPLACE INTO "mvd_example_rollup" OVERWRITE ALL
WITH "ext" AS (
SELECT *
FROM TABLE(
EXTERN(
'{"type":"inline","data":"{\"timestamp\": \"2011-01-12T00:00:00.000Z\", \"label\": \"row1\", \"tags\": [\"t1\",\"t2\",\"t3\"]}\n{\"timestamp\": \"2011-01-13T00:00:00.000Z\", \"label\": \"row2\", \"tags\": [\"t3\",\"t4\",\"t5\"]}\n{\"timestamp\": \"2011-01-14T00:00:00.000Z\", \"label\": \"row3\", \"tags\": [\"t5\",\"t6\",\"t7\"]}\n{\"timestamp\": \"2011-01-14T00:00:00.000Z\", \"label\": \"row4\", \"tags\": []}"}',
'{"type":"json"}'
)
) EXTEND ("timestamp" VARCHAR, "label" VARCHAR, "tags" VARCHAR)
)
SELECT
TIME_PARSE("timestamp") AS "__time",
"label",
ARRAY_TO_MV(MV_TO_ARRAY("tags")) AS "tags",
COUNT(*) AS "count"
FROM "ext"
GROUP BY 1, 2, MV_TO_ARRAY("tags")
PARTITIONED BY DAY
```
## Querying multi-value dimensions
### Filtering
All query types, as well as [filtered aggregators](aggregations.md#filtered-aggregator), can filter on multi-value
@ -88,28 +165,22 @@ dimensions. Filters follow these rules on multi-value dimensions:
- Logical expression filters behave the same way they do on single-value dimensions: "and" matches a row if all
underlying filters match that row; "or" matches a row if any underlying filters match that row; "not" matches a row
if the underlying filter does not match the row.
The following example illustrates these rules. This query applies an "or" filter to match row1 and row2 of the dataset above, but not row3:
```sql
SELECT *
FROM "mvd_example_rollup"
WHERE tags = 't1' OR tags = 't3'
```
This "and" filter would match only row1 of the dataset above:
returns
```json lines
{"__time":"2011-01-12T00:00:00.000Z","label":"row1","tags":"[\"t1\",\"t2\",\"t3\"]","count":1}
{"__time":"2011-01-13T00:00:00.000Z","label":"row2","tags":"[\"t3\",\"t4\",\"t5\"]","count":1}
```
Native queries can also perform filtering that would be considered a "contradiction" in SQL, such as this "and" filter which would match only row1 of the dataset above:
```
{
  "type": "and",
  "fields": [
    {
      "type": "selector",
      "dimension": "tags",
      "value": "t1"
    },
    {
      "type": "selector",
      "dimension": "tags",
      "value": "t3"
    }
  ]
}
```
This "selector" filter would match row4 of the dataset above:
which returns
```json lines
{"__time":"2011-01-12T00:00:00.000Z","label":"row1","tags":"[\"t1\",\"t2\",\"t3\"]","count":1}
```
Multi-value dimensions also treat an empty row as `null`. Consider:
```sql
SELECT *
FROM "mvd_example_rollup"
WHERE tags is null
```
which results in:
```json lines
{"__time":"2011-01-14T00:00:00.000Z","label":"row4","tags":null,"count":1}
```
### Grouping
When grouping on a multi-value dimension with SQL or a native [topN](topnquery.md) or [groupBy](groupbyquery.md) query, _all_ values
from matching rows will be used to generate one group per value. This behaves similarly to an implicit SQL `UNNEST`
operation. This means it's possible for a query to return more groups than there are rows. For example, a topN on the
dimension `tags` with filter `"t1" AND "t3"` would match only row1, and generate a result with three groups:
`t1`, `t2`, and `t3`.
If you only need to include values that match your filter, you can use the SQL functions [`MV_FILTER_ONLY`/`MV_FILTER_NONE`](sql-multivalue-string-functions.md),
[filtered virtual column](virtual-columns.md#list-filtered-virtual-column), or [filtered dimensionSpec](dimensionspecs.md#filtered-dimensionspecs). This can also improve performance.
#### Example: SQL grouping query with no filtering
```sql
SELECT label, tags
FROM "mvd_example_rollup"
GROUP BY 1,2
```
results in:
```json lines
{"label":"row1","tags":"t1"}
{"label":"row1","tags":"t2"}
{"label":"row1","tags":"t3"}
{"label":"row2","tags":"t3"}
{"label":"row2","tags":"t4"}
{"label":"row2","tags":"t5"}
{"label":"row3","tags":"t5"}
{"label":"row3","tags":"t6"}
{"label":"row3","tags":"t7"}
{"label":"row4","tags":null}
```
#### Example: SQL grouping query with a filter
```sql
SELECT label, tags
FROM "mvd_example_rollup"
WHERE tags = 't3'
GROUP BY 1,2
```
results:
```json lines
{"label":"row1","tags":"t1"}
{"label":"row1","tags":"t2"}
{"label":"row1","tags":"t3"}
{"label":"row2","tags":"t3"}
{"label":"row2","tags":"t4"}
{"label":"row2","tags":"t5"}
```
#### Example: native GroupBy query with no filtering
See [GroupBy querying](groupbyquery.md) for details.
@ -236,7 +354,7 @@ This query returns the following result:
Notice that original rows are "exploded" into multiple rows and merged.
#### Example: native GroupBy query with a selector query filter
See [query filters](filters.md) for details of selector query filter.
@ -314,11 +432,11 @@ This query returns the following result:
```
You might be surprised to see "t1", "t2", "t4" and "t5" included in the results. This is because the query filter is
applied on the row before explosion. For multi-value dimensions, a filter for value "t3" would match row1 and row2,
after which exploding is done. For multi-value dimensions, a query filter matches a row if any individual value inside
the multiple values matches the query filter.
#### Example: native GroupBy query with selector query and dimension filters
To solve the problem above and to get only rows for "t3", use a "filtered dimension spec", as in the query below.
@ -379,7 +497,26 @@ Having specs are applied at the outermost level of groupBy query processing.
## Disable GroupBy on multi-value columns
You can disable the implicit unnesting behavior for groupBy by setting `groupByEnableMultiValueUnnesting: false` in your
[query context](query-context.md). In this mode, the groupBy engine will return an error instead of completing the query. This is a safety
feature for situations where you believe that all dimensions are singly-valued and want the engine to reject any
multi-valued dimensions that were inadvertently included.
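For example, a sketch of a native groupBy query with this guard enabled, using the example datasource from this page; if `tags` is multi-valued, the query returns an error instead of silently unnesting:
```json
{
  "queryType": "groupBy",
  "dataSource": "mvd_example_rollup",
  "intervals": ["2011-01-12/2011-01-16"],
  "granularity": "all",
  "dimensions": ["tags"],
  "aggregations": [
    { "type": "count", "name": "count" }
  ],
  "context": {
    "groupByEnableMultiValueUnnesting": false
  }
}
```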
## Differences between arrays and multi-value dimensions
Avoid confusing string arrays with [multi-value dimensions](multi-value-dimensions.md). Arrays and multi-value dimensions are stored in different column types, and query behavior is different. You can use the functions `MV_TO_ARRAY` and `ARRAY_TO_MV` to convert between the two if needed. In general, we recommend using arrays whenever possible, since they are a newer and more powerful feature and have SQL compliant behavior.
Use care during ingestion to ensure you get the type you want.
To get arrays when performing an ingestion using JSON ingestion specs, such as [native batch](../ingestion/native-batch.md) or streaming ingestion such as with [Apache Kafka](../development/extensions-core/kafka-ingestion.md), use dimension type `auto` or enable `useSchemaDiscovery`. When performing a [SQL-based ingestion](../multi-stage-query/index.md), write a query that generates arrays and set the context parameter `"arrayIngestMode": "array"`. Arrays may contain strings or numbers.
To get multi-value dimensions when performing an ingestion using JSON ingestion specs, use dimension type `string` and do not enable `useSchemaDiscovery`. When performing a [SQL-based ingestion](../multi-stage-query/index.md), wrap arrays in [`ARRAY_TO_MV`](multi-value-dimensions.md#sql-based-ingestion), which ensures you get multi-value dimensions in any `arrayIngestMode`. Multi-value dimensions can only contain strings.
You can tell which type you have by checking the `INFORMATION_SCHEMA.COLUMNS` table, using a query like:
```sql
SELECT COLUMN_NAME, DATA_TYPE
FROM INFORMATION_SCHEMA.COLUMNS
WHERE TABLE_NAME = 'mytable'
```
Arrays are type `ARRAY`, multi-value strings are type `VARCHAR`.
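As a sketch of converting at query time, assuming the `mvd_example_rollup` table from this page, `MV_TO_ARRAY` lets you group on the complete tag set rather than on individual unnested values:
```sql
SELECT
  MV_TO_ARRAY("tags") AS "tagsArray", -- the multi-value VARCHAR treated as ARRAY<STRING>
  COUNT(*) AS "cnt"
FROM "mvd_example_rollup"
GROUP BY 1
```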


@ -38,49 +38,54 @@ There are several post-aggregators available.
The arithmetic post-aggregator applies the provided function to the given
fields from left to right. The fields can be aggregators or other post aggregators.
| Property | Description | Required |
| --- | --- | --- |
| `type` | Must be `"arithmetic"`. | Yes |
| `name` | Output name of the post-aggregation | Yes |
| `fn`| Supported functions are `+`, `-`, `*`, `/`, `pow` and `quotient` | Yes |
| `fields` | List of post-aggregator specs which define inputs to the `fn` | Yes |
| `ordering` | If no ordering (or `null`) is specified, the default floating point ordering is used. `numericFirst` ordering always returns finite values first, followed by `NaN`, and infinite values last. | No |
**Note**:
* `/` division always returns `0` if dividing by `0`, regardless of the numerator.
* `quotient` division behaves like regular floating point division.
* Arithmetic post-aggregators always use floating point arithmetic.
Example:
```json
{
"type" : "arithmetic",
"name" : <output_name>,
"fn" : <arithmetic_function>,
"fields": [<post_aggregator>, <post_aggregator>, ...],
"ordering" : <null (default), or "numericFirst">
"name" : "mult",
"fn" : "*",
"fields": [
{"type": "fieldAccess", "fieldName": "someAgg"},
{"type": "fieldAccess", "fieldName": "someOtherAgg"}
]
}
```
### Field accessor post-aggregators
These post-aggregators return the value produced by the specified [dimension](../querying/dimensionspecs.md) or [aggregator](../querying/aggregations.md).
`fieldName` refers to the output name of the aggregator given in the [aggregations](../querying/aggregations.md) portion of the query.
| Property | Description | Required |
| --- | --- | --- |
| `type` | Must be `"fieldAccess"` or `"finalizingFieldAccess"`. Use type `"fieldAccess"` to return the raw aggregation object, or use type `"finalizingFieldAccess"` to return a finalized value, such as an estimated cardinality. | Yes |
| `name` | Output name of the post-aggregation | Yes if defined as a standalone post-aggregation, but may be omitted if used inline to some other post-aggregator in a `fields` list |
| `fieldName` | The output name of the dimension or aggregator to reference | Yes |
Example:
```json
{ "type" : "fieldAccess", "name": <output_name>, "fieldName" : <aggregator_name> }
{ "type" : "fieldAccess", "name": "someField", "fieldName" : "someAggregator" }
```
or
```json
{ "type" : "finalizingFieldAccess", "name": <output_name>, "fieldName" : <aggregator_name> }
{ "type" : "finalizingFieldAccess", "name": "someFinalizedField", "fieldName" : "someAggregator" }
```
@ -88,29 +93,52 @@ or
The constant post-aggregator always returns the specified value.
| Property | Description | Required |
| --- | --- | --- |
| `type` | Must be `"constant"` | Yes |
| `name` | Output name of the post-aggregation | Yes |
| `value` | The constant value | Yes |
Example:
```json
{ "type" : "constant", "name" : <output_name>, "value" : <numerical_value> }
{ "type" : "constant", "name" : "someConstant", "value" : 1234 }
```
### Expression post-aggregator
The expression post-aggregator is defined using a Druid [expression](math-expr.md).
| Property | Description | Required |
| --- | --- | --- |
| `type` | Must be `"expression"` | Yes |
| `name` | Output name of the post-aggregation | Yes |
| `expression` | Native Druid [expression](math-expr.md) to compute, may refer to any dimension or aggregator output names | Yes |
| `ordering` | If no ordering (or `null`) is specified, the "natural" ordering is used. `numericFirst` ordering always returns finite values first, followed by `NaN`, and infinite values last. If the expression produces array or complex types, specify `ordering` as null and use `outputType` instead to use the correct type native ordering. | No |
| `outputType` | Output type is optional, and can be any native Druid type: `LONG`, `FLOAT`, `DOUBLE`, `STRING`, `ARRAY` types (e.g. `ARRAY<LONG>`), or `COMPLEX` types (e.g. `COMPLEX<json>`). If not specified, the output type will be inferred from the `expression`. If specified and `ordering` is null, the type native ordering will be used for sorting values. If the expression produces array or complex types, this value must be non-null to ensure the correct ordering is used. If `outputType` does not match the actual output type of the `expression`, Druid attempts to coerce the value to the specified type, which may fail if coercion is not possible. | No |
Example:
```json
{
"type": "expression",
"name": <output_name>,
"expression": <post-aggregation expression>,
"ordering" : <null (default), or "numericFirst">
"name": "someExpression",
"expression": "someAgg + someOtherAgg",
"ordering": null,
"outputType": "LONG"
}
```
### Greatest / Least post-aggregators
`doubleGreatest` and `longGreatest` compute the maximum of all fields and Double.NEGATIVE_INFINITY.
`doubleLeast` and `longLeast` compute the minimum of all fields and Double.POSITIVE_INFINITY.
| Property | Description | Required |
| --- | --- | --- |
| `type` | Must be `"doubleGreatest"`, `"doubleLeast"`, `"longGreatest"`, or `"longLeast"`. | Yes |
| `name` | Output name of the post-aggregation | Yes |
| `fields` | List of post-aggregator specs which define inputs to the greatest or least function | Yes |
The difference between the `doubleMax` aggregator and the `doubleGreatest` post-aggregator is that `doubleMax` returns the highest value of
all rows for one specific column while `doubleGreatest` returns the highest value of multiple columns in one row. These are similar to the
SQL `MAX` and `GREATEST` functions.
@ -120,8 +148,11 @@ Example:
```json
{
"type" : "doubleGreatest",
"name" : <output_name>,
"fields": [<post_aggregator>, <post_aggregator>, ...]
"name" : "theGreatest",
"fields": [
{ "type": "fieldAccess", "fieldName": "someAgg" },
{ "type": "fieldAccess", "fieldName": "someOtherAgg" }
]
}
```
@ -129,23 +160,20 @@ Example:
Applies the provided JavaScript function to the given fields. Fields are passed as arguments to the JavaScript function in the given order.
| Property | Description | Required |
| --- | --- | --- |
| `type` | Must be `"javascript"` | Yes |
| `name` | Output name of the post-aggregation | Yes |
| `fieldNames` | List of input dimension or aggregator output names | Yes |
| `function` | String JavaScript function that accepts the values of `fieldNames` as arguments | Yes |
Example:
```json
{
"type": "javascript",
"name": "absPercent",
"fieldNames": ["delta", "total"],
"function": "function(delta, total) { return 100 * Math.abs(delta) / total; }"
"name": "someJavascript",
"fieldNames" : ["someAgg", "someOtherAgg"],
"function": "function(someAgg, someOtherAgg) { return 100 * Math.abs(someAgg) / someOtherAgg;"
}
```
@ -157,17 +185,25 @@ Example JavaScript aggregator:
The hyperUniqueCardinality post aggregator is used to wrap a hyperUnique object such that it can be used in post aggregations.
| Property | Description | Required |
| --- | --- | --- |
| `type` | Must be `"hyperUniqueCardinality"` | Yes |
| `name` | Output name of the post-aggregation | Yes |
| `fieldName` | The output name of a [`hyperUnique` aggregator](aggregations.md#cardinality-hyperunique) | Yes |
```json
{
"type" : "hyperUniqueCardinality",
"name": <output name>,
"fieldName" : <the name field value of the hyperUnique aggregator>
"name": "someCardinality",
"fieldName" : "someHyperunique"
}
```
It can be used in a sample calculation like so:
```json
{
...
"aggregations" : [{
{"type" : "count", "name" : "rows"},
{"type" : "hyperUnique", "name" : "unique_users", "fieldName" : "uniques"}
@ -181,6 +217,7 @@ It can be used in a sample calculation as so:
{ "type" : "fieldAccess", "name" : "rows", "fieldName" : "rows" }
]
}]
...
```
This post-aggregator will inherit the rounding behavior of the aggregator it references. Note that this inheritance


@ -75,6 +75,17 @@ Casts between two SQL types that have different Druid runtime types generate a r
If a value cannot be cast to the target type, as in `CAST('foo' AS BIGINT)`, Druid substitutes [NULL](#null-values).
When `druid.generic.useDefaultValueForNull = true` (legacy mode), Druid instead substitutes a default value, including when NULL values cast to non-nullable types. For example, if `druid.generic.useDefaultValueForNull = true`, a null VARCHAR cast to BIGINT is converted to a zero.
## Arrays
Druid supports [`ARRAY` types](arrays.md), which behave as standard SQL arrays, where results are grouped by matching entire arrays. The [`UNNEST` operator](sql.md#unnest) can be used to perform operations on individual array elements, translating each element into a separate row.
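For instance, a sketch of grouping on individual array elements with `UNNEST`, assuming the `array_example` table from the [arrays documentation](arrays.md):
```sql
SELECT "strings", COUNT(*) AS "count"
FROM "array_example" CROSS JOIN UNNEST("arrayString") AS u("strings")
GROUP BY 1
```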
`ARRAY` typed columns can be stored in segments with classic JSON-based ingestion using the `auto` typed dimension schema shared with [schema auto-discovery](../ingestion/schema-design.md#schema-auto-discovery-for-dimensions) to detect and ingest arrays as ARRAY typed columns. For [SQL-based ingestion](../multi-stage-query/index.md), the query context parameter `arrayIngestMode` must be set to `"array"` to ingest ARRAY types. In Druid 28, the default mode for this parameter is `"mvd"` for backwards compatibility, which can only handle `ARRAY<STRING>` and stores it in [multi-value string columns](#multi-value-strings) instead.
You can convert multi-value dimensions to standard SQL arrays explicitly with `MV_TO_ARRAY` or implicitly using [array functions](./sql-array-functions.md). You can also use the array functions to construct arrays from multiple columns.
Druid serializes `ARRAY` results as a JSON string of the array by default. This behavior is controlled by the context parameter [`sqlStringifyArrays`](sql-query-context.md): when set to `false` and using JSON [result formats](../api-reference/sql-api.md#responses), the arrays are returned as regular JSON arrays rather than in stringified form.
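For example, assuming a hypothetical table with a multi-value `tags` column and two single-value string columns `dim1` and `dim2`:
```json
{
  "query": "SELECT MV_TO_ARRAY(tags) AS tags_array, ARRAY[dim1, dim2] AS dims FROM example_table",
  "context": {
    "sqlStringifyArrays": false
  }
}
```
With `sqlStringifyArrays` set to `false`, both result columns come back as regular JSON arrays rather than stringified arrays.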
## Multi-value strings
Druid's native type system allows strings to have multiple values. These [multi-value string dimensions](multi-value-dimensions.md) are reported in SQL as type VARCHAR and can be
@ -86,20 +97,12 @@ You can treat multi-value string dimensions as arrays using special
Grouping by multi-value dimensions observes the native Druid multi-value aggregation behavior, which is similar to an implicit SQL UNNEST. See [Grouping](multi-value-dimensions.md#grouping) for more information.
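For example, grouping directly on a hypothetical multi-value `tags` column produces one group per individual value, not per array:
```json
{
  "query": "SELECT tags, COUNT(*) AS cnt FROM example_table GROUP BY tags"
}
```
A row with `tags = ['a', 'b']` contributes to both the `a` group and the `b` group.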
:::info
Because the SQL planner treats multi-value dimensions as VARCHAR, there are some inconsistencies between how they are handled in Druid SQL and in native queries. For instance, expressions involving multi-value dimensions may be incorrectly optimized by the Druid SQL planner. For example, `multi_val_dim = 'a' AND multi_val_dim = 'b'` is optimized to
`false`, even though it is possible for a single row to have both `'a'` and `'b'` as values for `multi_val_dim`.
The SQL behavior of multi-value dimensions may change in a future release to more closely align with their behavior in native queries, but the [multi-value string functions](./sql-multivalue-string-functions.md) should be able to provide nearly all possible native functionality.
:::
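To match rows that contain both values, a workaround is to use the [multi-value string functions](./sql-multivalue-string-functions.md) instead of equality; a minimal sketch with hypothetical table and column names:
```json
{
  "query": "SELECT COUNT(*) FROM example_table WHERE MV_CONTAINS(multi_val_dim, ARRAY['a', 'b'])"
}
```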
## Multi-value strings behavior
The behavior of Druid [multi-value string dimensions](multi-value-dimensions.md) varies depending on the context of
@ -125,10 +128,11 @@ separately while processing.
When converted to ARRAY or used with [array functions](./sql-array-functions.md), multi-value strings behave as standard SQL arrays and can no longer
be manipulated with non-array functions.
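For example, the multi-value form and the array form of the same column use different function families (a sketch; `tags` is a hypothetical multi-value VARCHAR column):
```json
{
  "query": "SELECT MV_LENGTH(tags) AS mv_len, ARRAY_LENGTH(MV_TO_ARRAY(tags)) AS arr_len FROM example_table"
}
```
Both expressions return the same count for a given row.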
By default Druid serializes multi-value VARCHAR results as a JSON string of the array, if grouping was not applied on the value.
If the value was grouped, due to the implicit UNNEST behavior, all results will always be standard single value
VARCHAR. ARRAY typed result serialization is controlled with the context parameter [`sqlStringifyArrays`](sql-query-context.md). When set
to `false` and using JSON [result formats](../api-reference/sql-api.md#responses), the arrays are returned
as regular JSON arrays rather than in stringified form.
## NULL values
@ -170,7 +174,7 @@ You can interact with nested data using [JSON functions](./sql-json-functions.md
COMPLEX types have limited functionality outside the specialized functions that use them, so their behavior is undefined when:
* Grouping on complex values.
* Filtering directly on complex values.
* Used as inputs to aggregators without specialized handling for a specific complex type.
In many cases, functions are provided to translate COMPLEX value types to STRING, which serves as a workaround until COMPLEX type functionality can be improved.
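For example, a nested `COMPLEX<json>` column can be translated to a STRING with [`TO_JSON_STRING`](./sql-json-functions.md) before grouping (a sketch; the column name is hypothetical):
```json
{
  "query": "SELECT TO_JSON_STRING(nested_col) AS nested_str, COUNT(*) AS cnt FROM example_table GROUP BY 1"
}
```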


@ -87,7 +87,7 @@ documentation.
## UNNEST
The UNNEST clause unnests ARRAY typed values. The source for UNNEST can be an array type column, or an input that's been transformed into an array, such as with helper functions like [`MV_TO_ARRAY`](./sql-multivalue-string-functions.md) or [`ARRAY`](./sql-array-functions.md).
The following is the general syntax for UNNEST, specifically a query that returns the column that gets unnested:
@ -98,7 +98,7 @@ SELECT column_alias_name FROM datasource CROSS JOIN UNNEST(source_expression1) A
* The `datasource` for UNNEST can be any Druid datasource, such as the following:
* A table, such as `FROM a_table`.
* A subset of a table based on a query, a filter, or a JOIN. For example, `FROM (SELECT columnA,columnB,columnC from a_table)`.
* The `source_expression` for the UNNEST function must be an array and can come from any expression. UNNEST works directly on Druid ARRAY typed columns. If the column you are unnesting is a multi-value VARCHAR, you must specify `MV_TO_ARRAY(dimension)` to convert it to an ARRAY type (see the sketch after this list). You can also specify any expression that has an SQL array datatype. For example, you can call UNNEST on the following:
* `ARRAY[dim1,dim2]` if you want to make an array out of two dimensions.
* `ARRAY_CONCAT(dim1,dim2)` if you want to concatenate two multi-value dimensions.
* The `AS table_alias_name(column_alias_name)` clause is not required but is highly recommended. Use it to specify the output, which can be an existing column or a new one. Replace `table_alias_name` and `column_alias_name` with a table and column name you want to alias the unnested results to. If you don't provide this, Druid uses a nondescriptive name, such as `EXPR$0`.
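For example, a minimal sketch that unnests a hypothetical multi-value `tags` column into one row per value:
```json
{
  "query": "SELECT t.tag FROM example_table CROSS JOIN UNNEST(MV_TO_ARRAY(tags)) AS t(tag)"
}
```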
@ -115,8 +115,9 @@ For examples, see the [Unnest arrays tutorial](../tutorials/tutorial-unnest-arra
The UNNEST function has the following limitations:
- The function does not remove any duplicates or nulls in an array. Nulls will be treated as any other value in an array. If there are multiple nulls within the array, a record corresponding to each of the nulls gets created.
- You cannot perform an UNNEST at ingestion time, including SQL-based ingestion using the MSQ task engine.
- Arrays of complex objects inside complex JSON types are not supported.
UNNEST is the SQL equivalent of the [unnest datasource](./datasource.md#unnest).
## WHERE


@ -49,7 +49,6 @@ import org.apache.druid.query.aggregation.datasketches.hll.HllSketchToEstimateWi
import org.apache.druid.query.aggregation.datasketches.hll.HllSketchToStringPostAggregator;
import org.apache.druid.query.aggregation.datasketches.hll.HllSketchUnionPostAggregator;
import org.apache.druid.query.aggregation.post.ArithmeticPostAggregator;
import org.apache.druid.query.aggregation.post.ExpressionPostAggregator;
import org.apache.druid.query.aggregation.post.FieldAccessPostAggregator;
import org.apache.druid.query.aggregation.post.FinalizingFieldAccessPostAggregator;
import org.apache.druid.query.dimension.DefaultDimensionSpec;
@ -187,7 +186,10 @@ public class HllSketchSqlAggregatorTest extends BaseCalciteQueryTest
private static final List<AggregatorFactory> EXPECTED_FILTERED_AGGREGATORS =
EXPECTED_PA_AGGREGATORS.stream()
.limit(5)
.map(factory -> new FilteredAggregatorFactory(factory, equality("dim2", "a", ColumnType.STRING)))
.map(factory -> new FilteredAggregatorFactory(
factory,
equality("dim2", "a", ColumnType.STRING)
))
.collect(Collectors.toList());
/**
@ -198,15 +200,15 @@ public class HllSketchSqlAggregatorTest extends BaseCalciteQueryTest
ImmutableList.of(
new HllSketchToEstimatePostAggregator("p1", new FieldAccessPostAggregator("p0", "a0"), false),
new HllSketchToEstimatePostAggregator("p3", new FieldAccessPostAggregator("p2", "a0"), false),
new ExpressionPostAggregator("p4", "(\"p3\" + 1)", null, TestExprMacroTable.INSTANCE),
expressionPostAgg("p4", "(\"p3\" + 1)", ColumnType.DOUBLE),
new HllSketchToEstimatePostAggregator("p6", new FieldAccessPostAggregator("p5", "a3"), false),
new HllSketchToEstimatePostAggregator("p8", new FieldAccessPostAggregator("p7", "a0"), false),
new ExpressionPostAggregator("p9", "abs(\"p8\")", null, TestExprMacroTable.INSTANCE),
expressionPostAgg("p9", "abs(\"p8\")", ColumnType.DOUBLE),
new HllSketchToEstimateWithBoundsPostAggregator("p11", new FieldAccessPostAggregator("p10", "a0"), 2),
new HllSketchToEstimateWithBoundsPostAggregator("p13", new FieldAccessPostAggregator("p12", "a0"), 1),
new HllSketchToStringPostAggregator("p15", new FieldAccessPostAggregator("p14", "a0")),
new HllSketchToStringPostAggregator("p17", new FieldAccessPostAggregator("p16", "a0")),
new ExpressionPostAggregator("p18", "upper(\"p17\")", null, TestExprMacroTable.INSTANCE),
expressionPostAgg("p18", "upper(\"p17\")", ColumnType.STRING),
new HllSketchToEstimatePostAggregator("p20", new FieldAccessPostAggregator("p19", "a0"), true)
);
@ -726,41 +728,37 @@ public class HllSketchSqlAggregatorTest extends BaseCalciteQueryTest
)
)
.aggregators(
new HllSketchBuildAggregatorFactory("a0", "dim2", null, null, null, true, true),
new HllSketchBuildAggregatorFactory("a1", "m1", null, null, null, true, true),
new HllSketchBuildAggregatorFactory("a2", "v0", null, null, null, true, true),
new HllSketchBuildAggregatorFactory("a3", "v1", null, null, null, true, true),
new HllSketchBuildAggregatorFactory("a4", "dim2", null, null, null, true, true)
)
.postAggregators(
ImmutableList.of(
new HllSketchToEstimatePostAggregator("p1", new FieldAccessPostAggregator("p0", "a0"), false),
new HllSketchToEstimatePostAggregator("p3", new FieldAccessPostAggregator("p2", "a0"), false),
new ExpressionPostAggregator("p4", "(\"p3\" + 1)", null, TestExprMacroTable.INSTANCE),
new HllSketchToEstimatePostAggregator("p6", new FieldAccessPostAggregator("p5", "a2"), false),
new HllSketchToEstimatePostAggregator(
"p8",
new FieldAccessPostAggregator("p7", "a0"),
false
),
new ExpressionPostAggregator("p9", "abs(\"p8\")", null, TestExprMacroTable.INSTANCE),
new HllSketchToEstimateWithBoundsPostAggregator(
"p11",
new FieldAccessPostAggregator("p10", "a0"),
2
),
new HllSketchToEstimateWithBoundsPostAggregator(
"p13",
new FieldAccessPostAggregator("p12", "a0"),
1
),
new HllSketchToStringPostAggregator("p15", new FieldAccessPostAggregator("p14", "a0")),
new HllSketchToStringPostAggregator("p17", new FieldAccessPostAggregator("p16", "a0")),
new ExpressionPostAggregator("p18", "upper(\"p17\")", null, TestExprMacroTable.INSTANCE),
new HllSketchToEstimatePostAggregator("p20", new FieldAccessPostAggregator("p19", "a0"), true)
)
new HllSketchToEstimatePostAggregator("p1", new FieldAccessPostAggregator("p0", "a0"), false),
new HllSketchToEstimatePostAggregator("p3", new FieldAccessPostAggregator("p2", "a0"), false),
expressionPostAgg("p4", "(\"p3\" + 1)", ColumnType.DOUBLE),
new HllSketchToEstimatePostAggregator("p6", new FieldAccessPostAggregator("p5", "a2"), false),
new HllSketchToEstimatePostAggregator(
"p8",
new FieldAccessPostAggregator("p7", "a0"),
false
),
expressionPostAgg("p9", "abs(\"p8\")", ColumnType.DOUBLE),
new HllSketchToEstimateWithBoundsPostAggregator(
"p11",
new FieldAccessPostAggregator("p10", "a0"),
2
),
new HllSketchToEstimateWithBoundsPostAggregator(
"p13",
new FieldAccessPostAggregator("p12", "a0"),
1
),
new HllSketchToStringPostAggregator("p15", new FieldAccessPostAggregator("p14", "a0")),
new HllSketchToStringPostAggregator("p17", new FieldAccessPostAggregator("p16", "a0")),
expressionPostAgg("p18", "upper(\"p17\")", ColumnType.STRING),
new HllSketchToEstimatePostAggregator("p20", new FieldAccessPostAggregator("p19", "a0"), true)
)
.context(queryContext)
.build()


@ -25,7 +25,6 @@ import com.google.inject.Injector;
import org.apache.druid.common.config.NullHandling;
import org.apache.druid.guice.DruidInjectorBuilder;
import org.apache.druid.java.util.common.granularity.Granularities;
import org.apache.druid.math.expr.ExprMacroTable;
import org.apache.druid.query.Druids;
import org.apache.druid.query.QueryDataSource;
import org.apache.druid.query.QueryRunnerFactoryConglomerate;
@ -44,7 +43,6 @@ import org.apache.druid.query.aggregation.datasketches.quantiles.DoublesSketchTo
import org.apache.druid.query.aggregation.datasketches.quantiles.DoublesSketchToRankPostAggregator;
import org.apache.druid.query.aggregation.datasketches.quantiles.DoublesSketchToStringPostAggregator;
import org.apache.druid.query.aggregation.post.ArithmeticPostAggregator;
import org.apache.druid.query.aggregation.post.ExpressionPostAggregator;
import org.apache.druid.query.aggregation.post.FieldAccessPostAggregator;
import org.apache.druid.query.dimension.DefaultDimensionSpec;
import org.apache.druid.query.expression.TestExprMacroTable;
@ -538,11 +536,10 @@ public class DoublesSketchSqlAggregatorTest extends BaseCalciteQueryTest
makeFieldAccessPostAgg("a1:agg"),
0.5f
),
new ExpressionPostAggregator(
expressionPostAgg(
"p0",
"(\"a1\" + 1)",
null,
TestExprMacroTable.INSTANCE
ColumnType.DOUBLE
),
new DoublesSketchToQuantilePostAggregator(
"p2",
@ -552,11 +549,10 @@ public class DoublesSketchSqlAggregatorTest extends BaseCalciteQueryTest
),
0.5f
),
new ExpressionPostAggregator(
expressionPostAgg(
"p3",
"(\"p2\" + 1000)",
null,
TestExprMacroTable.INSTANCE
ColumnType.DOUBLE
),
new DoublesSketchToQuantilePostAggregator(
"p5",
@ -566,11 +562,10 @@ public class DoublesSketchSqlAggregatorTest extends BaseCalciteQueryTest
),
0.5f
),
new ExpressionPostAggregator(
expressionPostAgg(
"p6",
"(\"p5\" + 1000)",
null,
TestExprMacroTable.INSTANCE
ColumnType.DOUBLE
),
new DoublesSketchToQuantilePostAggregator(
"p8",
@ -580,7 +575,7 @@ public class DoublesSketchSqlAggregatorTest extends BaseCalciteQueryTest
),
0.5f
),
new ExpressionPostAggregator("p9", "abs(\"p8\")", null, TestExprMacroTable.INSTANCE),
expressionPostAgg("p9", "abs(\"p8\")", ColumnType.DOUBLE),
new DoublesSketchToQuantilesPostAggregator(
"p11",
new FieldAccessPostAggregator(
@ -629,13 +624,12 @@ public class DoublesSketchSqlAggregatorTest extends BaseCalciteQueryTest
"a2:agg"
)
),
new ExpressionPostAggregator(
expressionPostAgg(
"p22",
"replace(replace(\"p21\",'HeapCompactDoublesSketch','HeapUpdateDoublesSketch'),"
+ "'Combined Buffer Capacity : 6',"
+ "'Combined Buffer Capacity : 8')",
null,
ExprMacroTable.nil()
ColumnType.STRING
)
)
.context(QUERY_CONTEXT_DEFAULT)


@ -35,7 +35,6 @@ import org.apache.druid.query.aggregation.datasketches.tuple.ArrayOfDoublesSketc
import org.apache.druid.query.aggregation.datasketches.tuple.ArrayOfDoublesSketchOperations;
import org.apache.druid.query.aggregation.datasketches.tuple.ArrayOfDoublesSketchSetOpPostAggregator;
import org.apache.druid.query.aggregation.datasketches.tuple.ArrayOfDoublesSketchToMetricsSumEstimatePostAggregator;
import org.apache.druid.query.aggregation.post.ExpressionPostAggregator;
import org.apache.druid.query.aggregation.post.FieldAccessPostAggregator;
import org.apache.druid.query.dimension.DefaultDimensionSpec;
import org.apache.druid.query.groupby.GroupByQuery;
@ -295,29 +294,26 @@ public class ArrayOfDoublesSketchSqlAggregatorTest extends BaseCalciteQueryTest
)
)
.postAggregators(
ImmutableList.of(
new ArrayOfDoublesSketchToMetricsSumEstimatePostAggregator(
"p1",
new FieldAccessPostAggregator("p0", "a1")
),
new ArrayOfDoublesSketchToMetricsSumEstimatePostAggregator(
"p5",
new ArrayOfDoublesSketchSetOpPostAggregator(
"p4",
"INTERSECT",
128,
null,
ImmutableList.of(
new ExpressionPostAggregator(
"p2",
"complex_decode_base64('arrayOfDoublesSketch',"
+ expectedBase64Constant
+ ")",
null,
queryFramework().macroTable()
),
new FieldAccessPostAggregator("p3", "a1")
)
new ArrayOfDoublesSketchToMetricsSumEstimatePostAggregator(
"p1",
new FieldAccessPostAggregator("p0", "a1")
),
new ArrayOfDoublesSketchToMetricsSumEstimatePostAggregator(
"p5",
new ArrayOfDoublesSketchSetOpPostAggregator(
"p4",
"INTERSECT",
128,
null,
ImmutableList.of(
expressionPostAgg(
"p2",
"complex_decode_base64('arrayOfDoublesSketch',"
+ expectedBase64Constant
+ ")",
ColumnType.ofComplex("arrayOfDoublesSketch")
),
new FieldAccessPostAggregator("p3", "a1")
)
)
)
@ -391,7 +387,7 @@ public class ArrayOfDoublesSketchSqlAggregatorTest extends BaseCalciteQueryTest
ImmutableList.of(
new ArrayOfDoublesSketchToMetricsSumEstimatePostAggregator(
"p1",
new ExpressionPostAggregator("p0", "null", null, queryFramework().macroTable())
expressionPostAgg("p0", "null", null)
),
new ArrayOfDoublesSketchSetOpPostAggregator(
"p4",
@ -399,8 +395,8 @@ public class ArrayOfDoublesSketchSqlAggregatorTest extends BaseCalciteQueryTest
null,
null,
ImmutableList.of(
new ExpressionPostAggregator("p2", "null", null, queryFramework().macroTable()),
new ExpressionPostAggregator("p3", "null", null, queryFramework().macroTable())
expressionPostAgg("p2", "null", null),
expressionPostAgg("p3", "null", null)
)
),
new ArrayOfDoublesSketchSetOpPostAggregator(
@ -409,7 +405,7 @@ public class ArrayOfDoublesSketchSqlAggregatorTest extends BaseCalciteQueryTest
null,
null,
ImmutableList.of(
new ExpressionPostAggregator("p5", "null", null, queryFramework().macroTable()),
expressionPostAgg("p5", "null", null),
new FieldAccessPostAggregator("p6", "a1")
)
),
@ -420,7 +416,7 @@ public class ArrayOfDoublesSketchSqlAggregatorTest extends BaseCalciteQueryTest
null,
ImmutableList.of(
new FieldAccessPostAggregator("p8", "a1"),
new ExpressionPostAggregator("p9", "null", null, queryFramework().macroTable())
expressionPostAgg("p9", "null", null)
)
)
)


@ -91,7 +91,7 @@ public class DimensionSchemaUtils
if (elementType == ValueType.STRING) {
if (arrayIngestMode == ArrayIngestMode.NONE) {
throw InvalidInput.exception(
"String arrays can not be ingested when '%s' is set to '%s'. Either set '%s' in query context "
"String arrays can not be ingested when '%s' is set to '%s'. Set '%s' in query context "
+ "to 'array' to ingest the string array as an array, or ingest it as an MVD by explicitly casting the "
+ "array to an MVD with ARRAY_TO_MV function.",
MultiStageQueryContext.CTX_ARRAY_INGEST_MODE,


@ -777,6 +777,76 @@ public class MSQInsertTest extends MSQTestBase
.verifyResults();
}
@Test
public void testInsertOnFoo1WithArrayIngestModeArrayGroupByInsertAsArray()
{
RowSignature rowSignature = RowSignature.builder()
.add("__time", ColumnType.LONG)
.add("dim3", ColumnType.STRING_ARRAY).build();
final Map<String, Object> adjustedContext = new HashMap<>(context);
adjustedContext.put(MultiStageQueryContext.CTX_ARRAY_INGEST_MODE, "array");
testIngestQuery().setSql(
"INSERT INTO foo1 SELECT MV_TO_ARRAY(dim3) as dim3 FROM foo GROUP BY 1 PARTITIONED BY ALL TIME"
)
.setExpectedDataSource("foo1")
.setExpectedRowSignature(rowSignature)
.setQueryContext(adjustedContext)
.setExpectedSegment(ImmutableSet.of(SegmentId.of("foo1", Intervals.ETERNITY, "test", 0)))
.setExpectedResultRows(
NullHandling.replaceWithDefault() ?
ImmutableList.of(
new Object[]{0L, null},
new Object[]{0L, new Object[]{"a", "b"}},
new Object[]{0L, new Object[]{"b", "c"}},
new Object[]{0L, new Object[]{"d"}}
) : ImmutableList.of(
new Object[]{0L, null},
new Object[]{0L, new Object[]{"a", "b"}},
new Object[]{0L, new Object[]{""}},
new Object[]{0L, new Object[]{"b", "c"}},
new Object[]{0L, new Object[]{"d"}}
)
)
.verifyResults();
}
@Test
public void testInsertOnFoo1WithArrayIngestModeArrayGroupByInsertAsMvd()
{
RowSignature rowSignature = RowSignature.builder()
.add("__time", ColumnType.LONG)
.add("dim3", ColumnType.STRING).build();
final Map<String, Object> adjustedContext = new HashMap<>(context);
adjustedContext.put(MultiStageQueryContext.CTX_ARRAY_INGEST_MODE, "array");
testIngestQuery().setSql(
"INSERT INTO foo1 SELECT ARRAY_TO_MV(MV_TO_ARRAY(dim3)) as dim3 FROM foo GROUP BY MV_TO_ARRAY(dim3) PARTITIONED BY ALL TIME"
)
.setExpectedDataSource("foo1")
.setExpectedRowSignature(rowSignature)
.setQueryContext(adjustedContext)
.setExpectedSegment(ImmutableSet.of(SegmentId.of("foo1", Intervals.ETERNITY, "test", 0)))
.setExpectedResultRows(
NullHandling.replaceWithDefault() ?
ImmutableList.of(
new Object[]{0L, null},
new Object[]{0L, Arrays.asList("a", "b")},
new Object[]{0L, Arrays.asList("b", "c")},
new Object[]{0L, "d"}
) : ImmutableList.of(
new Object[]{0L, null},
new Object[]{0L, ""},
new Object[]{0L, Arrays.asList("a", "b")},
new Object[]{0L, Arrays.asList("b", "c")},
new Object[]{0L, "d"}
)
)
.verifyResults();
}
@Test
public void testInsertOnFoo1WithMultiValueDimGroupByWithoutGroupByEnable()
{


@ -34,7 +34,6 @@ import org.apache.druid.java.util.common.Intervals;
import org.apache.druid.java.util.common.StringUtils;
import org.apache.druid.java.util.common.granularity.Granularities;
import org.apache.druid.math.expr.ExprEval;
import org.apache.druid.math.expr.ExprMacroTable;
import org.apache.druid.msq.indexing.MSQSpec;
import org.apache.druid.msq.indexing.MSQTuningConfig;
import org.apache.druid.msq.indexing.destination.DurableStorageMSQDestination;
@ -58,7 +57,6 @@ import org.apache.druid.query.aggregation.DoubleSumAggregatorFactory;
import org.apache.druid.query.aggregation.FilteredAggregatorFactory;
import org.apache.druid.query.aggregation.cardinality.CardinalityAggregatorFactory;
import org.apache.druid.query.aggregation.post.ArithmeticPostAggregator;
import org.apache.druid.query.aggregation.post.ExpressionPostAggregator;
import org.apache.druid.query.aggregation.post.FieldAccessPostAggregator;
import org.apache.druid.query.dimension.DefaultDimensionSpec;
import org.apache.druid.query.expression.TestExprMacroTable;
@ -1825,14 +1823,14 @@ public class MSQSelectTest extends MSQTestBase
)
)
)
.setAggregatorSpecs(aggregators(new CountAggregatorFactory(
"a0")))
.setAggregatorSpecs(
aggregators(new CountAggregatorFactory("a0"))
)
.setPostAggregatorSpecs(
ImmutableList.of(new ExpressionPostAggregator(
"p0",
"mv_to_array(\"d0\")",
null, ExprMacroTable.nil()
)
expressionPostAgg(
"p0",
"mv_to_array(\"d0\")",
ColumnType.STRING_ARRAY
)
)
.setContext(localContext)
@ -2216,11 +2214,10 @@ public class MSQSelectTest extends MSQTestBase
.setQuerySegmentSpec(querySegmentSpec(Intervals.ETERNITY))
.setGranularity(Granularities.ALL)
.setPostAggregatorSpecs(
ImmutableList.of(new ExpressionPostAggregator(
"a0",
"1",
null, ExprMacroTable.nil()
)
expressionPostAgg(
"a0",
"1",
ColumnType.LONG
)
)
.build()


@ -31,7 +31,9 @@ import com.google.common.collect.ImmutableMap;
import com.google.common.collect.Maps;
import org.apache.druid.java.util.common.guava.Comparators;
import org.apache.druid.math.expr.Expr;
import org.apache.druid.math.expr.ExprEval;
import org.apache.druid.math.expr.ExprMacroTable;
import org.apache.druid.math.expr.ExprType;
import org.apache.druid.math.expr.ExpressionType;
import org.apache.druid.math.expr.InputBindings;
import org.apache.druid.math.expr.Parser;
@ -40,6 +42,8 @@ import org.apache.druid.query.aggregation.PostAggregator;
import org.apache.druid.query.cache.CacheKeyBuilder;
import org.apache.druid.segment.ColumnInspector;
import org.apache.druid.segment.column.ColumnType;
import org.apache.druid.segment.column.ValueType;
import org.apache.druid.segment.virtual.ExpressionSelectors;
import javax.annotation.Nullable;
import java.util.Comparator;
@ -74,6 +78,12 @@ public class ExpressionPostAggregator implements PostAggregator
private final Supplier<Set<String>> dependentFields;
private final Supplier<byte[]> cacheKey;
@Nullable
private final ColumnType outputType;
@Nullable
private final ExpressionType expressionType;
/**
* Constructor for deserialization.
*/
@ -82,6 +92,7 @@ public class ExpressionPostAggregator implements PostAggregator
@JsonProperty("name") String name,
@JsonProperty("expression") String expression,
@JsonProperty("ordering") @Nullable String ordering,
@JsonProperty("outputType") @Nullable ColumnType outputType,
@JacksonInject ExprMacroTable macroTable
)
{
@ -89,6 +100,7 @@ public class ExpressionPostAggregator implements PostAggregator
name,
expression,
ordering,
outputType,
Parser.lazyParse(expression, macroTable)
);
}
@ -100,6 +112,7 @@ public class ExpressionPostAggregator implements PostAggregator
final String name,
final String expression,
@Nullable final String ordering,
@Nullable final ColumnType outputType,
final Expr parsed
)
{
@ -107,6 +120,7 @@ public class ExpressionPostAggregator implements PostAggregator
name,
expression,
ordering,
outputType,
() -> parsed
);
}
@ -118,6 +132,7 @@ public class ExpressionPostAggregator implements PostAggregator
final String name,
final String expression,
@Nullable final String ordering,
@Nullable final ColumnType outputType,
final Supplier<Expr> parsed
)
{
@ -125,6 +140,7 @@ public class ExpressionPostAggregator implements PostAggregator
name,
expression,
ordering,
outputType,
ImmutableMap.of(),
InputBindings.nilBindings(),
parsed,
@ -136,6 +152,7 @@ public class ExpressionPostAggregator implements PostAggregator
final String name,
final String expression,
@Nullable final String ordering,
@Nullable final ColumnType outputType,
final Map<String, Function<Object, Object>> finalizers,
final Expr.InputBindingInspector partialTypeInformation,
final Supplier<Expr> parsed,
@ -147,19 +164,24 @@ public class ExpressionPostAggregator implements PostAggregator
this.name = name;
this.expression = expression;
this.ordering = ordering;
// comparator should be specialized to output type ... someday
this.comparator = ordering == null ? DEFAULT_COMPARATOR : Ordering.valueOf(ordering);
this.outputType = outputType;
this.expressionType = outputType != null ? ExpressionType.fromColumnTypeStrict(outputType) : null;
if (outputType != null && ordering == null) {
this.comparator = outputType.getNullableStrategy();
} else {
this.comparator = ordering == null ? DEFAULT_COMPARATOR : Ordering.valueOf(ordering);
}
this.finalizers = finalizers;
this.partialTypeInformation = partialTypeInformation;
this.parsed = parsed;
this.dependentFields = dependentFields;
this.cacheKey = Suppliers.memoize(() -> {
return new CacheKeyBuilder(PostAggregatorIds.EXPRESSION)
.appendCacheable(parsed.get())
.appendString(ordering)
.build();
});
this.cacheKey = Suppliers.memoize(() -> new CacheKeyBuilder(PostAggregatorIds.EXPRESSION)
.appendCacheable(parsed.get())
.appendString(ordering)
.appendString(outputType != null ? outputType.asTypeString() : null)
.build()
);
}
@Override
@ -188,7 +210,22 @@ public class ExpressionPostAggregator implements PostAggregator
// we use partialTypeInformation to avoid unnecessarily coercing aggregator values for which we do have type info
// from decoration
return parsed.get().eval(InputBindings.forMap(finalizedValues, partialTypeInformation)).valueOrDefault();
final ExprEval<?> eval = parsed.get().eval(InputBindings.forMap(finalizedValues, partialTypeInformation));
if (expressionType == null) {
return eval.valueOrDefault();
}
// outputType cannot be null if expressionType is not null
if (outputType.is(ValueType.FLOAT) && !eval.isNumericNull()) {
return (float) eval.asDouble();
}
if (eval.type().equals(expressionType)) {
return eval.valueOrDefault();
}
if (expressionType.is(ExprType.STRING) && eval.isArray()) {
return ExpressionSelectors.coerceEvalToObjectOrList(eval);
}
// coerce to expected type
return eval.castTo(expressionType).valueOrDefault();
}
@Override
@ -201,6 +238,10 @@ public class ExpressionPostAggregator implements PostAggregator
@Override
public ColumnType getType(ColumnInspector signature)
{
if (outputType != null) {
return outputType;
}
// no output type specified, use type inference
final ExpressionType type = parsed.get().getOutputType(signature);
if (type == null) {
return null;
@ -222,6 +263,7 @@ public class ExpressionPostAggregator implements PostAggregator
name,
expression,
ordering,
outputType,
finalizers,
InputBindings.inspectorFromTypeMap(types),
parsed,
@ -243,6 +285,14 @@ public class ExpressionPostAggregator implements PostAggregator
return ordering;
}
@Nullable
@JsonProperty("outputType")
@JsonInclude(JsonInclude.Include.NON_NULL)
public ColumnType getOutputType()
{
return outputType;
}
@Override
public String toString()
{
@ -250,6 +300,7 @@ public class ExpressionPostAggregator implements PostAggregator
"name='" + name + '\'' +
", expression='" + expression + '\'' +
", ordering=" + ordering +
", outputType=" + outputType +
'}';
}
@ -265,7 +316,7 @@ public class ExpressionPostAggregator implements PostAggregator
* Ensures the following order: numeric > NaN > Infinite.
*
* The name may be referenced via Ordering.valueOf(String) in the constructor {@link
* ExpressionPostAggregator#ExpressionPostAggregator(String, String, String, Map, Expr.InputBindingInspector, Supplier, Supplier)}.
* ExpressionPostAggregator#ExpressionPostAggregator(String, String, String, ColumnType, Map, Expr.InputBindingInspector, Supplier, Supplier)}.
*/
@SuppressWarnings("unused")
numericFirst {
@ -316,12 +367,12 @@ public class ExpressionPostAggregator implements PostAggregator
return false;
}
return true;
return Objects.equals(outputType, that.outputType);
}
@Override
public int hashCode()
{
return Objects.hash(name, expression, comparator, ordering);
return Objects.hash(name, expression, comparator, ordering, outputType);
}
}


@ -1188,6 +1188,13 @@ public class GroupByQuery extends BaseQuery<ResultRow>
return this;
}
public Builder setPostAggregatorSpecs(PostAggregator... postAggregatorSpecs)
{
this.postAggregatorSpecs = Lists.newArrayList(postAggregatorSpecs);
this.postProcessingFn = null;
return this;
}
public Builder setContext(Map<String, Object> context)
{
this.context = context;


@ -75,22 +75,22 @@ public class ArithmeticPostAggregatorTest extends InitializedNullHandlingTest
}
arithmeticPostAggregator = new ArithmeticPostAggregator("add", "+", postAggregatorList);
expressionPostAggregator = new ExpressionPostAggregator("add", "roku + rows", null, TestExprMacroTable.INSTANCE);
expressionPostAggregator = new ExpressionPostAggregator("add", "roku + rows", null, null, TestExprMacroTable.INSTANCE);
Assert.assertEquals(9.0, arithmeticPostAggregator.compute(metricValues));
Assert.assertEquals(9.0, expressionPostAggregator.compute(metricValues));
arithmeticPostAggregator = new ArithmeticPostAggregator("subtract", "-", postAggregatorList);
expressionPostAggregator = new ExpressionPostAggregator("add", "roku - rows", null, TestExprMacroTable.INSTANCE);
expressionPostAggregator = new ExpressionPostAggregator("add", "roku - rows", null, null, TestExprMacroTable.INSTANCE);
Assert.assertEquals(3.0, arithmeticPostAggregator.compute(metricValues));
Assert.assertEquals(3.0, expressionPostAggregator.compute(metricValues));
arithmeticPostAggregator = new ArithmeticPostAggregator("multiply", "*", postAggregatorList);
expressionPostAggregator = new ExpressionPostAggregator("add", "roku * rows", null, TestExprMacroTable.INSTANCE);
expressionPostAggregator = new ExpressionPostAggregator("add", "roku * rows", null, null, TestExprMacroTable.INSTANCE);
Assert.assertEquals(18.0, arithmeticPostAggregator.compute(metricValues));
Assert.assertEquals(18.0, expressionPostAggregator.compute(metricValues));
arithmeticPostAggregator = new ArithmeticPostAggregator("divide", "/", postAggregatorList);
expressionPostAggregator = new ExpressionPostAggregator("add", "roku / rows", null, TestExprMacroTable.INSTANCE);
expressionPostAggregator = new ExpressionPostAggregator("add", "roku / rows", null, null, TestExprMacroTable.INSTANCE);
Assert.assertEquals(2.0, arithmeticPostAggregator.compute(metricValues));
Assert.assertEquals(2.0, expressionPostAggregator.compute(metricValues));
}


@ -21,6 +21,7 @@ package org.apache.druid.query.aggregation.post;
import com.fasterxml.jackson.core.JsonProcessingException;
import com.fasterxml.jackson.databind.ObjectMapper;
import com.google.common.collect.ImmutableList;
import nl.jqno.equalsverifier.EqualsVerifier;
import org.apache.druid.java.util.common.granularity.Granularities;
import org.apache.druid.math.expr.SettableObjectBinding;
@ -49,6 +50,24 @@ public class ExpressionPostAggregatorTest extends InitializedNullHandlingTest
"p0",
"2 + 3",
null,
null,
TestExprMacroTable.INSTANCE
);
Assert.assertEquals(
postAgg,
JSON_MAPPER.readValue(JSON_MAPPER.writeValueAsString(postAgg), ExpressionPostAggregator.class)
);
}
@Test
public void testSerdeOutputType() throws JsonProcessingException
{
ExpressionPostAggregator postAgg = new ExpressionPostAggregator(
"p0",
"2 + 3",
null,
ColumnType.LONG,
TestExprMacroTable.INSTANCE
);
@ -63,7 +82,7 @@ public class ExpressionPostAggregatorTest extends InitializedNullHandlingTest
{
EqualsVerifier.forClass(ExpressionPostAggregator.class)
.usingGetClass()
.withIgnoredFields("finalizers", "parsed", "dependentFields", "cacheKey", "partialTypeInformation")
.withIgnoredFields("finalizers", "parsed", "dependentFields", "cacheKey", "partialTypeInformation", "expressionType")
.verify();
}
@ -74,6 +93,7 @@ public class ExpressionPostAggregatorTest extends InitializedNullHandlingTest
"p0",
"x + y",
null,
null,
TestExprMacroTable.INSTANCE
);
@ -90,6 +110,142 @@ public class ExpressionPostAggregatorTest extends InitializedNullHandlingTest
Assert.assertEquals(5.0, postAgg.compute(binding.asMap()));
}
@Test
public void testExplicitOutputTypeAndCompute()
{
ExpressionPostAggregator postAgg = new ExpressionPostAggregator(
"p0",
"x + y",
null,
ColumnType.FLOAT,
TestExprMacroTable.INSTANCE
);
RowSignature signature = RowSignature.builder()
.add("x", ColumnType.LONG)
.add("y", ColumnType.FLOAT)
.build();
SettableObjectBinding binding = new SettableObjectBinding().withBinding("x", 2L)
.withBinding("y", 3.0);
Assert.assertEquals(ColumnType.FLOAT, postAgg.getType(signature));
Assert.assertEquals(5.0f, postAgg.compute(binding.asMap()));
}
@Test
public void testExplicitOutputTypeAndComputeComparison()
{
ExpressionPostAggregator postAgg = new ExpressionPostAggregator(
"p0",
"array(x, y)",
null,
ColumnType.LONG_ARRAY,
TestExprMacroTable.INSTANCE
);
RowSignature signature = RowSignature.builder()
.add("x", ColumnType.LONG)
.add("y", ColumnType.LONG)
.build();
SettableObjectBinding binding = new SettableObjectBinding().withBinding("x", 2L)
.withBinding("y", 3L);
SettableObjectBinding binding2 = new SettableObjectBinding().withBinding("x", 3L)
.withBinding("y", 4L);
Assert.assertEquals(ColumnType.LONG_ARRAY, postAgg.getType(signature));
Assert.assertArrayEquals(new Object[]{2L, 3L}, (Object[]) postAgg.compute(binding.asMap()));
Assert.assertArrayEquals(new Object[]{3L, 4L}, (Object[]) postAgg.compute(binding2.asMap()));
Assert.assertEquals(
-1,
postAgg.getComparator().compare(postAgg.compute(binding.asMap()), postAgg.compute(binding2.asMap()))
);
}
@Test
public void testExplicitOutputTypeAndComputeArrayNoType()
{
ExpressionPostAggregator postAgg = new ExpressionPostAggregator(
"p0",
"array(x, y)",
null,
null,
TestExprMacroTable.INSTANCE
);
RowSignature signature = RowSignature.builder()
.add("x", ColumnType.STRING)
.add("y", ColumnType.STRING)
.build();
SettableObjectBinding binding = new SettableObjectBinding().withBinding("x", "abc")
.withBinding("y", "def");
Assert.assertEquals(ColumnType.STRING_ARRAY, postAgg.getType(signature));
Assert.assertArrayEquals(new Object[]{"abc", "def"}, (Object[]) postAgg.compute(binding.asMap()));
SettableObjectBinding binding2 = new SettableObjectBinding().withBinding("x", "abc")
.withBinding("y", "abc");
// ordering by arrays doesn't work if no outputType is specified...
Assert.assertThrows(
ClassCastException.class,
() -> postAgg.getComparator().compare(postAgg.compute(binding.asMap()), postAgg.compute(binding2.asMap()))
);
}
@Test
public void testExplicitOutputTypeAndComputeMultiValueDimension()
{
ExpressionPostAggregator postAgg = new ExpressionPostAggregator(
"p0",
"array(x, y)",
null,
ColumnType.STRING,
TestExprMacroTable.INSTANCE
);
RowSignature signature = RowSignature.builder()
.add("x", ColumnType.STRING)
.add("y", ColumnType.STRING)
.build();
SettableObjectBinding binding = new SettableObjectBinding().withBinding("x", "abc")
.withBinding("y", "def");
Assert.assertEquals(ColumnType.STRING, postAgg.getType(signature));
Assert.assertEquals(ImmutableList.of("abc", "def"), postAgg.compute(binding.asMap()));
}
@Test
public void testExplicitOutputTypeAndComputeMultiValueDimensionWithSingleElement()
{
ExpressionPostAggregator postAgg = new ExpressionPostAggregator(
"p0",
"array(x)",
null,
ColumnType.STRING,
TestExprMacroTable.INSTANCE
);
RowSignature signature = RowSignature.builder()
.add("x", ColumnType.STRING)
.build();
SettableObjectBinding binding = new SettableObjectBinding().withBinding("x", "abc");
Assert.assertEquals(ColumnType.STRING, postAgg.getType(signature));
Assert.assertEquals("abc", postAgg.compute(binding.asMap()));
}
@Test
public void testNilOutputType()
{
@ -97,6 +253,7 @@ public class ExpressionPostAggregatorTest extends InitializedNullHandlingTest
"p0",
"x + y",
null,
null,
TestExprMacroTable.INSTANCE
);
@ -120,10 +277,11 @@ public class ExpressionPostAggregatorTest extends InitializedNullHandlingTest
new FloatSumAggregatorFactory("float", "col2")
)
.postAggregators(
new ExpressionPostAggregator("a", "double + float", null, TestExprMacroTable.INSTANCE),
new ExpressionPostAggregator("b", "count + count", null, TestExprMacroTable.INSTANCE),
new ExpressionPostAggregator("c", "count + double", null, TestExprMacroTable.INSTANCE),
new ExpressionPostAggregator("d", "float + float", null, TestExprMacroTable.INSTANCE)
new ExpressionPostAggregator("a", "double + float", null, null, TestExprMacroTable.INSTANCE),
new ExpressionPostAggregator("b", "count + count", null, null, TestExprMacroTable.INSTANCE),
new ExpressionPostAggregator("c", "count + double", null, null, TestExprMacroTable.INSTANCE),
new ExpressionPostAggregator("d", "float + float", null, null, TestExprMacroTable.INSTANCE),
new ExpressionPostAggregator("e", "float + float", null, ColumnType.FLOAT, TestExprMacroTable.INSTANCE)
)
.build();
@ -137,6 +295,7 @@ public class ExpressionPostAggregatorTest extends InitializedNullHandlingTest
.add("b", ColumnType.LONG)
.add("c", ColumnType.DOUBLE)
.add("d", ColumnType.DOUBLE) // floats don't exist in expressions
.add("e", ColumnType.FLOAT) // but can be explicitly specified
.build(),
new TimeseriesQueryQueryToolChest().resultArraySignature(query)
);


@ -106,7 +106,7 @@ public class GroupByQueryQueryToolChestTest extends InitializedNullHandlingTest
.setAggregatorSpecs(QueryRunnerTestHelper.ROWS_COUNT, new LongSumAggregatorFactory("idx", "index"))
.setPostAggregatorSpecs(
ImmutableList.of(
new ExpressionPostAggregator("post", "alias + 'x'", null, TestExprMacroTable.INSTANCE)
new ExpressionPostAggregator("post", "alias + 'x'", null, null, TestExprMacroTable.INSTANCE)
)
)
.setGranularity(QueryRunnerTestHelper.DAY_GRAN)
@ -120,7 +120,7 @@ public class GroupByQueryQueryToolChestTest extends InitializedNullHandlingTest
.setAggregatorSpecs(QueryRunnerTestHelper.ROWS_COUNT, new LongSumAggregatorFactory("idx", "index"))
.setPostAggregatorSpecs(
ImmutableList.of(
new ExpressionPostAggregator("post", "alias - 'x'", null, TestExprMacroTable.INSTANCE)
new ExpressionPostAggregator("post", "alias - 'x'", null, null, TestExprMacroTable.INSTANCE)
)
)
.setGranularity(QueryRunnerTestHelper.DAY_GRAN)
@ -152,7 +152,7 @@ public class GroupByQueryQueryToolChestTest extends InitializedNullHandlingTest
.setAggregatorSpecs(QueryRunnerTestHelper.ROWS_COUNT, new LongSumAggregatorFactory("idx", "index"))
.setPostAggregatorSpecs(
ImmutableList.of(
new ExpressionPostAggregator("post", "alias + 'x'", null, TestExprMacroTable.INSTANCE)
new ExpressionPostAggregator("post", "alias + 'x'", null, null, TestExprMacroTable.INSTANCE)
)
)
.setGranularity(QueryRunnerTestHelper.DAY_GRAN)
@ -174,7 +174,7 @@ public class GroupByQueryQueryToolChestTest extends InitializedNullHandlingTest
.setAggregatorSpecs(QueryRunnerTestHelper.ROWS_COUNT, new LongSumAggregatorFactory("idx", "index"))
.setPostAggregatorSpecs(
ImmutableList.of(
new ExpressionPostAggregator("post", "alias - 'x'", null, TestExprMacroTable.INSTANCE)
new ExpressionPostAggregator("post", "alias - 'x'", null, null, TestExprMacroTable.INSTANCE)
)
)
.setGranularity(QueryRunnerTestHelper.DAY_GRAN)
@ -214,7 +214,7 @@ public class GroupByQueryQueryToolChestTest extends InitializedNullHandlingTest
.setAggregatorSpecs(QueryRunnerTestHelper.ROWS_COUNT, new LongSumAggregatorFactory("idx", "index"))
.setPostAggregatorSpecs(
ImmutableList.of(
new ExpressionPostAggregator("post", "alias + 'x'", null, TestExprMacroTable.INSTANCE)
new ExpressionPostAggregator("post", "alias + 'x'", null, null, TestExprMacroTable.INSTANCE)
)
)
.setGranularity(QueryRunnerTestHelper.DAY_GRAN)
@ -237,7 +237,7 @@ public class GroupByQueryQueryToolChestTest extends InitializedNullHandlingTest
.setAggregatorSpecs(QueryRunnerTestHelper.ROWS_COUNT, new LongSumAggregatorFactory("idx", "index"))
.setPostAggregatorSpecs(
ImmutableList.of(
new ExpressionPostAggregator("post", "alias + 'x'", null, TestExprMacroTable.INSTANCE)
new ExpressionPostAggregator("post", "alias + 'x'", null, null, TestExprMacroTable.INSTANCE)
)
)
.setGranularity(QueryRunnerTestHelper.DAY_GRAN)
@ -300,7 +300,7 @@ public class GroupByQueryQueryToolChestTest extends InitializedNullHandlingTest
.setAggregatorSpecs(QueryRunnerTestHelper.ROWS_COUNT, new LongSumAggregatorFactory("idx", "index"))
.setPostAggregatorSpecs(
ImmutableList.of(
new ExpressionPostAggregator("post", "alias + 'x'", null, TestExprMacroTable.INSTANCE)
new ExpressionPostAggregator("post", "alias + 'x'", null, null, TestExprMacroTable.INSTANCE)
)
)
.setGranularity(QueryRunnerTestHelper.DAY_GRAN)
@ -323,7 +323,7 @@ public class GroupByQueryQueryToolChestTest extends InitializedNullHandlingTest
.setAggregatorSpecs(QueryRunnerTestHelper.ROWS_COUNT, new LongSumAggregatorFactory("idx", "index"))
.setPostAggregatorSpecs(
ImmutableList.of(
new ExpressionPostAggregator("post", "alias + 'x'", null, TestExprMacroTable.INSTANCE)
new ExpressionPostAggregator("post", "alias + 'x'", null, null, TestExprMacroTable.INSTANCE)
)
)
.setGranularity(QueryRunnerTestHelper.DAY_GRAN)
@ -393,7 +393,7 @@ public class GroupByQueryQueryToolChestTest extends InitializedNullHandlingTest
.setAggregatorSpecs(QueryRunnerTestHelper.ROWS_COUNT, new LongSumAggregatorFactory("idx", "index"))
.setPostAggregatorSpecs(
ImmutableList.of(
new ExpressionPostAggregator("post", "alias + 'x'", null, TestExprMacroTable.INSTANCE)
new ExpressionPostAggregator("post", "alias + 'x'", null, null, TestExprMacroTable.INSTANCE)
)
)
.setGranularity(QueryRunnerTestHelper.DAY_GRAN)
@ -416,7 +416,7 @@ public class GroupByQueryQueryToolChestTest extends InitializedNullHandlingTest
.setAggregatorSpecs(QueryRunnerTestHelper.ROWS_COUNT, new LongSumAggregatorFactory("idx", "index"))
.setPostAggregatorSpecs(
ImmutableList.of(
new ExpressionPostAggregator("post", "alias + 'x'", null, TestExprMacroTable.INSTANCE)
new ExpressionPostAggregator("post", "alias + 'x'", null, null, TestExprMacroTable.INSTANCE)
)
)
.setGranularity(QueryRunnerTestHelper.DAY_GRAN)


@ -742,7 +742,7 @@ public class GroupByQueryRunnerTest extends InitializedNullHandlingTest
.setAggregatorSpecs(QueryRunnerTestHelper.ROWS_COUNT, new LongSumAggregatorFactory("idx", "index"))
.setPostAggregatorSpecs(
ImmutableList.of(
new ExpressionPostAggregator("post", "alias + 'x'", null, TestExprMacroTable.INSTANCE)
new ExpressionPostAggregator("post", "alias + 'x'", null, null, TestExprMacroTable.INSTANCE)
)
)
.setGranularity(QueryRunnerTestHelper.DAY_GRAN)
@ -5343,7 +5343,7 @@ public class GroupByQueryRunnerTest extends InitializedNullHandlingTest
// Same query, but with expressions instead of arithmetic.
final GroupByQuery expressionQuery = query.withPostAggregatorSpecs(
Collections.singletonList(
new ExpressionPostAggregator("rows_times_10", "rows * 10.0", null, TestExprMacroTable.INSTANCE)
new ExpressionPostAggregator("rows_times_10", "rows * 10.0", null, null, TestExprMacroTable.INSTANCE)
)
);
@ -12964,7 +12964,7 @@ public class GroupByQueryRunnerTest extends InitializedNullHandlingTest
)
.setPostAggregatorSpecs(
ImmutableList.of(
new ExpressionPostAggregator("post", "idx * 2", null, TestExprMacroTable.INSTANCE)))
new ExpressionPostAggregator("post", "idx * 2", null, null, TestExprMacroTable.INSTANCE)))
.setGranularity(QueryRunnerTestHelper.ALL_GRAN)
.build();


@ -521,6 +521,7 @@ public class TopNQueryRunnerTest extends InitializedNullHandlingTest
"dimPostAgg",
"market + 'x'",
null,
null,
TestExprMacroTable.INSTANCE
)
)
@ -727,6 +728,7 @@ public class TopNQueryRunnerTest extends InitializedNullHandlingTest
QueryRunnerTestHelper.HYPER_UNIQUE_FINALIZING_POST_AGG_METRIC,
"uniques + 1",
null,
null,
TestExprMacroTable.INSTANCE
)
)
@ -781,6 +783,7 @@ public class TopNQueryRunnerTest extends InitializedNullHandlingTest
QueryRunnerTestHelper.HYPER_UNIQUE_FINALIZING_POST_AGG_METRIC,
"uniques + 1",
null,
null,
TestExprMacroTable.INSTANCE
)
)


@ -77,7 +77,7 @@ public class LiteralSqlAggregator implements SqlAggregator
return Aggregation.create(
ImmutableList.of(),
new ExpressionPostAggregator(name, expr.getExpression(), null, plannerContext.getExprMacroTable())
new ExpressionPostAggregator(name, expr.getExpression(), null, expr.getDruidType(), plannerContext.getExprMacroTable())
);
}
}


@ -296,6 +296,7 @@ public class OperatorConversions
postAggregatorVisitor.getOutputNamePrefix() + postAggregatorVisitor.getAndIncrementCounter(),
druidExpression.getExpression(),
null,
druidExpression.getDruidType(),
plannerContext.parseExpression(druidExpression.getExpression())
);
}


@ -175,6 +175,7 @@ public class Projection
postAggregatorVisitor.getOutputNamePrefix() + postAggregatorVisitor.getAndIncrementCounter(),
postAggregatorExpression.getExpression(),
null,
postAggregatorExpression.getDruidType(),
plannerContext.parseExpression(postAggregatorExpression.getExpression())
);
postAggregatorVisitor.addPostAgg(postAggregator);


@ -602,9 +602,9 @@ public class BaseCalciteQueryTest extends CalciteTestBase
return StringUtils.format("(%s == %s)", left.getExpression(), right.getExpression());
}
public static ExpressionPostAggregator expressionPostAgg(final String name, final String expression)
public static ExpressionPostAggregator expressionPostAgg(final String name, final String expression, ColumnType outputType)
{
return new ExpressionPostAggregator(name, expression, null, CalciteTests.createExprMacroTable());
return new ExpressionPostAggregator(name, expression, null, outputType, CalciteTests.createExprMacroTable());
}
public static Druids.ScanQueryBuilder newScanQueryBuilder()


@ -50,7 +50,6 @@ import org.apache.druid.query.aggregation.DoubleSumAggregatorFactory;
import org.apache.druid.query.aggregation.ExpressionLambdaAggregatorFactory;
import org.apache.druid.query.aggregation.FilteredAggregatorFactory;
import org.apache.druid.query.aggregation.LongSumAggregatorFactory;
import org.apache.druid.query.aggregation.post.ExpressionPostAggregator;
import org.apache.druid.query.dimension.DefaultDimensionSpec;
import org.apache.druid.query.expression.TestExprMacroTable;
import org.apache.druid.query.extraction.SubstringDimExtractionFn;
@ -2599,7 +2598,7 @@ public class CalciteArraysQueryTest extends BaseCalciteQueryTest
)
)
.postAggregators(
expressionPostAgg("p0", "array_quantile(\"a0\",0.9)")
expressionPostAgg("p0", "array_quantile(\"a0\",0.9)", ColumnType.DOUBLE)
)
.context(QUERY_CONTEXT_DEFAULT)
.build()
@ -2973,7 +2972,7 @@ public class CalciteArraysQueryTest extends BaseCalciteQueryTest
)
)
)
.postAggregators(expressionPostAgg("p0", "array_to_string(\"a0\",',')"))
.postAggregators(expressionPostAgg("p0", "array_to_string(\"a0\",',')", ColumnType.STRING))
.context(QUERY_CONTEXT_DEFAULT)
.build()
),
@ -3017,7 +3016,7 @@ public class CalciteArraysQueryTest extends BaseCalciteQueryTest
)
)
)
.postAggregators(expressionPostAgg("p0", "array_to_string(\"a0\",',')"))
.postAggregators(expressionPostAgg("p0", "array_to_string(\"a0\",',')", ColumnType.STRING))
.context(QUERY_CONTEXT_DEFAULT)
.build()
),
@ -3209,7 +3208,7 @@ public class CalciteArraysQueryTest extends BaseCalciteQueryTest
.intervals(querySegmentSpec(Filtration.eternity()))
.granularity(Granularities.ALL)
.context(QUERY_CONTEXT_NO_STRINGIFY_ARRAY)
.postAggregators(new ExpressionPostAggregator("s0", "1", null, ExprMacroTable.nil()))
.postAggregators(expressionPostAgg("s0", "1", ColumnType.LONG))
.build()
),
useDefault ?


@ -61,7 +61,6 @@ import org.apache.druid.query.aggregation.LongSumAggregatorFactory;
import org.apache.druid.query.aggregation.any.StringAnyAggregatorFactory;
import org.apache.druid.query.aggregation.cardinality.CardinalityAggregatorFactory;
import org.apache.druid.query.aggregation.post.ArithmeticPostAggregator;
import org.apache.druid.query.aggregation.post.ExpressionPostAggregator;
import org.apache.druid.query.aggregation.post.FieldAccessPostAggregator;
import org.apache.druid.query.dimension.DefaultDimensionSpec;
import org.apache.druid.query.dimension.ExtractionDimensionSpec;
@ -2604,7 +2603,7 @@ public class CalciteJoinQueryTest extends BaseCalciteQueryTest
.intervals(querySegmentSpec(Filtration.eternity()))
.granularity(Granularities.ALL)
.aggregators(new LongMaxAggregatorFactory("a0", "__time"))
.postAggregators(expressionPostAgg("p0", "1"))
.postAggregators(expressionPostAgg("p0", "1", ColumnType.LONG))
.context(QUERY_CONTEXT_DEFAULT)
.build()
),
@ -4008,7 +4007,7 @@ public class CalciteJoinQueryTest extends BaseCalciteQueryTest
.intervals(querySegmentSpec(Filtration.eternity()))
.granularity(Granularities.ALL)
.aggregators(new LongMinAggregatorFactory("a0", "__time"))
.postAggregators(expressionPostAgg("p0", "1"))
.postAggregators(expressionPostAgg("p0", "1", ColumnType.LONG))
.context(QUERY_CONTEXT_DEFAULT)
.build()
),
@ -4671,7 +4670,7 @@ public class CalciteJoinQueryTest extends BaseCalciteQueryTest
new DefaultDimensionSpec("dim1", "d0")
)
)
.setPostAggregatorSpecs(ImmutableList.of(new ExpressionPostAggregator("a0", "1", null, ExprMacroTable.nil())))
.setPostAggregatorSpecs(expressionPostAgg("a0", "1", ColumnType.LONG))
.setContext(queryContext)
.build()
),
@ -5475,12 +5474,9 @@ public class CalciteJoinQueryTest extends BaseCalciteQueryTest
new DefaultDimensionSpec("dim2", "d0", ColumnType.STRING)
)
.setGranularity(Granularities.ALL)
.setPostAggregatorSpecs(ImmutableList.of(new ExpressionPostAggregator(
"a0",
"1",
null,
ExprMacroTable.nil()
)))
.setPostAggregatorSpecs(
expressionPostAgg("a0", "1", ColumnType.LONG)
)
.setLimitSpec(NoopLimitSpec.instance())
.build()
),
@ -5555,12 +5551,9 @@ public class CalciteJoinQueryTest extends BaseCalciteQueryTest
.setDimensions(
new DefaultDimensionSpec("dim2", "d0", ColumnType.STRING)
)
.setPostAggregatorSpecs(ImmutableList.of(new ExpressionPostAggregator(
"a0",
"1",
null,
ExprMacroTable.nil()
)))
.setPostAggregatorSpecs(
expressionPostAgg("a0", "1", ColumnType.LONG)
)
.setGranularity(Granularities.ALL)
.setLimitSpec(NoopLimitSpec.instance())
.build()
@ -5846,7 +5839,9 @@ public class CalciteJoinQueryTest extends BaseCalciteQueryTest
.setDimensions(
new DefaultDimensionSpec("m1", "d0", ColumnType.FLOAT)
)
.setPostAggregatorSpecs(ImmutableList.of(new ExpressionPostAggregator("a0", "1", null, ExprMacroTable.nil())))
.setPostAggregatorSpecs(
expressionPostAgg("a0", "1", ColumnType.LONG)
)
.build()
),
"j0.",
@ -5899,7 +5894,9 @@ public class CalciteJoinQueryTest extends BaseCalciteQueryTest
.setDimensions(
new DefaultDimensionSpec("m1", "d0", ColumnType.FLOAT)
)
.setPostAggregatorSpecs(ImmutableList.of(new ExpressionPostAggregator("a0", "1", null, ExprMacroTable.nil())))
.setPostAggregatorSpecs(
expressionPostAgg("a0", "1", ColumnType.LONG)
)
.build()
),
"_j0.",


@ -1112,7 +1112,7 @@ public class CalciteMultiValueStringQueryTest extends BaseCalciteQueryTest
.setDataSource(CalciteTests.DATASOURCE3)
.setInterval(querySegmentSpec(Filtration.eternity()))
.setGranularity(Granularities.ALL)
.setPostAggregatorSpecs(ImmutableList.of(expressionPostAgg("p0", "string_to_array('a,b',',')")))
.setPostAggregatorSpecs(expressionPostAgg("p0", "string_to_array('a,b',',')", ColumnType.STRING))
.setDimensions(dimensions(new DefaultDimensionSpec("m1", "_d0", ColumnType.FLOAT)))
.setContext(QUERY_CONTEXT_DEFAULT)
.build()
@ -1214,7 +1214,7 @@ public class CalciteMultiValueStringQueryTest extends BaseCalciteQueryTest
),
new CountAggregatorFactory("a1")
)
.postAggregators(expressionPostAgg("p0", "string_to_array(\"a0\",',')"))
.postAggregators(expressionPostAgg("p0", "string_to_array(\"a0\",',')", ColumnType.STRING))
.context(QUERY_CONTEXT_DEFAULT)
.build()
)


@ -129,7 +129,7 @@ public class CalciteParameterQueryTest extends BaseCalciteQueryTest
new DoubleSumAggregatorFactory("a1", "m2")
))
.postAggregators(
expressionPostAgg("p0", "(exp(\"a0\") + 10)")
expressionPostAgg("p0", "(exp(\"a0\") + 10)", ColumnType.DOUBLE)
)
.context(QUERY_CONTEXT_DEFAULT)
.build()),
@ -166,17 +166,17 @@ public class CalciteParameterQueryTest extends BaseCalciteQueryTest
.granularity(Granularities.ALL)
.aggregators(aggregators(new CountAggregatorFactory("a0")))
.postAggregators(
expressionPostAgg("p0", "'foo'"),
expressionPostAgg("p1", "'xfoo'"),
expressionPostAgg("p2", "'foo'"),
expressionPostAgg("p3", "' foo'"),
expressionPostAgg("p4", "'foo'"),
expressionPostAgg("p5", "'foo'"),
expressionPostAgg("p6", "'foo'"),
expressionPostAgg("p7", "'foo '"),
expressionPostAgg("p8", "'foox'"),
expressionPostAgg("p9", "' foo'"),
expressionPostAgg("p10", "'xfoo'")
expressionPostAgg("p0", "'foo'", ColumnType.STRING),
expressionPostAgg("p1", "'xfoo'", ColumnType.STRING),
expressionPostAgg("p2", "'foo'", ColumnType.STRING),
expressionPostAgg("p3", "' foo'", ColumnType.STRING),
expressionPostAgg("p4", "'foo'", ColumnType.STRING),
expressionPostAgg("p5", "'foo'", ColumnType.STRING),
expressionPostAgg("p6", "'foo'", ColumnType.STRING),
expressionPostAgg("p7", "'foo '", ColumnType.STRING),
expressionPostAgg("p8", "'foox'", ColumnType.STRING),
expressionPostAgg("p9", "' foo'", ColumnType.STRING),
expressionPostAgg("p10", "'xfoo'", ColumnType.STRING)
)
.context(QUERY_CONTEXT_DEFAULT)
.build()
@ -317,7 +317,7 @@ public class CalciteParameterQueryTest extends BaseCalciteQueryTest
new CountAggregatorFactory("a1")
))
.setPostAggregatorSpecs(ImmutableList.of(
expressionPostAgg("p0", "(\"a0\" / \"a1\")")
expressionPostAgg("p0", "(\"a0\" / \"a1\")", ColumnType.LONG)
))
.setHavingSpec(having(expressionFilter("((\"a0\" / \"a1\") == 1)")))
.setContext(QUERY_CONTEXT_DEFAULT)
@ -359,7 +359,7 @@ public class CalciteParameterQueryTest extends BaseCalciteQueryTest
.intervals(querySegmentSpec(Filtration.eternity()))
.granularity(Granularities.ALL)
.aggregators(aggregators(new DoubleSumAggregatorFactory("a0", "m1")))
.postAggregators(ImmutableList.of(expressionPostAgg("p0", "(\"a0\" / 10)")))
.postAggregators(expressionPostAgg("p0", "(\"a0\" / 10)", ColumnType.DOUBLE))
.context(QUERY_CONTEXT_DEFAULT)
.build()
),
@ -402,7 +402,7 @@ public class CalciteParameterQueryTest extends BaseCalciteQueryTest
new DoubleSumAggregatorFactory("a1", "m2")
))
.postAggregators(
expressionPostAgg("p0", "(exp(\"a0\") + 10)")
expressionPostAgg("p0", "(exp(\"a0\") + 10)", ColumnType.DOUBLE)
)
.context(QUERY_CONTEXT_DEFAULT)
.build()),
@ -439,7 +439,7 @@ public class CalciteParameterQueryTest extends BaseCalciteQueryTest
new DoubleSumAggregatorFactory("a1", "m2")
))
.postAggregators(
expressionPostAgg("p0", "(exp(\"a0\") + 10)")
expressionPostAgg("p0", "(exp(\"a0\") + 10)", ColumnType.DOUBLE)
)
.context(QUERY_CONTEXT_DEFAULT)
.build()),
@ -476,7 +476,7 @@ public class CalciteParameterQueryTest extends BaseCalciteQueryTest
new DoubleSumAggregatorFactory("a1", "m2")
))
.postAggregators(
expressionPostAgg("p0", "(exp(\"a0\") + 10)")
expressionPostAgg("p0", "(exp(\"a0\") + 10)", ColumnType.DOUBLE)
)
.context(QUERY_CONTEXT_DEFAULT)
.build()),
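The `ColumnType` added in each assertion tracks the SQL type Calcite infers for the projected expression, not the type the native engine evaluates in: native math runs in long/double precision, yet projections over FLOAT columns in these test files keep `ColumnType.FLOAT`. A rough mapping consistent with the expectations asserted here; illustrative only, not the planner's actual code:

```java
import javax.annotation.Nullable;
import org.apache.calcite.sql.type.SqlTypeName;
import org.apache.druid.segment.column.ColumnType;

@Nullable
static ColumnType expectedPostAggOutputType(final SqlTypeName sqlTypeName)
{
  switch (sqlTypeName) {
    case CHAR:
    case VARCHAR:
      return ColumnType.STRING;  // 'foo', CONCAT, SUBSTRING, ...
    case INTEGER:
    case BIGINT:
      return ColumnType.LONG;    // integer division: ("a0" / "a1")
    case FLOAT:
    case REAL:
      return ColumnType.FLOAT;   // MIN(m1) + MAX(m1) over FLOAT m1
    case DOUBLE:
      return ColumnType.DOUBLE;  // (exp("a0") + 10)
    case BOOLEAN:
      return ColumnType.LONG;    // Druid SQL models BOOLEAN as long 1/0
    default:
      return null;               // leave outputType unset
  }
}
```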

View File

@ -75,7 +75,6 @@ import org.apache.druid.query.aggregation.last.FloatLastAggregatorFactory;
import org.apache.druid.query.aggregation.last.LongLastAggregatorFactory;
import org.apache.druid.query.aggregation.last.StringLastAggregatorFactory;
import org.apache.druid.query.aggregation.post.ArithmeticPostAggregator;
import org.apache.druid.query.aggregation.post.ExpressionPostAggregator;
import org.apache.druid.query.aggregation.post.FieldAccessPostAggregator;
import org.apache.druid.query.aggregation.post.FinalizingFieldAccessPostAggregator;
import org.apache.druid.query.dimension.DefaultDimensionSpec;
@ -2675,9 +2674,9 @@ public class CalciteQueryTest extends BaseCalciteQueryTest
),
new CountAggregatorFactory("a1")
))
.setPostAggregatorSpecs(ImmutableList.of(
expressionPostAgg("p0", "(\"a0\" / \"a1\")")
))
.setPostAggregatorSpecs(
expressionPostAgg("p0", "(\"a0\" / \"a1\")", ColumnType.LONG)
)
.setHavingSpec(having(expressionFilter("((\"a0\" / \"a1\") == 1)")))
.setContext(QUERY_CONTEXT_DEFAULT)
.build()
@ -2712,7 +2711,7 @@ public class CalciteQueryTest extends BaseCalciteQueryTest
.setGranularity(Granularities.ALL)
.setDimensions(dimensions(new DefaultDimensionSpec("dim1", "d0")))
.setPostAggregatorSpecs(ImmutableList.of(
expressionPostAgg("p0", "substring(\"d0\", 1, -1)")
expressionPostAgg("p0", "substring(\"d0\", 1, -1)", ColumnType.STRING)
))
.setContext(QUERY_CONTEXT_DEFAULT)
.build()
@ -2746,8 +2745,8 @@ public class CalciteQueryTest extends BaseCalciteQueryTest
.setGranularity(Granularities.ALL)
.setDimensions(dimensions(new DefaultDimensionSpec("dim1", "d0")))
.setPostAggregatorSpecs(ImmutableList.of(
expressionPostAgg("p0", "substring(\"d0\", 1, -1)"),
expressionPostAgg("p1", "strlen(\"d0\")")
expressionPostAgg("p0", "substring(\"d0\", 1, -1)", ColumnType.STRING),
expressionPostAgg("p1", "strlen(\"d0\")", ColumnType.LONG)
))
.setLimitSpec(
DefaultLimitSpec
@ -2796,7 +2795,7 @@ public class CalciteQueryTest extends BaseCalciteQueryTest
.intervals(querySegmentSpec(Filtration.eternity()))
.granularity(Granularities.ALL)
.dimension(new DefaultDimensionSpec("dim1", "d0"))
.postAggregators(expressionPostAgg("s0", "substring(\"d0\", 1, -1)"))
.postAggregators(expressionPostAgg("s0", "substring(\"d0\", 1, -1)", ColumnType.STRING))
.metric(new DimensionTopNMetricSpec(null, StringComparators.LEXICOGRAPHIC))
.threshold(10)
.context(QUERY_CONTEXT_DEFAULT)
@ -2832,8 +2831,8 @@ public class CalciteQueryTest extends BaseCalciteQueryTest
.granularity(Granularities.ALL)
.dimension(new DefaultDimensionSpec("dim1", "d0"))
.postAggregators(
expressionPostAgg("p0", "substring(\"d0\", 1, -1)"),
expressionPostAgg("p1", "strlen(\"d0\")")
expressionPostAgg("p0", "substring(\"d0\", 1, -1)", ColumnType.STRING),
expressionPostAgg("p1", "strlen(\"d0\")", ColumnType.LONG)
)
.metric(new NumericTopNMetricSpec("p1"))
.threshold(10)
@ -2957,7 +2956,7 @@ public class CalciteQueryTest extends BaseCalciteQueryTest
.intervals(querySegmentSpec(Filtration.eternity()))
.granularity(Granularities.ALL)
.aggregators(aggregators(new DoubleSumAggregatorFactory("a0", "m1")))
.postAggregators(ImmutableList.of(expressionPostAgg("p0", "(\"a0\" / 10)")))
.postAggregators(expressionPostAgg("p0", "(\"a0\" / 10)", ColumnType.DOUBLE))
.context(QUERY_CONTEXT_DEFAULT)
.build()
),
@ -4679,7 +4678,8 @@ public class CalciteQueryTest extends BaseCalciteQueryTest
),
expressionPostAgg(
"p0",
useDefault ? "((\"a3\" + \"a4\") + \"a5\")" : "((\"a4\" + \"a5\") + \"a6\")"
useDefault ? "((\"a3\" + \"a4\") + \"a5\")" : "((\"a4\" + \"a5\") + \"a6\")",
ColumnType.LONG
)
)
.context(QUERY_CONTEXT_DEFAULT)
@ -4714,7 +4714,7 @@ public class CalciteQueryTest extends BaseCalciteQueryTest
new FloatMinAggregatorFactory("a0", "m1"),
new FloatMaxAggregatorFactory("a1", "m1")
)
.postAggregators(expressionPostAgg("p0", "(\"a0\" + \"a1\")"))
.postAggregators(expressionPostAgg("p0", "(\"a0\" + \"a1\")", ColumnType.FLOAT))
.threshold(3)
.context(QUERY_CONTEXT_DEFAULT)
.build()
@ -4746,7 +4746,7 @@ public class CalciteQueryTest extends BaseCalciteQueryTest
new FloatMinAggregatorFactory("a0", "m1"),
new FloatMaxAggregatorFactory("a1", "m1")
)
.setPostAggregatorSpecs(ImmutableList.of(expressionPostAgg("p0", "(\"a0\" + \"a1\")")))
.setPostAggregatorSpecs(expressionPostAgg("p0", "(\"a0\" + \"a1\")", ColumnType.FLOAT))
.setLimitSpec(
DefaultLimitSpec
.builder()
@ -4793,9 +4793,7 @@ public class CalciteQueryTest extends BaseCalciteQueryTest
new FloatMaxAggregatorFactory("a1", "m1")
)
.setPostAggregatorSpecs(
ImmutableList.of(
expressionPostAgg("p0", "(\"a0\" + \"a1\")")
)
expressionPostAgg("p0", "(\"a0\" + \"a1\")", ColumnType.FLOAT)
)
.setLimitSpec(
DefaultLimitSpec
@ -4953,7 +4951,9 @@ public class CalciteQueryTest extends BaseCalciteQueryTest
),
new LongSumAggregatorFactory("a1", "cnt")
))
.setPostAggregatorSpecs(ImmutableList.of(expressionPostAgg("p0", "(\"a0\" + \"a1\")")))
.setPostAggregatorSpecs(
expressionPostAgg("p0", "(\"a0\" + \"a1\")", ColumnType.LONG)
)
.setContext(QUERY_CONTEXT_DEFAULT)
.build()
),
@ -5040,8 +5040,8 @@ public class CalciteQueryTest extends BaseCalciteQueryTest
new DoubleMinAggregatorFactory("a5", "v2", null, macroTable)
))
.postAggregators(
expressionPostAgg("p0", "log((\"a1\" + \"a2\"))"),
expressionPostAgg("p1", "(\"a1\" % 4)")
expressionPostAgg("p0", "log((\"a1\" + \"a2\"))", ColumnType.DOUBLE),
expressionPostAgg("p1", "(\"a1\" % 4)", ColumnType.LONG)
)
.context(QUERY_CONTEXT_DEFAULT)
.build()
@ -7269,9 +7269,7 @@ public class CalciteQueryTest extends BaseCalciteQueryTest
)
))
.setPostAggregatorSpecs(
ImmutableList.of(
expressionPostAgg("p0", "((1.0 - (\"a1\" / \"a0\")) * 100)")
)
expressionPostAgg("p0", "((1.0 - (\"a1\" / \"a0\")) * 100)", ColumnType.FLOAT)
)
.setContext(QUERY_CONTEXT_DEFAULT)
.build()
@ -7416,10 +7414,10 @@ public class CalciteQueryTest extends BaseCalciteQueryTest
)
)
.postAggregators(
expressionPostAgg("p0", "CAST(\"a1\", 'DOUBLE')"),
expressionPostAgg("p1", "(\"a0\" / \"a1\")"),
expressionPostAgg("p2", "((\"a0\" / \"a1\") + 3)"),
expressionPostAgg("p3", "((CAST(\"a0\", 'DOUBLE') / CAST(\"a1\", 'DOUBLE')) + 3)")
expressionPostAgg("p0", "CAST(\"a1\", 'DOUBLE')", ColumnType.FLOAT),
expressionPostAgg("p1", "(\"a0\" / \"a1\")", ColumnType.LONG),
expressionPostAgg("p2", "((\"a0\" / \"a1\") + 3)", ColumnType.LONG),
expressionPostAgg("p3", "((CAST(\"a0\", 'DOUBLE') / CAST(\"a1\", 'DOUBLE')) + 3)", ColumnType.FLOAT)
)
.context(QUERY_CONTEXT_DEFAULT)
.build()
@ -8320,13 +8318,10 @@ public class CalciteQueryTest extends BaseCalciteQueryTest
)
)
.setPostAggregatorSpecs(
Collections.singletonList(
new ExpressionPostAggregator(
"p0",
"(CAST(\"_a0\", 'DOUBLE') / \"_a1\")",
null,
ExprMacroTable.nil()
)
expressionPostAgg(
"p0",
"(CAST(\"_a0\", 'DOUBLE') / \"_a1\")",
ColumnType.DOUBLE
)
)
.setContext(QUERY_CONTEXT_DEFAULT)
@ -10206,11 +10201,10 @@ public class CalciteQueryTest extends BaseCalciteQueryTest
)
.setAggregatorSpecs(aggregators(new DoubleSumAggregatorFactory("a0", "m1")))
.setPostAggregatorSpecs(
ImmutableList.of(
expressionPostAgg(
"p0",
"timestamp_floor(\"d0\",'P1M',null,'UTC')"
)
expressionPostAgg(
"p0",
"timestamp_floor(\"d0\",'P1M',null,'UTC')",
ColumnType.LONG
)
)
.setHavingSpec(
@ -10453,12 +10447,13 @@ public class CalciteQueryTest extends BaseCalciteQueryTest
ImmutableList.of()
)
)
.setPostAggregatorSpecs(Collections.singletonList(new ExpressionPostAggregator(
"p0",
"case_searched((\"a1\" == 1),'ALL',\"d0\")",
null,
ExprMacroTable.nil()
)))
.setPostAggregatorSpecs(
expressionPostAgg(
"p0",
"case_searched((\"a1\" == 1),'ALL',\"d0\")",
ColumnType.STRING
)
)
.setContext(QUERY_CONTEXT_DEFAULT)
.build()
),
@ -11179,10 +11174,13 @@ public class CalciteQueryTest extends BaseCalciteQueryTest
.setAggregatorSpecs(
aggregators(new CountAggregatorFactory("a0"), new DoubleSumAggregatorFactory("a1", "m2"))
)
.setPostAggregatorSpecs(Collections.singletonList(expressionPostAgg(
"p0",
"(\"a1\" / \"a0\")"
)))
.setPostAggregatorSpecs(
expressionPostAgg(
"p0",
"(\"a1\" / \"a0\")",
ColumnType.DOUBLE
)
)
.setContext(QUERY_CONTEXT_DEFAULT)
.build()
),
@ -11371,7 +11369,7 @@ public class CalciteQueryTest extends BaseCalciteQueryTest
)
)
.postAggregators(
expressionPostAgg("s0", "(\"a0\" + \"a1\")")
expressionPostAgg("s0", "(\"a0\" + \"a1\")", ColumnType.DOUBLE)
)
.descending(true)
.context(getTimeseriesContextWithFloorTime(TIMESERIES_CONTEXT_BY_GRAN, "d0"))
@ -11432,7 +11430,7 @@ public class CalciteQueryTest extends BaseCalciteQueryTest
new FieldAccessPostAggregator(null, "a0:count")
)
),
expressionPostAgg("s0", "(\"a1\" + \"a2\")")
expressionPostAgg("s0", "(\"a1\" + \"a2\")", ColumnType.DOUBLE)
)
.metric(new DimensionTopNMetricSpec(null, StringComparators.NUMERIC))
.threshold(5)
@ -11936,15 +11934,15 @@ public class CalciteQueryTest extends BaseCalciteQueryTest
// after upgrading to Calcite 1.21, expressions like sin(pi/6) that only reference
// literals are optimized into literals
.postAggregators(
expressionPostAgg("p0", "(exp(\"a0\") + 10)"),
expressionPostAgg("p1", "0.49999999999999994"),
expressionPostAgg("p2", "0.8660254037844387"),
expressionPostAgg("p3", "0.5773502691896257"),
expressionPostAgg("p4", "1.7320508075688776"),
expressionPostAgg("p5", "asin((exp(\"a0\") / 2))"),
expressionPostAgg("p6", "acos((exp(\"a0\") / 2))"),
expressionPostAgg("p7", "atan((exp(\"a0\") / 2))"),
expressionPostAgg("p8", "atan2(exp(\"a0\"),1)")
expressionPostAgg("p0", "(exp(\"a0\") + 10)", ColumnType.DOUBLE),
expressionPostAgg("p1", "0.49999999999999994", ColumnType.DOUBLE),
expressionPostAgg("p2", "0.8660254037844387", ColumnType.DOUBLE),
expressionPostAgg("p3", "0.5773502691896257", ColumnType.DOUBLE),
expressionPostAgg("p4", "1.7320508075688776", ColumnType.DOUBLE),
expressionPostAgg("p5", "asin((exp(\"a0\") / 2))", ColumnType.DOUBLE),
expressionPostAgg("p6", "acos((exp(\"a0\") / 2))", ColumnType.DOUBLE),
expressionPostAgg("p7", "atan((exp(\"a0\") / 2))", ColumnType.DOUBLE),
expressionPostAgg("p8", "atan2(exp(\"a0\"),1)", ColumnType.DOUBLE)
)
.context(QUERY_CONTEXT_DEFAULT)
.build()),
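The inline comment explains the bare literals: since Calcite 1.21, expressions referencing only literals are constant-folded at plan time with ordinary double arithmetic, so the expected plan carries the rounded results rather than `sin(...)` calls. The first two fall out of plain Java the same way (values as printed on a typical JVM; `Math.sin` and `Math.cos` are only specified to within 1 ulp):

```java
public class ConstantFoldingDemo
{
  public static void main(String[] args)
  {
    // sin(pi/6) is exactly 0.5, but pi/6 rounds to a double first:
    System.out.println(Math.sin(Math.PI / 6)); // 0.49999999999999994 -> p1
    System.out.println(Math.cos(Math.PI / 6)); // 0.8660254037844387  -> p2
  }
}
```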
@ -12196,8 +12194,8 @@ public class CalciteQueryTest extends BaseCalciteQueryTest
.setGranularity(Granularities.ALL)
.setDimensions(dimensions(new DefaultDimensionSpec("dim1", "d0")))
.setPostAggregatorSpecs(ImmutableList.of(
expressionPostAgg("p0", "left(\"d0\",2)"),
expressionPostAgg("p1", "right(\"d0\",2)")
expressionPostAgg("p0", "left(\"d0\",2)", ColumnType.STRING),
expressionPostAgg("p1", "right(\"d0\",2)", ColumnType.STRING)
))
.setContext(QUERY_CONTEXT_DEFAULT)
.build()
@ -13717,7 +13715,7 @@ public class CalciteQueryTest extends BaseCalciteQueryTest
)
.granularity(Granularities.ALL)
.postAggregators(
new ExpressionPostAggregator("p0", "'A'", null, ExprMacroTable.nil())
expressionPostAgg("p0", "'A'", ColumnType.STRING)
)
.context(QUERY_CONTEXT_DO_SKIP_EMPTY_BUCKETS)
.build()
@ -13741,7 +13739,7 @@ public class CalciteQueryTest extends BaseCalciteQueryTest
.setDimFilter(equality("dim1", "wat", ColumnType.STRING))
.setPostAggregatorSpecs(
ImmutableList.of(
new ExpressionPostAggregator("p0", "'A'", null, ExprMacroTable.nil())
expressionPostAgg("p0", "'A'", ColumnType.STRING)
)
)
@ -13771,8 +13769,8 @@ public class CalciteQueryTest extends BaseCalciteQueryTest
)
.granularity(Granularities.ALL)
.postAggregators(
new ExpressionPostAggregator("p0", "'A'", null, ExprMacroTable.nil()),
new ExpressionPostAggregator("p1", "'wat'", null, ExprMacroTable.nil())
expressionPostAgg("p0", "'A'", ColumnType.STRING),
expressionPostAgg("p1", "'wat'", ColumnType.STRING)
)
.context(QUERY_CONTEXT_DO_SKIP_EMPTY_BUCKETS)
.build()
@ -13796,8 +13794,8 @@ public class CalciteQueryTest extends BaseCalciteQueryTest
)
.granularity(Granularities.ALL)
.postAggregators(
new ExpressionPostAggregator("p0", "'A'", null, ExprMacroTable.nil()),
new ExpressionPostAggregator("p1", "'10.1'", null, ExprMacroTable.nil())
expressionPostAgg("p0", "'A'", ColumnType.STRING),
expressionPostAgg("p1", "'10.1'", ColumnType.STRING)
)
.context(QUERY_CONTEXT_DO_SKIP_EMPTY_BUCKETS)
.build()
@ -13871,7 +13869,7 @@ public class CalciteQueryTest extends BaseCalciteQueryTest
.setGranularity(Granularities.ALL)
.addDimension(new DefaultDimensionSpec("dim1", "_d0"))
.setPostAggregatorSpecs(ImmutableList.of(
expressionPostAgg("p0", "0")
expressionPostAgg("p0", "0", ColumnType.LONG)
))
.build()
);
@ -13900,7 +13898,7 @@ public class CalciteQueryTest extends BaseCalciteQueryTest
.addDimension(new DefaultDimensionSpec("dim1", "_d0"))
.addAggregator(new LongMaxAggregatorFactory("a0", "v0"))
.setPostAggregatorSpecs(ImmutableList.of(
expressionPostAgg("p0", "isnull(\"a0\")")
expressionPostAgg("p0", "isnull(\"a0\")", ColumnType.LONG)
))
.build()
);
@ -13938,7 +13936,7 @@ public class CalciteQueryTest extends BaseCalciteQueryTest
.addDimension(new DefaultDimensionSpec("l1", "_d0", ColumnType.LONG))
.addAggregator(new StringLastAggregatorFactory("a0", "v0", null, 1024))
.setPostAggregatorSpecs(ImmutableList.of(
expressionPostAgg("p0", "isnull(\"a0\")")
expressionPostAgg("p0", "isnull(\"a0\")", ColumnType.LONG)
))
.build()
),
@ -13980,7 +13978,7 @@ public class CalciteQueryTest extends BaseCalciteQueryTest
.addDimension(new DefaultDimensionSpec("dim1", "_d0", ColumnType.STRING))
.addAggregator(new LongSumAggregatorFactory("a0", "l1"))
.setPostAggregatorSpecs(ImmutableList.of(
expressionPostAgg("p0", "case_searched((\"a0\" == 0),1,0)")))
expressionPostAgg("p0", "case_searched((\"a0\" == 0),1,0)", ColumnType.LONG)))
.build()
),
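The last few hunks declare boolean-valued expressions (`isnull("a0")`, the comparison inside `case_searched`) as `ColumnType.LONG`: Druid has no boolean column type, so SQL BOOLEAN results surface as long 1/0, matching the BOOLEAN branch of the mapping sketched earlier. A small standalone check against the native expression engine (hypothetical usage; `Parser.parse` and `InputBindings.nilBindings` are assumed to be available as in recent Druid versions):

```java
import org.apache.druid.math.expr.Expr;
import org.apache.druid.math.expr.ExprEval;
import org.apache.druid.math.expr.ExprMacroTable;
import org.apache.druid.math.expr.InputBindings;
import org.apache.druid.math.expr.Parser;

public class BooleanAsLongDemo
{
  public static void main(String[] args)
  {
    // isnull(...) is boolean-valued in SQL but evaluates natively as a long.
    final Expr expr = Parser.parse("isnull(null)", ExprMacroTable.nil());
    final ExprEval<?> eval = expr.eval(InputBindings.nilBindings());
    System.out.println(eval.type());  // LONG
    System.out.println(eval.value()); // 1
  }
}
```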

View File

@ -366,17 +366,17 @@ public class CalciteSelectQueryTest extends BaseCalciteQueryTest
.granularity(Granularities.ALL)
.aggregators(aggregators(new CountAggregatorFactory("a0")))
.postAggregators(
expressionPostAgg("p0", "'foo'"),
expressionPostAgg("p1", "'xfoo'"),
expressionPostAgg("p2", "'foo'"),
expressionPostAgg("p3", "' foo'"),
expressionPostAgg("p4", "'foo'"),
expressionPostAgg("p5", "'foo'"),
expressionPostAgg("p6", "'foo'"),
expressionPostAgg("p7", "'foo '"),
expressionPostAgg("p8", "'foox'"),
expressionPostAgg("p9", "' foo'"),
expressionPostAgg("p10", "'xfoo'")
expressionPostAgg("p0", "'foo'", ColumnType.STRING),
expressionPostAgg("p1", "'xfoo'", ColumnType.STRING),
expressionPostAgg("p2", "'foo'", ColumnType.STRING),
expressionPostAgg("p3", "' foo'", ColumnType.STRING),
expressionPostAgg("p4", "'foo'", ColumnType.STRING),
expressionPostAgg("p5", "'foo'", ColumnType.STRING),
expressionPostAgg("p6", "'foo'", ColumnType.STRING),
expressionPostAgg("p7", "'foo '", ColumnType.STRING),
expressionPostAgg("p8", "'foox'", ColumnType.STRING),
expressionPostAgg("p9", "' foo'", ColumnType.STRING),
expressionPostAgg("p10", "'xfoo'", ColumnType.STRING)
)
.context(QUERY_CONTEXT_DEFAULT)
.build()
@ -407,12 +407,12 @@ public class CalciteSelectQueryTest extends BaseCalciteQueryTest
.granularity(Granularities.ALL)
.aggregators(aggregators(new CountAggregatorFactory("a0")))
.postAggregators(
expressionPostAgg("p0", "'xxfoo'"),
expressionPostAgg("p1", "'fo'"),
expressionPostAgg("p2", "' foo'"),
expressionPostAgg("p3", "'fooxx'"),
expressionPostAgg("p4", "'fo'"),
expressionPostAgg("p5", "'foo '")
expressionPostAgg("p0", "'xxfoo'", ColumnType.STRING),
expressionPostAgg("p1", "'fo'", ColumnType.STRING),
expressionPostAgg("p2", "' foo'", ColumnType.STRING),
expressionPostAgg("p3", "'fooxx'", ColumnType.STRING),
expressionPostAgg("p4", "'fo'", ColumnType.STRING),
expressionPostAgg("p5", "'foo '", ColumnType.STRING)
)
.context(QUERY_CONTEXT_DEFAULT)
.build()
@ -1005,7 +1005,7 @@ public class CalciteSelectQueryTest extends BaseCalciteQueryTest
new DoubleSumAggregatorFactory("a1", "m2")
))
.postAggregators(
expressionPostAgg("p0", "(exp(\"a0\") + 10)")
expressionPostAgg("p0", "(exp(\"a0\") + 10)", ColumnType.DOUBLE)
)
.context(QUERY_CONTEXT_DEFAULT)
.build()),
@ -1030,7 +1030,7 @@ public class CalciteSelectQueryTest extends BaseCalciteQueryTest
new DoubleSumAggregatorFactory("a1", "m2")
))
.postAggregators(
expressionPostAgg("p0", "(exp(\"a0\") + 10)")
expressionPostAgg("p0", "(exp(\"a0\") + 10)", ColumnType.DOUBLE)
)
.context(QUERY_CONTEXT_DEFAULT)
.build()),

View File

@ -496,7 +496,7 @@ public class CalciteSubqueryTest extends BaseCalciteQueryTest
))
.setAggregatorSpecs(aggregators(new CountAggregatorFactory("a0")))
.setPostAggregatorSpecs(
ImmutableList.of(expressionPostAgg("p0", "'abc'"))
expressionPostAgg("p0", "'abc'", ColumnType.STRING)
)
.setHavingSpec(having(equality("a0", 1L, ColumnType.LONG)))
.setContext(QUERY_CONTEXT_DEFAULT)
@ -576,17 +576,15 @@ public class CalciteSubqueryTest extends BaseCalciteQueryTest
)
)
.setPostAggregatorSpecs(
ImmutableList.of(
new ArithmeticPostAggregator(
"_a2",
"quotient",
ImmutableList.of(
new FieldAccessPostAggregator(null, "_a2:sum"),
new FieldAccessPostAggregator(null, "_a2:count")
)
),
expressionPostAgg("p0", "timestamp_extract(\"_a3\",'EPOCH','UTC')")
)
new ArithmeticPostAggregator(
"_a2",
"quotient",
ImmutableList.of(
new FieldAccessPostAggregator(null, "_a2:sum"),
new FieldAccessPostAggregator(null, "_a2:count")
)
),
expressionPostAgg("p0", "timestamp_extract(\"_a3\",'EPOCH','UTC')", ColumnType.LONG)
)
.setContext(queryContext)
.build()
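Beyond threading `ColumnType` through, these hunks also drop the `ImmutableList.of(...)` and `Collections.singletonList(...)` wrappers, which implies the test builder's `setPostAggregatorSpecs` gained a varargs overload. A plausible shape, inferred from the call sites alone (hypothetical; the real builder method may differ):

```java
import java.util.Arrays;
import org.apache.druid.query.aggregation.PostAggregator;

public Builder setPostAggregatorSpecs(final PostAggregator... postAggregators)
{
  // Delegating to the existing list-based setter keeps mixed arguments
  // working, such as an ArithmeticPostAggregator followed by
  // expressionPostAgg(...) as in the hunk above.
  return setPostAggregatorSpecs(Arrays.asList(postAggregators));
}
```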

View File

@ -134,6 +134,7 @@
"querying/joins",
"querying/lookups",
"querying/multi-value-dimensions",
"querying/arrays",
"querying/nested-columns",
"querying/multitenancy",
"querying/caching",