only allow lowering maxResults and maxIntermediateRows from groupBy query context

Himanshu Gupta 2016-03-08 15:03:59 -06:00
parent 9e31e2ce0e
commit ca5de3f583
5 changed files with 14 additions and 9 deletions

@@ -68,8 +68,8 @@ The broker uses processing configs for nested groupBy queries. And, optionally,
 |Property|Description|Default|
 |--------|-----------|-------|
 |`druid.query.groupBy.singleThreaded`|Run single threaded group By queries.|false|
-|`druid.query.groupBy.maxIntermediateRows`|Maximum number of intermediate rows. This can be overriden at query time by `maxIntermediateRows` attribute in query context.|50000|
-|`druid.query.groupBy.maxResults`|Maximum number of results. This can be overriden at query time by `maxResults` attribute in query context.|500000|
+|`druid.query.groupBy.maxIntermediateRows`|Maximum number of intermediate rows. This can be lowered at query time by `maxIntermediateRows` attribute in query context.|50000|
+|`druid.query.groupBy.maxResults`|Maximum number of results. This can be lowered at query time by `maxResults` attribute in query context.|500000|
 ##### Search Query Config

@@ -66,8 +66,8 @@ Druid uses Jetty to serve HTTP requests.
 |Property|Description|Default|
 |--------|-----------|-------|
 |`druid.query.groupBy.singleThreaded`|Run single threaded group By queries.|false|
-|`druid.query.groupBy.maxIntermediateRows`|Maximum number of intermediate rows. This can be overriden at query time by `maxIntermediateRows` attribute in query context.|50000|
-|`druid.query.groupBy.maxResults`|Maximum number of results. This can be overriden at query time by `maxResults` attribute in query context.|500000|
+|`druid.query.groupBy.maxIntermediateRows`|Maximum number of intermediate rows. This can be lowered at query time by `maxIntermediateRows` attribute in query context.|50000|
+|`druid.query.groupBy.maxResults`|Maximum number of results. This can be lowered at query time by `maxResults` attribute in query context.|500000|
 ##### Search Query Config
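
The two caps above interact with the query context in the way this commit enforces: a `maxResults` or `maxIntermediateRows` value supplied in the context can only shrink the configured limit, never enlarge it. A minimal sketch of that rule, assuming a hypothetical class and helper name (`GroupByLimitsSketch`, `effectiveLimit`) and sample numbers that are not part of Druid's API; it mirrors the `Math.min` clamping in the Java hunks below:

```java
public class GroupByLimitsSketch
{
  // Effective cap = the smaller of the configured limit and the value supplied
  // in the query context; a missing context value falls back to the config.
  static int effectiveLimit(int configuredMax, Integer contextValue)
  {
    return contextValue == null ? configuredMax : Math.min(contextValue, configuredMax);
  }

  public static void main(String[] args)
  {
    int configuredMaxResults = 500000; // default for druid.query.groupBy.maxResults

    System.out.println(effectiveLimit(configuredMaxResults, 100000));  // 100000 - lowering is honored
    System.out.println(effectiveLimit(configuredMaxResults, 1000000)); // 500000 - raising is ignored
    System.out.println(effectiveLimit(configuredMaxResults, null));    // 500000 - falls back to the config
  }
}
```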

@@ -18,6 +18,6 @@ The query context is used for various query configuration parameters.
 |finalize | `true` | Flag indicating whether to "finalize" aggregation results. Primarily used for debugging. For instance, the `hyperUnique` aggregator will return the full HyperLogLog sketch instead of the estimated cardinality when this flag is set to `false` |
 |chunkPeriod | `0` (off) | At the broker node level, long interval queries (of any type) may be broken into shorter interval queries, reducing the impact on resources. Use ISO 8601 periods. For example, if this property is set to `P1M` (one month), then a query covering a year would be broken into 12 smaller queries. All the query chunks will be processed asynchronously inside query processing executor service. Make sure "druid.processing.numThreads" is configured appropriately on the broker. |
 |minTopNThreshold | `1000` | The top minTopNThreshold local results from each segment are returned for merging to determine the global topN. |
-|`maxResults`|500000|Maximum number of results groupBy query can process.|
-|`maxIntermediateRows`|50000|Maximum number of intermediate rows while processing single segment for groupBy query.|
+|`maxResults`|500000|Maximum number of results groupBy query can process. Default value used can be changed by `druid.query.groupBy.maxResults` in druid configuration at broker and historical nodes. At query time you can only lower the value.|
+|`maxIntermediateRows`|50000|Maximum number of intermediate rows while processing single segment for groupBy query. Default value used can be changed by `druid.query.groupBy.maxIntermediateRows` in druid configuration at broker and historical nodes. At query time you can only lower the value.|
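
From the query side, the only knob is to pass smaller values through the context keys documented above. A sketch of building such a context with plain Java collections (constructing a full Druid `GroupByQuery` is omitted; only the key names and defaults come from the table):

```java
import java.util.HashMap;
import java.util.Map;

public class GroupByContextSketch
{
  public static void main(String[] args)
  {
    // groupBy query context: both values sit below the defaults
    // (maxResults 500000, maxIntermediateRows 50000), so they take effect;
    // anything larger would be clamped back to the configured maximums.
    Map<String, Object> context = new HashMap<>();
    context.put("maxResults", 100000);
    context.put("maxIntermediateRows", 20000);

    System.out.println(context);
  }
}
```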

@@ -310,7 +310,12 @@ public class GroupByQueryEngine
       this.cursor = cursor;
       this.metricsBuffer = metricsBuffer;
-      this.maxIntermediateRows = query.getContextValue(CTX_KEY_MAX_INTERMEDIATE_ROWS, config.getMaxIntermediateRows());
+      this.maxIntermediateRows = Math.min(
+          query.getContextValue(
+              CTX_KEY_MAX_INTERMEDIATE_ROWS,
+              config.getMaxIntermediateRows()
+          ), config.getMaxIntermediateRows()
+      );
       unprocessedKeys = null;
       delegate = Iterators.emptyIterator();

@@ -90,7 +90,7 @@ public class GroupByQueryHelper
           aggs.toArray(new AggregatorFactory[aggs.size()]),
           false,
           true,
-          query.getContextValue(CTX_KEY_MAX_RESULTS, config.getMaxResults()),
+          Math.min(query.getContextValue(CTX_KEY_MAX_RESULTS, config.getMaxResults()), config.getMaxResults()),
           bufferPool
       );
     } else {
@@ -102,7 +102,7 @@ public class GroupByQueryHelper
           aggs.toArray(new AggregatorFactory[aggs.size()]),
           false,
           true,
-          query.getContextValue(CTX_KEY_MAX_RESULTS, config.getMaxResults())
+          Math.min(query.getContextValue(CTX_KEY_MAX_RESULTS, config.getMaxResults()), config.getMaxResults())
       );
     }