mirror of https://github.com/apache/druid.git

Update docs for druid.processing.numThreads in brokers (#12231)

* Update docs for druid.processing.numThreads
* error msg
* one more reference

This commit is contained in:
parent de82c611de
commit 159f97dcb0

@@ -1620,7 +1620,6 @@ Druid uses Jetty to serve HTTP requests.
|--------|-----------|-------|
|`druid.processing.buffer.sizeBytes`|This specifies a buffer size (less than 2GiB) for the storage of intermediate results. The computation engine in both the Historical and Realtime processes will use a scratch buffer of this size to do all of their intermediate computations off-heap. Larger values allow for more aggregations in a single pass over the data, while smaller values can require more passes depending on the query that is being executed. [Human-readable format](human-readable-byte.md) is supported.|auto (max 1GiB)|
|`druid.processing.buffer.poolCacheMaxCount`|The processing buffer pool caches buffers for later use; this is the maximum number of buffers the cache will grow to. Note that the pool can create more buffers than it can cache if necessary.|`Integer.MAX_VALUE`|
|`druid.processing.buffer.poolCacheInitialCount`|The number of buffers to allocate up front for the intermediate results pool. Note that the pool can create more buffers if necessary.|`0`|
|`druid.processing.formatString`|Realtime and Historical processes use this format string to name their processing threads.|`processing-%s`|
|`druid.processing.numMergeBuffers`|The number of direct memory buffers available for merging query results. The buffers are sized by `druid.processing.buffer.sizeBytes`. This property is effectively a concurrency limit for queries that require merging buffers. If you are using any queries that require merge buffers (currently, just groupBy v2), then you should have at least two of these.|`max(2, druid.processing.numThreads / 4)`|
-|`druid.processing.numThreads`|The number of processing threads to have available for parallel processing of segments. Our rule of thumb is `num_cores - 1`, which means that even under heavy load there will still be one core available to do background tasks like talking with ZooKeeper and pulling down segments. If only one core is available, this property defaults to the value `1`.|Number of cores - 1 (or 1)|
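
As an aside, the two defaults above reduce to one-liners. A minimal Java sketch of the documented rules of thumb (for illustration only; not the actual Druid implementation):

```java
// Sketch of the documented defaults; not Druid's actual implementation.
int numCores = Runtime.getRuntime().availableProcessors();

// druid.processing.numThreads: "num_cores - 1", but never below 1.
int numThreads = Math.max(numCores - 1, 1);

// druid.processing.numMergeBuffers: max(2, druid.processing.numThreads / 4).
int numMergeBuffers = Math.max(2, numThreads / 4);
```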

@@ -1783,10 +1782,9 @@ The broker uses processing configs for nested groupBy queries.
|Property|Description|Default|
|--------|-----------|-------|
|`druid.processing.buffer.sizeBytes`|This specifies a buffer size (less than 2GiB) for the storage of intermediate results. The computation engine in both the Historical and Realtime processes will use a scratch buffer of this size to do all of their intermediate computations off-heap. Larger values allow for more aggregations in a single pass over the data, while smaller values can require more passes depending on the query that is being executed. [Human-readable format](human-readable-byte.md) is supported.|auto (max 1GiB)|
|`druid.processing.buffer.poolCacheInitialCount`|The number of buffers to allocate up front for the intermediate results pool. Note that the pool can create more buffers if necessary.|`0`|
|`druid.processing.buffer.poolCacheMaxCount`|The processing buffer pool caches buffers for later use; this is the maximum number of buffers the cache will grow to. Note that the pool can create more buffers than it can cache if necessary.|`Integer.MAX_VALUE`|
|`druid.processing.formatString`|Realtime and Historical processes use this format string to name their processing threads.|`processing-%s`|
|`druid.processing.numMergeBuffers`|The number of direct memory buffers available for merging query results. The buffers are sized by `druid.processing.buffer.sizeBytes`. This property is effectively a concurrency limit for queries that require merging buffers. If you are using any queries that require merge buffers (currently, just groupBy v2), then you should have at least two of these.|`max(2, druid.processing.numThreads / 4)`|
|`druid.processing.numThreads`|The number of processing threads to have available for parallel processing of segments. Our rule of thumb is `num_cores - 1`, which means that even under heavy load there will still be one core available to do background tasks like talking with ZooKeeper and pulling down segments. If only one core is available, this property defaults to the value `1`.|Number of cores - 1 (or 1)|
|`druid.processing.columnCache.sizeBytes`|Maximum size in bytes for the dimension value lookup cache. Any value greater than `0` enables the cache; it is currently disabled by default. Enabling the lookup cache can significantly improve the performance of aggregators that operate on dimension values, such as the JavaScript or cardinality aggregators, but can slow things down if the cache hit rate is low (i.e., dimensions with few repeating values). Enabling it may also require additional garbage collection tuning to avoid long GC pauses.|`0` (disabled)|
|`druid.processing.fifo`|Whether the processing queue should treat tasks of equal priority in a FIFO manner.|`false`|
|`druid.processing.tmpDir`|Path where temporary files created while processing a query should be stored. If specified, this configuration takes priority over the default `java.io.tmpdir` path.|path represented by `java.io.tmpdir`|

@@ -1799,7 +1797,7 @@ The broker uses processing configs for nested groupBy queries.
|`druid.processing.merge.task.smallBatchNumRows`|Size of result batches to operate on in ForkJoinPool merge tasks.|`4096`|
The amount of direct memory needed by Druid is at least
-`druid.processing.buffer.sizeBytes * (druid.processing.numMergeBuffers + druid.processing.numThreads + 1)`. You can
+`druid.processing.buffer.sizeBytes * (druid.processing.numMergeBuffers + 1)`. You can
ensure at least this amount of direct memory is available by providing `-XX:MaxDirectMemorySize=<VALUE>` at the command
line.
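
For example, one of the Broker example configurations below uses `druid.processing.buffer.sizeBytes=500MiB` with `druid.processing.numMergeBuffers=6`; under the updated formula that works out to `500MiB * (6 + 1) = 3,500MiB` of direct memory, which a setting such as `-XX:MaxDirectMemorySize=4g` would cover with headroom.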

@@ -139,7 +139,6 @@ If caching is enabled on the Broker, the cache is stored on heap, sized by `druid.cache.sizeInBytes`.
On the Broker, the amount of direct memory needed depends on how many merge buffers (used for merging GroupBys) are configured. The Broker does not generally need processing threads or processing buffers, as query results are merged on-heap in the HTTP connection threads instead.

- `druid.processing.buffer.sizeBytes` can be set to 500MiB.
-- `druid.processing.numThreads`: set this to 1 (the minimum allowed)
- `druid.processing.numMergeBuffers`: set this to the same value as on Historicals or a bit higher
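
For example, if the Historicals run with `druid.processing.numMergeBuffers=4`, setting the Broker to the same value with 500MiB buffers implies at least `(4 + 1) * 500MiB = 2.5GiB` of direct memory, per the estimate formula below.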
#### Connection pool sizing

@@ -176,7 +175,7 @@ If you need Broker HA, you can deploy 2 initially and then use the 1:15 ratio guideline
To estimate total memory usage of the Broker under these guidelines:

- Heap: allocated heap size
-- Direct Memory: `(druid.processing.numThreads + druid.processing.numMergeBuffers + 1) * druid.processing.buffer.sizeBytes`
+- Direct Memory: `(druid.processing.numMergeBuffers + 1) * druid.processing.buffer.sizeBytes`
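
For instance, one of the Broker example configurations below pairs `druid.processing.numMergeBuffers=16` with `druid.processing.buffer.sizeBytes=500MiB`, giving a direct-memory estimate of `(16 + 1) * 500MiB = 8.5GiB` on top of the allocated heap.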
### MiddleManager

@@ -30,7 +30,6 @@ druid.broker.http.maxQueuedBytes=10MiB
# Processing threads and buffers
druid.processing.buffer.sizeBytes=500MiB
druid.processing.numMergeBuffers=6
-druid.processing.numThreads=1
druid.processing.tmpDir=var/druid/processing

# Query cache disabled -- push down caching and merging instead

@@ -30,7 +30,6 @@ druid.broker.http.maxQueuedBytes=10MiB
# Processing threads and buffers
druid.processing.buffer.sizeBytes=500MiB
druid.processing.numMergeBuffers=16
-druid.processing.numThreads=1
druid.processing.tmpDir=var/druid/processing

# Query cache disabled -- push down caching and merging instead

@@ -30,7 +30,6 @@ druid.broker.http.maxQueuedBytes=10MiB
# Processing threads and buffers
druid.processing.buffer.sizeBytes=500MiB
druid.processing.numMergeBuffers=4
-druid.processing.numThreads=1
druid.processing.tmpDir=var/druid/processing

# Query cache disabled -- push down caching and merging instead

@@ -30,7 +30,6 @@ druid.broker.http.maxQueuedBytes=5MiB
# Processing threads and buffers
druid.processing.buffer.sizeBytes=100MiB
druid.processing.numMergeBuffers=2
-druid.processing.numThreads=1
druid.processing.tmpDir=var/druid/processing

# Query cache disabled -- push down caching and merging instead

@@ -30,7 +30,6 @@ druid.broker.http.maxQueuedBytes=5MiB
# Processing threads and buffers
druid.processing.buffer.sizeBytes=50MiB
druid.processing.numMergeBuffers=2
-druid.processing.numThreads=1
druid.processing.tmpDir=var/druid/processing

# Query cache disabled -- push down caching and merging instead

@@ -30,7 +30,6 @@ druid.broker.http.maxQueuedBytes=5MiB
# Processing threads and buffers
druid.processing.buffer.sizeBytes=500MiB
druid.processing.numMergeBuffers=2
-druid.processing.numThreads=1
druid.processing.tmpDir=var/druid/processing

# Query cache disabled -- push down caching and merging instead

@@ -30,7 +30,6 @@ druid.broker.http.maxQueuedBytes=10MiB
# Processing threads and buffers
druid.processing.buffer.sizeBytes=500MiB
druid.processing.numMergeBuffers=16
-druid.processing.numThreads=1
druid.processing.tmpDir=var/druid/processing

# Query cache disabled -- push down caching and merging instead

@@ -150,7 +150,6 @@ spec:

# Processing threads and buffers
druid.processing.buffer.sizeBytes=25000000
-druid.processing.numThreads=1
druid.sql.enable=true
extra.jvm.options: |-
  -Xmx512m
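
With `druid.processing.buffer.sizeBytes=25000000` (25MB) and the documented minimum of two merge buffers, this Broker needs roughly `(2 + 1) * 25000000 = 75000000` bytes (about 75MB) of direct memory, which fits comfortably alongside the `-Xmx512m` heap in a small container.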

@@ -161,7 +161,7 @@ public class BrokerProcessingModule implements Module
    if (maxDirectMemory < memoryNeeded) {
      throw new ProvisionException(
          StringUtils.format(
-             "Not enough direct memory. Please adjust -XX:MaxDirectMemorySize, druid.processing.buffer.sizeBytes, druid.processing.numThreads, or druid.processing.numMergeBuffers: "
+             "Not enough direct memory. Please adjust -XX:MaxDirectMemorySize, druid.processing.buffer.sizeBytes, or druid.processing.numMergeBuffers: "
              + "maxDirectMemory[%,d], memoryNeeded[%,d] = druid.processing.buffer.sizeBytes[%,d] * (druid.processing.numMergeBuffers[%,d] + 1)",
              maxDirectMemory,
              memoryNeeded,
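
To make the changed guard concrete, here is a self-contained sketch of the same check with the Broker formula; the hard-coded values are stand-ins for the injected configuration, not Druid's actual wiring:

```java
// Minimal sketch of the direct-memory guard above. The literals stand in for
// the injected DruidProcessingConfig values; this is not the actual Druid code.
public class DirectMemoryCheckSketch
{
  public static void main(String[] args)
  {
    long maxDirectMemory = 4L * 1024 * 1024 * 1024;  // e.g. -XX:MaxDirectMemorySize=4g
    long bufferSizeBytes = 500L * 1024 * 1024;       // druid.processing.buffer.sizeBytes=500MiB
    int numMergeBuffers = 6;                         // druid.processing.numMergeBuffers=6

    // Brokers need one buffer per merge buffer plus one extra; processing
    // threads no longer factor into the requirement.
    long memoryNeeded = bufferSizeBytes * (numMergeBuffers + 1);

    if (maxDirectMemory < memoryNeeded) {
      throw new IllegalStateException(String.format(
          "Not enough direct memory. maxDirectMemory[%,d], memoryNeeded[%,d]",
          maxDirectMemory,
          memoryNeeded
      ));
    }
    System.out.printf("OK: maxDirectMemory[%,d] >= memoryNeeded[%,d]%n", maxDirectMemory, memoryNeeded);
  }
}
```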