Add single-machine deployment example cfgs and scripts (#7590)

* Add single-machine deployment example cfgs and scripts

* Add (8u92+)

* Use combined coordinator-overlord for single machine confs

* RAT fix
Jonathan Wei 2019-05-06 19:11:13 -07:00 committed by Gian Merlino
parent 51a62cb31b
commit 7c2ca474da
134 changed files with 2181 additions and 269 deletions

View File

@ -41,7 +41,17 @@
</includes> </includes>
<outputDirectory>hadoop-dependencies</outputDirectory> <outputDirectory>hadoop-dependencies</outputDirectory>
</fileSet> </fileSet>
<fileSet>
<directory>../examples/conf</directory>
<includes>
<include>*</include>
<include>*/*</include>
<include>*/*/*</include>
<include>*/*/*/*</include>
<include>*/*/*/*/*</include>
</includes>
<outputDirectory>conf</outputDirectory>
</fileSet>
<fileSet> <fileSet>
<directory>../examples/quickstart/</directory> <directory>../examples/quickstart/</directory>
<includes> <includes>
@ -56,83 +66,6 @@
</includes> </includes>
<outputDirectory>quickstart/tutorial</outputDirectory> <outputDirectory>quickstart/tutorial</outputDirectory>
</fileSet> </fileSet>
<fileSet>
<directory>../examples/quickstart/tutorial/conf</directory>
<includes>
<include>*</include>
</includes>
<outputDirectory>quickstart/tutorial/conf</outputDirectory>
</fileSet>
<fileSet>
<directory>../examples/quickstart/tutorial/conf/druid</directory>
<includes>
<include>*</include>
</includes>
<outputDirectory>quickstart/tutorial/conf/druid</outputDirectory>
</fileSet>
<fileSet>
<directory>../examples/quickstart/tutorial/conf/druid/_common</directory>
<includes>
<include>*</include>
</includes>
<outputDirectory>quickstart/tutorial/conf/druid/_common/</outputDirectory>
</fileSet>
<fileSet>
<directory>../examples/quickstart/tutorial/conf/druid/broker</directory>
<includes>
<include>*</include>
</includes>
<outputDirectory>quickstart/tutorial/conf/druid/broker</outputDirectory>
</fileSet>
<fileSet>
<directory>../examples/quickstart/tutorial/conf/druid/coordinator</directory>
<includes>
<include>*</include>
</includes>
<outputDirectory>quickstart/tutorial/conf/druid/coordinator</outputDirectory>
</fileSet>
<fileSet>
<directory>../examples/quickstart/tutorial/conf/druid/historical</directory>
<includes>
<include>*</include>
</includes>
<outputDirectory>quickstart/tutorial/conf/druid/historical</outputDirectory>
</fileSet>
<fileSet>
<directory>../examples/quickstart/tutorial/conf/druid/overlord</directory>
<includes>
<include>*</include>
</includes>
<outputDirectory>quickstart/tutorial/conf/druid/overlord</outputDirectory>
</fileSet>
<fileSet>
<directory>../examples/quickstart/tutorial/conf/druid/middleManager</directory>
<includes>
<include>*</include>
</includes>
<outputDirectory>quickstart/tutorial/conf/druid/middleManager</outputDirectory>
</fileSet>
<fileSet>
<directory>../examples/quickstart/tutorial/conf/druid/router</directory>
<includes>
<include>*</include>
</includes>
<outputDirectory>quickstart/tutorial/conf/druid/router</outputDirectory>
</fileSet>
<fileSet>
<directory>../examples/quickstart/tutorial/conf/tranquility</directory>
<includes>
<include>*</include>
</includes>
<outputDirectory>quickstart/tutorial/conf/tranquility</outputDirectory>
</fileSet>
<fileSet>
<directory>../examples/quickstart/tutorial/conf/zk</directory>
<includes>
<include>*</include>
</includes>
<outputDirectory>quickstart/tutorial/conf/zk</outputDirectory>
</fileSet>
<fileSet> <fileSet>
<directory>../examples/quickstart/tutorial/hadoop</directory> <directory>../examples/quickstart/tutorial/hadoop</directory>
<includes> <includes>
@ -147,8 +80,6 @@
</includes> </includes>
<outputDirectory>quickstart/tutorial/hadoop/docker</outputDirectory> <outputDirectory>quickstart/tutorial/hadoop/docker</outputDirectory>
</fileSet> </fileSet>
<fileSet> <fileSet>
<directory>../examples/quickstart/protobuf</directory> <directory>../examples/quickstart/protobuf</directory>
<includes> <includes>
@ -156,70 +87,6 @@
</includes> </includes>
<outputDirectory>quickstart/protobuf</outputDirectory> <outputDirectory>quickstart/protobuf</outputDirectory>
</fileSet> </fileSet>
<fileSet>
<directory>../examples/conf</directory>
<includes>
<include>*</include>
</includes>
<outputDirectory>conf</outputDirectory>
</fileSet>
<fileSet>
<directory>../examples/conf/druid/_common</directory>
<includes>
<include>*</include>
</includes>
<outputDirectory>conf/druid/_common</outputDirectory>
</fileSet>
<fileSet>
<directory>../examples/conf/druid/broker</directory>
<includes>
<include>*</include>
</includes>
<outputDirectory>conf/druid/broker</outputDirectory>
</fileSet>
<fileSet>
<directory>../examples/conf/druid/coordinator</directory>
<includes>
<include>*</include>
</includes>
<outputDirectory>conf/druid/coordinator</outputDirectory>
</fileSet>
<fileSet>
<directory>../examples/conf/druid/historical</directory>
<includes>
<include>*</include>
</includes>
<outputDirectory>conf/druid/historical</outputDirectory>
</fileSet>
<fileSet>
<directory>../examples/conf/druid/overlord</directory>
<includes>
<include>*</include>
</includes>
<outputDirectory>conf/druid/overlord</outputDirectory>
</fileSet>
<fileSet>
<directory>../examples/conf/druid/middleManager</directory>
<includes>
<include>*</include>
</includes>
<outputDirectory>conf/druid/middleManager</outputDirectory>
</fileSet>
<fileSet>
<directory>../examples/conf/druid/router</directory>
<includes>
<include>*</include>
</includes>
<outputDirectory>conf/druid/router</outputDirectory>
</fileSet>
<fileSet>
<directory>../examples/conf/tranquility</directory>
<includes>
<include>*</include>
</includes>
<outputDirectory>conf/tranquility</outputDirectory>
</fileSet>
<fileSet> <fileSet>
<directory>../examples/bin</directory> <directory>../examples/bin</directory>
<includes> <includes>
@ -228,7 +95,6 @@
<fileMode>744</fileMode> <fileMode>744</fileMode>
<outputDirectory>bin</outputDirectory> <outputDirectory>bin</outputDirectory>
</fileSet> </fileSet>
<fileSet> <fileSet>
<directory>../</directory> <directory>../</directory>
<includes> <includes>

View File

@ -0,0 +1,69 @@
---
layout: doc_page
title: "Single Server Deployments"
---
<!--
~ Licensed to the Apache Software Foundation (ASF) under one
~ or more contributor license agreements. See the NOTICE file
~ distributed with this work for additional information
~ regarding copyright ownership. The ASF licenses this file
~ to you under the Apache License, Version 2.0 (the
~ "License"); you may not use this file except in compliance
~ with the License. You may obtain a copy of the License at
~
~ http://www.apache.org/licenses/LICENSE-2.0
~
~ Unless required by applicable law or agreed to in writing,
~ software distributed under the License is distributed on an
~ "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
~ KIND, either express or implied. See the License for the
~ specific language governing permissions and limitations
~ under the License.
-->
# Single Server Deployments
Druid includes a set of reference configurations and launch scripts for single-machine deployments:
- `micro-quickstart`
- `small`
- `medium`
- `large`
- `xlarge`
The `micro-quickstart` configuration is sized for small machines like laptops and is intended for quick evaluation use cases.
The other configurations are intended for general-purpose single-machine deployments. They are sized for hardware roughly comparable to Amazon's i3 series of EC2 instances.
The startup scripts for these example configurations run a single ZK instance along with the Druid services. You can also choose to deploy ZK separately.
The example configurations run the Druid Coordinator and Overlord together in a single process using the optional configuration `druid.coordinator.asOverlord.enabled=true`, described in the [Coordinator configuration documentation](../configuration/index.html#coordinator-operation).
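As a quick sanity check of these two behaviors in a given configuration, you could grep the bundled files the start script points at (a sketch; the supervise config path comes from `bin/start-micro-quickstart`, and the expected property values are those shipped in the example `runtime.properties`):

```bash
# Which supervise entry launches the bundled ZK (remove it if you run ZK separately,
# and point druid.zk.service.host at your external ensemble):
grep -n 'run-zk' conf/supervise/single-server/micro-quickstart.conf

# Confirm the combined Coordinator-Overlord settings; expected matches:
#   druid.coordinator.asOverlord.enabled=true
#   druid.coordinator.asOverlord.overlordService=druid/overlord
grep -rn 'asOverlord' conf/druid/single-server/micro-quickstart/
```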
## Single Server Reference Configurations
Micro-Quickstart: 4 CPU, 16GB RAM
------------
Launch command: `bin/start-micro-quickstart`
Configuration directory: `conf/druid/single-server/micro-quickstart`
Small: 8 CPU, 64GB RAM (~i3.2xlarge)
------------
Launch command: `bin/start-small`
Configuration directory: `conf/druid/single-server/small`
Medium: 16 CPU, 128GB RAM (~i3.4xlarge)
------------
Launch command: `bin/start-medium`
Configuration directory: `conf/druid/single-server/medium`
Large: 32 CPU, 256GB RAM (~i3.8xlarge)
------------
Launch command: `bin/start-large`
Configuration directory: `conf/druid/single-server/large`
X-Large: 64 CPU, 512GB RAM (~i3.16xlarge)
------------
Launch command: `bin/start-xlarge`
Configuration directory: `conf/druid/single-server/xlarge`
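As a usage sketch for any of the sizes listed above, run the matching start script from the root of the distribution; each script simply execs `bin/supervise` with the corresponding `conf/supervise/single-server/<size>.conf` file, and CTRL-C stops the supervised processes:

```bash
# Launch the "small" single-server configuration in the foreground.
./bin/start-small

# Equivalent direct invocation (what the start script execs under the hood):
./bin/supervise -c conf/supervise/single-server/small.conf
```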

View File

@ -22,7 +22,7 @@ title: "Apache Druid (incubating) Quickstart"
~ under the License. ~ under the License.
--> -->
# Druid Quickstart # Apache Druid (incubating) Quickstart
In this quickstart, we will download Druid and set it up on a single machine. The cluster will be ready to load data In this quickstart, we will download Druid and set it up on a single machine. The cluster will be ready to load data
after completing this initial setup. after completing this initial setup.
@ -32,12 +32,11 @@ Before beginning the quickstart, it is helpful to read the [general Druid overvi
## Prerequisites ## Prerequisites
You will need: ### Software
* Java 8 You will need:
* Java 8 (8u92+)
* Linux, Mac OS X, or other Unix-like OS (Windows is not supported) * Linux, Mac OS X, or other Unix-like OS (Windows is not supported)
* 8G of RAM
* 2 vCPUs
On Mac OS X, you can use [Oracle's JDK On Mac OS X, you can use [Oracle's JDK
8](http://www.oracle.com/technetwork/java/javase/downloads/jdk8-downloads-2133151.html) to install 8](http://www.oracle.com/technetwork/java/javase/downloads/jdk8-downloads-2133151.html) to install
@ -47,6 +46,15 @@ On Linux, your OS package manager should be able to help for Java. If your Ubunt
based OS does not have a recent enough version of Java, WebUpd8 offers [packages for those based OS does not have a recent enough version of Java, WebUpd8 offers [packages for those
OSes](http://www.webupd8.org/2012/09/install-oracle-java-8-in-ubuntu-via-ppa.html). OSes](http://www.webupd8.org/2012/09/install-oracle-java-8-in-ubuntu-via-ppa.html).
### Hardware
Druid includes several example [single-server configurations](../operations/single-server.html), along with scripts to start the Druid processes using these configurations.
If you're running on a small machine such as a laptop for a quick evaluation, the `micro-quickstart` configuration is a good choice, sized for a 4 CPU/16GB RAM environment.
If you plan to use the single-machine deployment for further evaluation beyond the tutorials, we recommend a larger configuration than `micro-quickstart`.
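If you're not sure which size fits a given machine, a rough check of CPU count and memory is usually enough; a sketch (the exact commands vary by OS):

```bash
# Linux: report CPU count and RAM in GB, then match against the reference sizes
# (e.g. 4 CPU / 16GB -> micro-quickstart, 8 CPU / 64GB -> small).
nproc
free -g

# macOS equivalents:
# sysctl -n hw.ncpu     # CPU count
# sysctl -n hw.memsize  # RAM in bytes
```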
## Getting started ## Getting started
[Download](https://www.apache.org/dyn/closer.cgi?path=/incubator/druid/#{DRUIDVERSION}/apache-druid-#{DRUIDVERSION}-bin.tar.gz) [Download](https://www.apache.org/dyn/closer.cgi?path=/incubator/druid/#{DRUIDVERSION}/apache-druid-#{DRUIDVERSION}-bin.tar.gz)
@ -86,32 +94,33 @@ The startup scripts for the tutorial will expect the contents of the Zookeeper t
## Start up Druid services ## Start up Druid services
The following commands will assume that you are using the `micro-quickstart` single-machine configuration. If you are using a different configuration, the `bin` directory has equivalent scripts for each configuration, such as `bin/start-single-server-small`.
From the apache-druid-#{DRUIDVERSION} package root, run the following command: From the apache-druid-#{DRUIDVERSION} package root, run the following command:
```bash ```bash
bin/supervise -c quickstart/tutorial/conf/tutorial-cluster.conf ./bin/start-micro-quickstart
``` ```
This will bring up instances of Zookeeper and the Druid services, all running on the local machine, e.g.: This will bring up instances of Zookeeper and the Druid services, all running on the local machine, e.g.:
```bash ```bash
bin/supervise -c quickstart/tutorial/conf/tutorial-cluster.conf $ ./bin/start-micro-quickstart
[Wed Feb 27 12:46:13 2019] Running command[zk], logging to[/apache-druid-#{DRUIDVERSION}/var/sv/zk.log]: bin/run-zk quickstart/tutorial/conf [Fri May 3 11:40:50 2019] Running command[zk], logging to[/apache-druid-#{DRUIDVERSION}/var/sv/zk.log]: bin/run-zk conf
[Wed Feb 27 12:46:13 2019] Running command[coordinator], logging to[/apache-druid-#{DRUIDVERSION}/var/sv/coordinator.log]: bin/run-druid coordinator quickstart/tutorial/conf [Fri May 3 11:40:50 2019] Running command[coordinator-overlord], logging to[/apache-druid-#{DRUIDVERSION}/var/sv/coordinator-overlord.log]: bin/run-druid coordinator-overlord conf/druid/single-server/micro-quickstart
[Wed Feb 27 12:46:13 2019] Running command[broker], logging to[/apache-druid-#{DRUIDVERSION}/var/sv/broker.log]: bin/run-druid broker quickstart/tutorial/conf [Fri May 3 11:40:50 2019] Running command[broker], logging to[/apache-druid-#{DRUIDVERSION}/var/sv/broker.log]: bin/run-druid broker conf/druid/single-server/micro-quickstart
[Wed Feb 27 12:46:13 2019] Running command[router], logging to[/apache-druid-#{DRUIDVERSION}/var/sv/router.log]: bin/run-druid router quickstart/tutorial/conf [Fri May 3 11:40:50 2019] Running command[router], logging to[/apache-druid-#{DRUIDVERSION}/var/sv/router.log]: bin/run-druid router conf/druid/single-server/micro-quickstart
[Wed Feb 27 12:46:13 2019] Running command[historical], logging to[/apache-druid-#{DRUIDVERSION}/var/sv/historical.log]: bin/run-druid historical quickstart/tutorial/conf [Fri May 3 11:40:50 2019] Running command[historical], logging to[/apache-druid-#{DRUIDVERSION}/var/sv/historical.log]: bin/run-druid historical conf/druid/single-server/micro-quickstart
[Wed Feb 27 12:46:13 2019] Running command[overlord], logging to[/apache-druid-#{DRUIDVERSION}/var/sv/overlord.log]: bin/run-druid overlord quickstart/tutorial/conf [Fri May 3 11:40:50 2019] Running command[middleManager], logging to[/apache-druid-#{DRUIDVERSION}/var/sv/middleManager.log]: bin/run-druid middleManager conf/druid/single-server/micro-quickstart
[Wed Feb 27 12:46:13 2019] Running command[middleManager], logging to[/apache-druid-#{DRUIDVERSION}/var/sv/middleManager.log]: bin/run-druid middleManager quickstart/tutorial/conf
``` ```
All persistent state such as the cluster metadata store and segments for the services will be kept in the `var` directory under the apache-druid-#{DRUIDVERSION} package root. Logs for the services are located at `var/sv`. All persistent state such as the cluster metadata store and segments for the services will be kept in the `var` directory under the apache-druid-#{DRUIDVERSION} package root. Logs for the services are located at `var/sv`.
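For example, to follow one service's log while the cluster comes up (file names match the `var/sv` paths shown in the output above):

```bash
# Watch the Historical's log; other services log to var/sv/<service>.log.
tail -f var/sv/historical.log
```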
Later on, if you'd like to stop the services, CTRL-C to exit the `bin/supervise` script, which will terminate the Druid processes. Later on, if you'd like to stop the services, CTRL-C to exit the `bin/start-micro-quickstart` script, which will terminate the Druid processes.
### Resetting cluster state ### Resetting cluster state
If you want a clean start after stopping the services, delete the `var` directory and run the `bin/supervise` script again. If you want a clean start after stopping the services, delete the `var` directory and run the `bin/start-micro-quickstart` script again.
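A minimal reset sketch, assuming you are in the apache-druid-#{DRUIDVERSION} package root and have already stopped the services:

```bash
# Remove all locally stored state (metadata store, segment cache, task dirs, logs)
# and start fresh with the micro-quickstart configuration.
rm -rf var
./bin/start-micro-quickstart
```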
Once every service has started, you are now ready to load data. Once every service has started, you are now ready to load data.

View File

@ -34,7 +34,7 @@ else
CONFDIR="$2" CONFDIR="$2"
fi fi
CONFDIR="$(cd "$CONFDIR" && pwd)/druid" CONFDIR="$(cd "$CONFDIR" && pwd)"
WHEREAMI="$(cd "$WHEREAMI" && pwd)" WHEREAMI="$(cd "$WHEREAMI" && pwd)"
cd "$WHEREAMI/.." cd "$WHEREAMI/.."

View File

@ -1,4 +1,5 @@
# #!/bin/bash -eu
# Licensed to the Apache Software Foundation (ASF) under one # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file # or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information # distributed with this work for additional information
@ -15,12 +16,9 @@
# KIND, either express or implied. See the License for the # KIND, either express or implied. See the License for the
# specific language governing permissions and limitations # specific language governing permissions and limitations
# under the License. # under the License.
#
druid.service=druid/overlord PWD="$(pwd)"
druid.plaintextPort=8090 WHEREAMI="$(dirname "$0")"
WHEREAMI="$(cd "$WHEREAMI" && pwd)"
druid.indexer.queue.startDelay=PT30S exec "$WHEREAMI/supervise" -c "$WHEREAMI/../conf/supervise/cluster/data.conf"
druid.indexer.runner.type=remote
druid.indexer.storage.type=metadata

View File

@ -0,0 +1,24 @@
#!/bin/bash -eu
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
PWD="$(pwd)"
WHEREAMI="$(dirname "$0")"
WHEREAMI="$(cd "$WHEREAMI" && pwd)"
exec "$WHEREAMI/supervise" -c "$WHEREAMI/../conf/supervise/cluster/master-no-zk.conf"

View File

@ -0,0 +1,24 @@
#!/bin/bash -eu
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
PWD="$(pwd)"
WHEREAMI="$(dirname "$0")"
WHEREAMI="$(cd "$WHEREAMI" && pwd)"
exec "$WHEREAMI/supervise" -c "$WHEREAMI/../conf/supervise/cluster/master-with-zk.conf"

View File

@ -1,4 +1,5 @@
# #!/bin/bash -eu
# Licensed to the Apache Software Foundation (ASF) under one # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file # or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information # distributed with this work for additional information
@ -15,10 +16,9 @@
# KIND, either express or implied. See the License for the # KIND, either express or implied. See the License for the
# specific language governing permissions and limitations # specific language governing permissions and limitations
# under the License. # under the License.
#
druid.service=druid/coordinator PWD="$(pwd)"
druid.plaintextPort=8081 WHEREAMI="$(dirname "$0")"
WHEREAMI="$(cd "$WHEREAMI" && pwd)"
druid.coordinator.startDelay=PT30S exec "$WHEREAMI/supervise" -c "$WHEREAMI/../conf/supervise/cluster/query.conf"
druid.coordinator.period=PT30S

View File

@ -0,0 +1,24 @@
#!/bin/bash -eu
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
PWD="$(pwd)"
WHEREAMI="$(dirname "$0")"
WHEREAMI="$(cd "$WHEREAMI" && pwd)"
exec "$WHEREAMI/supervise" -c "$WHEREAMI/../conf/supervise/single-server/micro-quickstart.conf"

View File

@ -0,0 +1,24 @@
#!/bin/bash -eu
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
PWD="$(pwd)"
WHEREAMI="$(dirname "$0")"
WHEREAMI="$(cd "$WHEREAMI" && pwd)"
exec "$WHEREAMI/supervise" -c "$WHEREAMI/../conf/supervise/single-server/large.conf"

View File

@ -0,0 +1,24 @@
#!/bin/bash -eu
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
PWD="$(pwd)"
WHEREAMI="$(dirname "$0")"
WHEREAMI="$(cd "$WHEREAMI" && pwd)"
exec "$WHEREAMI/supervise" -c "$WHEREAMI/../conf/supervise/single-server/medium.conf"

View File

@ -0,0 +1,24 @@
#!/bin/bash -eu
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
PWD="$(pwd)"
WHEREAMI="$(dirname "$0")"
WHEREAMI="$(cd "$WHEREAMI" && pwd)"
exec "$WHEREAMI/supervise" -c "$WHEREAMI/../conf/supervise/single-server/small.conf"

View File

@ -0,0 +1,24 @@
#!/bin/bash -eu
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
PWD="$(pwd)"
WHEREAMI="$(dirname "$0")"
WHEREAMI="$(cd "$WHEREAMI" && pwd)"
exec "$WHEREAMI/supervise" -c "$WHEREAMI/../conf/supervise/single-server/xlarge.conf"

View File

@ -1,32 +0,0 @@
<?xml version="1.0" encoding="UTF-8" ?>
<!--
~ Licensed to the Apache Software Foundation (ASF) under one
~ or more contributor license agreements. See the NOTICE file
~ distributed with this work for additional information
~ regarding copyright ownership. The ASF licenses this file
~ to you under the Apache License, Version 2.0 (the
~ "License"); you may not use this file except in compliance
~ with the License. You may obtain a copy of the License at
~
~ http://www.apache.org/licenses/LICENSE-2.0
~
~ Unless required by applicable law or agreed to in writing,
~ software distributed under the License is distributed on an
~ "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
~ KIND, either express or implied. See the License for the
~ specific language governing permissions and limitations
~ under the License.
-->
<Configuration status="WARN">
<Appenders>
<Console name="Console" target="SYSTEM_OUT">
<PatternLayout pattern="%d{ISO8601} %p [%t] %c - %m%n"/>
</Console>
</Appenders>
<Loggers>
<Root level="info">
<AppenderRef ref="Console"/>
</Root>
</Loggers>
</Configuration>

View File

@ -24,7 +24,7 @@
# If you specify `druid.extensions.loadList=[]`, Druid won't load any extension from file system. # If you specify `druid.extensions.loadList=[]`, Druid won't load any extension from file system.
# If you don't specify `druid.extensions.loadList`, Druid will load all the extensions under root extension directory. # If you don't specify `druid.extensions.loadList`, Druid will load all the extensions under root extension directory.
# More info: http://druid.io/docs/latest/operations/including-extensions.html # More info: http://druid.io/docs/latest/operations/including-extensions.html
druid.extensions.loadList=["druid-hdfs-storage", "druid-kafka-indexing-service"] druid.extensions.loadList=["druid-hdfs-storage", "druid-kafka-indexing-service", "druid-datasketches"]
# If you have a different version of Hadoop, place your Hadoop client jar files in your hadoop-dependencies directory # If you have a different version of Hadoop, place your Hadoop client jar files in your hadoop-dependencies directory
# and uncomment the line below to point to your directory. # and uncomment the line below to point to your directory.

View File

@ -1,7 +1,7 @@
-server -server
-Xms8g -Xms8g
-Xmx8g -Xmx8g
-XX:MaxDirectMemorySize=4096m -XX:MaxDirectMemorySize=14g
-XX:+ExitOnOutOfMemoryError -XX:+ExitOnOutOfMemoryError
-Duser.timezone=UTC -Duser.timezone=UTC
-Dfile.encoding=UTF-8 -Dfile.encoding=UTF-8

View File

@ -21,15 +21,17 @@ druid.service=druid/historical
druid.plaintextPort=8083 druid.plaintextPort=8083
# HTTP server threads # HTTP server threads
druid.server.http.numThreads=25 druid.server.http.numThreads=60
# Processing threads and buffers # Processing threads and buffers
druid.processing.buffer.sizeBytes=536870912 druid.processing.buffer.sizeBytes=500000000
druid.processing.numThreads=7 druid.processing.numMergeBuffers=4
druid.processing.numThreads=16
druid.processing.tmpDir=var/druid/processing
# Segment storage # Segment storage
druid.segmentCache.locations=[{"path":"var/druid/segment-cache","maxSize":130000000000}] druid.segmentCache.locations=[{"path":"var/druid/segment-cache","maxSize":300000000000}]
druid.server.maxSize=130000000000 druid.server.maxSize=300000000000
# Query cache # Query cache
druid.historical.cache.useCache=true druid.historical.cache.useCache=true

View File

@ -21,18 +21,19 @@ druid.service=druid/middleManager
druid.plaintextPort=8091 druid.plaintextPort=8091
# Number of tasks per middleManager # Number of tasks per middleManager
druid.worker.capacity=3 druid.worker.capacity=4
# Task launch parameters # Task launch parameters
druid.indexer.runner.javaOpts=-server -Xmx2g -Duser.timezone=UTC -Dfile.encoding=UTF-8 -XX:+ExitOnOutOfMemoryError -Djava.util.logging.manager=org.apache.logging.log4j.jul.LogManager druid.indexer.runner.javaOpts=-server -Xms1g -Xmx1g -Duser.timezone=UTC -Dfile.encoding=UTF-8 -XX:+ExitOnOutOfMemoryError -Djava.util.logging.manager=org.apache.logging.log4j.jul.LogManager
druid.indexer.task.baseTaskDir=var/druid/task druid.indexer.task.baseTaskDir=var/druid/task
# HTTP server threads # HTTP server threads
druid.server.http.numThreads=25 druid.server.http.numThreads=60
# Processing threads and buffers on Peons # Processing threads and buffers on Peons
druid.indexer.fork.property.druid.processing.buffer.sizeBytes=536870912 druid.indexer.fork.property.druid.processing.numMergeBuffers=2
druid.indexer.fork.property.druid.processing.numThreads=2 druid.indexer.fork.property.druid.processing.buffer.sizeBytes=500000000
druid.indexer.fork.property.druid.processing.numThreads=1
# Hadoop indexing # Hadoop indexing
druid.indexer.task.hadoopWorkingPath=var/druid/hadoop-tmp druid.indexer.task.hadoopWorkingPath=var/druid/hadoop-tmp

View File

@ -1,6 +1,6 @@
-server -server
-Xms3g -Xms1g
-Xmx3g -Xmx1g
-XX:+ExitOnOutOfMemoryError -XX:+ExitOnOutOfMemoryError
-Duser.timezone=UTC -Duser.timezone=UTC
-Dfile.encoding=UTF-8 -Dfile.encoding=UTF-8

View File

@ -1,6 +1,6 @@
-server -server
-Xms3g -Xms1g
-Xmx3g -Xmx1g
-XX:+ExitOnOutOfMemoryError -XX:+ExitOnOutOfMemoryError
-Duser.timezone=UTC -Duser.timezone=UTC
-Dfile.encoding=UTF-8 -Dfile.encoding=UTF-8

View File

@ -1,7 +1,7 @@
-server -server
-Xms24g -Xms24g
-Xmx24g -Xmx24g
-XX:MaxDirectMemorySize=4096m -XX:MaxDirectMemorySize=12g
-XX:+ExitOnOutOfMemoryError -XX:+ExitOnOutOfMemoryError
-Duser.timezone=UTC -Duser.timezone=UTC
-Dfile.encoding=UTF-8 -Dfile.encoding=UTF-8

View File

@ -0,0 +1,38 @@
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
druid.service=druid/broker
druid.plaintextPort=8082
# HTTP server settings
druid.server.http.numThreads=60
# HTTP client settings
druid.broker.http.numConnections=50
druid.broker.http.maxQueuedBytes=5000000
# Processing threads and buffers
druid.processing.buffer.sizeBytes=500000000
druid.processing.numMergeBuffers=16
druid.processing.numThreads=1
druid.processing.tmpDir=var/druid/processing
# Query cache disabled -- push down caching and merging instead
druid.broker.cache.useCache=false
druid.broker.cache.populateCache=false

View File

@ -0,0 +1,10 @@
-server
-Xms1g
-Xmx1g
-XX:+UseG1GC
-XX:MaxDirectMemorySize=128m
-XX:+ExitOnOutOfMemoryError
-Duser.timezone=UTC
-Dfile.encoding=UTF-8
-Djava.io.tmpdir=var/tmp
-Djava.util.logging.manager=org.apache.logging.log4j.jul.LogManager

View File

@ -17,18 +17,25 @@
# under the License. # under the License.
# #
# # Extensions specified in the load list will be loaded by Druid
# Extensions # We are using local fs for deep storage - not recommended for production - use S3, HDFS, or NFS instead
# # We are using local derby for the metadata store - not recommended for production - use MySQL or Postgres instead
# This is not the full list of Druid extensions, but common ones that people often use. You may need to change this list # If you specify `druid.extensions.loadList=[]`, Druid won't load any extension from file system.
# based on your particular setup. # If you don't specify `druid.extensions.loadList`, Druid will load all the extensions under root extension directory.
druid.extensions.loadList=["druid-histogram", "druid-datasketches", "druid-lookups-cached-global"] # More info: http://druid.io/docs/latest/operations/including-extensions.html
druid.extensions.loadList=["druid-hdfs-storage", "druid-kafka-indexing-service", "druid-datasketches"]
# If you have a different version of Hadoop, place your Hadoop client jar files in your hadoop-dependencies directory # If you have a different version of Hadoop, place your Hadoop client jar files in your hadoop-dependencies directory
# and uncomment the line below to point to your directory. # and uncomment the line below to point to your directory.
#druid.extensions.hadoopDependenciesDir=/my/dir/hadoop-dependencies #druid.extensions.hadoopDependenciesDir=/my/dir/hadoop-dependencies
#
# Hostname
#
druid.host=localhost
# #
# Logging # Logging
# #
@ -40,7 +47,7 @@ druid.startup.logging.logProperties=true
# Zookeeper # Zookeeper
# #
druid.zk.service.host=zk.host.ip druid.zk.service.host=localhost
druid.zk.paths.base=/druid druid.zk.paths.base=/druid
# #
@ -49,8 +56,8 @@ druid.zk.paths.base=/druid
# For Derby server on your Druid Coordinator (only viable in a cluster with a single Coordinator, no fail-over): # For Derby server on your Druid Coordinator (only viable in a cluster with a single Coordinator, no fail-over):
druid.metadata.storage.type=derby druid.metadata.storage.type=derby
druid.metadata.storage.connector.connectURI=jdbc:derby://metadata.store.ip:1527/var/druid/metadata.db;create=true druid.metadata.storage.connector.connectURI=jdbc:derby://localhost:1527/var/druid/metadata.db;create=true
druid.metadata.storage.connector.host=metadata.store.ip druid.metadata.storage.connector.host=localhost
druid.metadata.storage.connector.port=1527 druid.metadata.storage.connector.port=1527
# For MySQL (make sure to include the MySQL JDBC driver on the classpath): # For MySQL (make sure to include the MySQL JDBC driver on the classpath):
@ -59,7 +66,7 @@ druid.metadata.storage.connector.port=1527
#druid.metadata.storage.connector.user=... #druid.metadata.storage.connector.user=...
#druid.metadata.storage.connector.password=... #druid.metadata.storage.connector.password=...
# For PostgreSQL (make sure to additionally include the Postgres extension): # For PostgreSQL:
#druid.metadata.storage.type=postgresql #druid.metadata.storage.type=postgresql
#druid.metadata.storage.connector.connectURI=jdbc:postgresql://db.example.com:5432/druid #druid.metadata.storage.connector.connectURI=jdbc:postgresql://db.example.com:5432/druid
#druid.metadata.storage.connector.user=... #druid.metadata.storage.connector.user=...
@ -73,7 +80,7 @@ druid.metadata.storage.connector.port=1527
druid.storage.type=local druid.storage.type=local
druid.storage.storageDirectory=var/druid/segments druid.storage.storageDirectory=var/druid/segments
# For HDFS (make sure to include the HDFS extension and that your Hadoop config files in the cp): # For HDFS:
#druid.storage.type=hdfs #druid.storage.type=hdfs
#druid.storage.storageDirectory=/druid/segments #druid.storage.storageDirectory=/druid/segments
@ -92,7 +99,7 @@ druid.storage.storageDirectory=var/druid/segments
druid.indexer.logs.type=file druid.indexer.logs.type=file
druid.indexer.logs.directory=var/druid/indexing-logs druid.indexer.logs.directory=var/druid/indexing-logs
# For HDFS (make sure to include the HDFS extension and that your Hadoop config files in the cp): # For HDFS:
#druid.indexer.logs.type=hdfs #druid.indexer.logs.type=hdfs
#druid.indexer.logs.directory=/druid/indexing-logs #druid.indexer.logs.directory=/druid/indexing-logs
@ -113,7 +120,7 @@ druid.selectors.coordinator.serviceName=druid/coordinator
# #
druid.monitoring.monitors=["org.apache.druid.java.util.metrics.JvmMonitor"] druid.monitoring.monitors=["org.apache.druid.java.util.metrics.JvmMonitor"]
druid.emitter=logging druid.emitter=noop
druid.emitter.logging.logLevel=info druid.emitter.logging.logLevel=info
# Storage type of double columns # Storage type of double columns
@ -121,7 +128,18 @@ druid.emitter.logging.logLevel=info
druid.indexing.doubleStorage=double druid.indexing.doubleStorage=double
#
# Security
#
druid.server.hiddenProperties=["druid.s3.accessKey","druid.s3.secretKey","druid.metadata.storage.connector.password"]
# #
# SQL # SQL
# #
druid.sql.enable=true druid.sql.enable=true
#
# Lookups
#
druid.lookup.enableLookupSyncOnStartup=false

View File

@ -0,0 +1,33 @@
<?xml version="1.0" encoding="UTF-8" ?>
<!--
~ Licensed to the Apache Software Foundation (ASF) under one
~ or more contributor license agreements. See the NOTICE file
~ distributed with this work for additional information
~ regarding copyright ownership. The ASF licenses this file
~ to you under the Apache License, Version 2.0 (the
~ "License"); you may not use this file except in compliance
~ with the License. You may obtain a copy of the License at
~
~ http://www.apache.org/licenses/LICENSE-2.0
~
~ Unless required by applicable law or agreed to in writing,
~ software distributed under the License is distributed on an
~ "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
~ KIND, either express or implied. See the License for the
~ specific language governing permissions and limitations
~ under the License.
-->
<Configuration status="WARN">
<Appenders>
<Console name="Console" target="SYSTEM_OUT">
<PatternLayout pattern="%d{ISO8601} %p [%t] %c - %m%n"/>
</Console>
</Appenders>
<Loggers>
<Root level="info">
<AppenderRef ref="Console"/>
</Root>
</Loggers>
</Configuration>

View File

@ -0,0 +1,10 @@
-server
-Xms16g
-Xmx16g
-XX:MaxDirectMemorySize=8g
-XX:+ExitOnOutOfMemoryError
-XX:+UseG1GC
-Duser.timezone=UTC
-Dfile.encoding=UTF-8
-Djava.io.tmpdir=var/tmp
-Djava.util.logging.manager=org.apache.logging.log4j.jul.LogManager

View File

@ -0,0 +1 @@
org.apache.druid.cli.Main server broker

View File

@ -0,0 +1,38 @@
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
druid.service=druid/broker
druid.plaintextPort=8082
# HTTP server settings
druid.server.http.numThreads=60
# HTTP client settings
druid.broker.http.numConnections=50
druid.broker.http.maxQueuedBytes=5000000
# Processing threads and buffers
druid.processing.buffer.sizeBytes=500000000
druid.processing.numMergeBuffers=8
druid.processing.numThreads=1
druid.processing.tmpDir=var/druid/processing
# Query cache disabled -- push down caching and merging instead
druid.broker.cache.useCache=false
druid.broker.cache.populateCache=false

View File

@ -0,0 +1,10 @@
-server
-Xms24g
-Xmx24g
-XX:+ExitOnOutOfMemoryError
-XX:+UseG1GC
-Duser.timezone=UTC
-Dfile.encoding=UTF-8
-Djava.io.tmpdir=var/tmp
-Djava.util.logging.manager=org.apache.logging.log4j.jul.LogManager
-Dderby.stream.error.file=var/druid/derby.log

View File

@ -0,0 +1 @@
org.apache.druid.cli.Main server coordinator

View File

@ -0,0 +1,33 @@
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
druid.service=druid/coordinator
druid.plaintextPort=8081
druid.coordinator.startDelay=PT10S
druid.coordinator.period=PT5S
# Run the overlord service in the coordinator process
druid.coordinator.asOverlord.enabled=true
druid.coordinator.asOverlord.overlordService=druid/overlord
druid.indexer.queue.startDelay=PT5S
druid.indexer.runner.type=remote
druid.indexer.storage.type=metadata

View File

@ -0,0 +1,10 @@
-server
-Xms16g
-Xmx16g
-XX:MaxDirectMemorySize=32g
-XX:+ExitOnOutOfMemoryError
-XX:+UseG1GC
-Duser.timezone=UTC
-Dfile.encoding=UTF-8
-Djava.io.tmpdir=var/tmp
-Djava.util.logging.manager=org.apache.logging.log4j.jul.LogManager

View File

@ -0,0 +1 @@
org.apache.druid.cli.Main server historical

View File

@ -0,0 +1,40 @@
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
druid.service=druid/historical
druid.plaintextPort=8083
# HTTP server threads
druid.server.http.numThreads=60
# Processing threads and buffers
druid.processing.buffer.sizeBytes=500000000
druid.processing.numMergeBuffers=8
druid.processing.numThreads=32
druid.processing.tmpDir=var/druid/processing
# Segment storage
druid.segmentCache.locations=[{"path":"var/druid/segment-cache","maxSize":300000000000}]
druid.server.maxSize=300000000000
# Query cache
druid.historical.cache.useCache=true
druid.historical.cache.populateCache=true
druid.cache.type=caffeine
druid.cache.sizeInBytes=1000000000

View File

@ -1,8 +1,8 @@
-server -server
-Xms256m -Xms256m
-Xmx256m -Xmx256m
-XX:MaxDirectMemorySize=768m
-XX:+ExitOnOutOfMemoryError -XX:+ExitOnOutOfMemoryError
-XX:+UseG1GC
-Duser.timezone=UTC -Duser.timezone=UTC
-Dfile.encoding=UTF-8 -Dfile.encoding=UTF-8
-Djava.io.tmpdir=var/tmp -Djava.io.tmpdir=var/tmp

View File

@ -0,0 +1 @@
org.apache.druid.cli.Main server middleManager

View File

@ -0,0 +1,39 @@
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
druid.service=druid/middleManager
druid.plaintextPort=8091
# Number of tasks per middleManager
druid.worker.capacity=8
# Task launch parameters
druid.indexer.runner.javaOpts=-server -Xms1g -Xmx1g -Duser.timezone=UTC -Dfile.encoding=UTF-8 -XX:+ExitOnOutOfMemoryError -Djava.util.logging.manager=org.apache.logging.log4j.jul.LogManager
druid.indexer.task.baseTaskDir=var/druid/task
# HTTP server threads
druid.server.http.numThreads=60
# Processing threads and buffers on Peons
druid.indexer.fork.property.druid.processing.numMergeBuffers=2
druid.indexer.fork.property.druid.processing.buffer.sizeBytes=100000000
druid.indexer.fork.property.druid.processing.numThreads=1
# Hadoop indexing
druid.indexer.task.hadoopWorkingPath=var/druid/hadoop-tmp

View File

@ -0,0 +1,10 @@
-server
-Xms1g
-Xmx1g
-XX:+UseG1GC
-XX:MaxDirectMemorySize=128m
-XX:+ExitOnOutOfMemoryError
-Duser.timezone=UTC
-Dfile.encoding=UTF-8
-Djava.io.tmpdir=var/tmp
-Djava.util.logging.manager=org.apache.logging.log4j.jul.LogManager

View File

@ -0,0 +1 @@
org.apache.druid.cli.Main server router

View File

@ -0,0 +1,145 @@
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
# Extensions specified in the load list will be loaded by Druid
# We are using local fs for deep storage - not recommended for production - use S3, HDFS, or NFS instead
# We are using local derby for the metadata store - not recommended for production - use MySQL or Postgres instead
# If you specify `druid.extensions.loadList=[]`, Druid won't load any extension from file system.
# If you don't specify `druid.extensions.loadList`, Druid will load all the extensions under root extension directory.
# More info: http://druid.io/docs/latest/operations/including-extensions.html
druid.extensions.loadList=["druid-hdfs-storage", "druid-kafka-indexing-service", "druid-datasketches"]
# If you have a different version of Hadoop, place your Hadoop client jar files in your hadoop-dependencies directory
# and uncomment the line below to point to your directory.
#druid.extensions.hadoopDependenciesDir=/my/dir/hadoop-dependencies
#
# Hostname
#
druid.host=localhost
#
# Logging
#
# Log all runtime properties on startup. Disable to avoid logging properties on startup:
druid.startup.logging.logProperties=true
#
# Zookeeper
#
druid.zk.service.host=localhost
druid.zk.paths.base=/druid
#
# Metadata storage
#
# For Derby server on your Druid Coordinator (only viable in a cluster with a single Coordinator, no fail-over):
druid.metadata.storage.type=derby
druid.metadata.storage.connector.connectURI=jdbc:derby://localhost:1527/var/druid/metadata.db;create=true
druid.metadata.storage.connector.host=localhost
druid.metadata.storage.connector.port=1527
# For MySQL (make sure to include the MySQL JDBC driver on the classpath):
#druid.metadata.storage.type=mysql
#druid.metadata.storage.connector.connectURI=jdbc:mysql://db.example.com:3306/druid
#druid.metadata.storage.connector.user=...
#druid.metadata.storage.connector.password=...
# For PostgreSQL:
#druid.metadata.storage.type=postgresql
#druid.metadata.storage.connector.connectURI=jdbc:postgresql://db.example.com:5432/druid
#druid.metadata.storage.connector.user=...
#druid.metadata.storage.connector.password=...
#
# Deep storage
#
# For local disk (only viable in a cluster if this is a network mount):
druid.storage.type=local
druid.storage.storageDirectory=var/druid/segments
# For HDFS:
#druid.storage.type=hdfs
#druid.storage.storageDirectory=/druid/segments
# For S3:
#druid.storage.type=s3
#druid.storage.bucket=your-bucket
#druid.storage.baseKey=druid/segments
#druid.s3.accessKey=...
#druid.s3.secretKey=...
#
# Indexing service logs
#
# For local disk (only viable in a cluster if this is a network mount):
druid.indexer.logs.type=file
druid.indexer.logs.directory=var/druid/indexing-logs
# For HDFS:
#druid.indexer.logs.type=hdfs
#druid.indexer.logs.directory=/druid/indexing-logs
# For S3:
#druid.indexer.logs.type=s3
#druid.indexer.logs.s3Bucket=your-bucket
#druid.indexer.logs.s3Prefix=druid/indexing-logs
#
# Service discovery
#
druid.selectors.indexing.serviceName=druid/overlord
druid.selectors.coordinator.serviceName=druid/coordinator
#
# Monitoring
#
druid.monitoring.monitors=["org.apache.druid.java.util.metrics.JvmMonitor"]
druid.emitter=noop
druid.emitter.logging.logLevel=info
# Storage type of double columns
# omitting this will lead to index double as float at the storage layer
druid.indexing.doubleStorage=double
#
# Security
#
druid.server.hiddenProperties=["druid.s3.accessKey","druid.s3.secretKey","druid.metadata.storage.connector.password"]
#
# SQL
#
druid.sql.enable=true
#
# Lookups
#
druid.lookup.enableLookupSyncOnStartup=false

View File

@ -0,0 +1,33 @@
<?xml version="1.0" encoding="UTF-8" ?>
<!--
~ Licensed to the Apache Software Foundation (ASF) under one
~ or more contributor license agreements. See the NOTICE file
~ distributed with this work for additional information
~ regarding copyright ownership. The ASF licenses this file
~ to you under the Apache License, Version 2.0 (the
~ "License"); you may not use this file except in compliance
~ with the License. You may obtain a copy of the License at
~
~ http://www.apache.org/licenses/LICENSE-2.0
~
~ Unless required by applicable law or agreed to in writing,
~ software distributed under the License is distributed on an
~ "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
~ KIND, either express or implied. See the License for the
~ specific language governing permissions and limitations
~ under the License.
-->
<Configuration status="WARN">
<Appenders>
<Console name="Console" target="SYSTEM_OUT">
<PatternLayout pattern="%d{ISO8601} %p [%t] %c - %m%n"/>
</Console>
</Appenders>
<Loggers>
<Root level="info">
<AppenderRef ref="Console"/>
</Root>
</Loggers>
</Configuration>

View File

@ -0,0 +1,10 @@
-server
-Xms8g
-Xmx8g
-XX:MaxDirectMemorySize=16g
-XX:+ExitOnOutOfMemoryError
-XX:+UseG1GC
-Duser.timezone=UTC
-Dfile.encoding=UTF-8
-Djava.io.tmpdir=var/tmp
-Djava.util.logging.manager=org.apache.logging.log4j.jul.LogManager

View File

@ -0,0 +1 @@
org.apache.druid.cli.Main server broker

View File

@ -0,0 +1,38 @@
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
druid.service=druid/broker
druid.plaintextPort=8082
# HTTP server settings
druid.server.http.numThreads=60
# HTTP client settings
druid.broker.http.numConnections=50
druid.broker.http.maxQueuedBytes=5000000
# Processing threads and buffers
druid.processing.buffer.sizeBytes=500000000
druid.processing.numMergeBuffers=4
druid.processing.numThreads=1
druid.processing.tmpDir=var/druid/processing
# Query cache disabled -- push down caching and merging instead
druid.broker.cache.useCache=false
druid.broker.cache.populateCache=false

View File

@ -0,0 +1,10 @@
-server
-Xms12g
-Xmx12g
-XX:+ExitOnOutOfMemoryError
-XX:+UseG1GC
-Duser.timezone=UTC
-Dfile.encoding=UTF-8
-Djava.io.tmpdir=var/tmp
-Djava.util.logging.manager=org.apache.logging.log4j.jul.LogManager
-Dderby.stream.error.file=var/druid/derby.log

View File

@ -0,0 +1 @@
org.apache.druid.cli.Main server coordinator

View File

@ -0,0 +1,33 @@
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
druid.service=druid/coordinator
druid.plaintextPort=8081
druid.coordinator.startDelay=PT10S
druid.coordinator.period=PT5S
# Run the overlord service in the coordinator process
druid.coordinator.asOverlord.enabled=true
druid.coordinator.asOverlord.overlordService=druid/overlord
druid.indexer.queue.startDelay=PT5S
druid.indexer.runner.type=remote
druid.indexer.storage.type=metadata

View File

@ -0,0 +1,10 @@
-server
-Xms8g
-Xmx8g
-XX:MaxDirectMemorySize=13g
-XX:+ExitOnOutOfMemoryError
-XX:+UseG1GC
-Duser.timezone=UTC
-Dfile.encoding=UTF-8
-Djava.io.tmpdir=var/tmp
-Djava.util.logging.manager=org.apache.logging.log4j.jul.LogManager

View File

@ -0,0 +1 @@
org.apache.druid.cli.Main server historical

View File

@ -0,0 +1,40 @@
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
druid.service=druid/historical
druid.plaintextPort=8083
# HTTP server threads
druid.server.http.numThreads=60
# Processing threads and buffers
druid.processing.buffer.sizeBytes=500000000
druid.processing.numMergeBuffers=4
druid.processing.numThreads=16
druid.processing.tmpDir=var/druid/processing
# Segment storage
druid.segmentCache.locations=[{"path":"var/druid/segment-cache","maxSize":300000000000}]
druid.server.maxSize=300000000000
# Query cache
druid.historical.cache.useCache=true
druid.historical.cache.populateCache=true
druid.cache.type=caffeine
druid.cache.sizeInBytes=256000000

View File

@ -1,8 +1,8 @@
-server -server
-Xms256m -Xms256m
-Xmx256m -Xmx256m
-XX:MaxDirectMemorySize=768m
-XX:+ExitOnOutOfMemoryError -XX:+ExitOnOutOfMemoryError
-XX:+UseG1GC
-Duser.timezone=UTC -Duser.timezone=UTC
-Dfile.encoding=UTF-8 -Dfile.encoding=UTF-8
-Djava.io.tmpdir=var/tmp -Djava.io.tmpdir=var/tmp

View File

@ -0,0 +1 @@
org.apache.druid.cli.Main server middleManager

View File

@ -0,0 +1,39 @@
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
druid.service=druid/middleManager
druid.plaintextPort=8091
# Number of tasks per middleManager
druid.worker.capacity=4
# Task launch parameters
druid.indexer.runner.javaOpts=-server -Xms1g -Xmx1g -Duser.timezone=UTC -Dfile.encoding=UTF-8 -XX:+ExitOnOutOfMemoryError -Djava.util.logging.manager=org.apache.logging.log4j.jul.LogManager
druid.indexer.task.baseTaskDir=var/druid/task
# HTTP server threads
druid.server.http.numThreads=60
# Processing threads and buffers on Peons
druid.indexer.fork.property.druid.processing.numMergeBuffers=2
druid.indexer.fork.property.druid.processing.buffer.sizeBytes=100000000
druid.indexer.fork.property.druid.processing.numThreads=1
# Hadoop indexing
druid.indexer.task.hadoopWorkingPath=var/druid/hadoop-tmp
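Each task slot above forks a separate peon JVM, so the memory footprint of this MiddleManager is dominated by the peons rather than by the MiddleManager process itself: roughly the heap from druid.indexer.runner.javaOpts plus the peon's direct memory (same buffer guideline as for the Historical) for every busy slot. A rough sketch with the values above:

# Worst-case peon footprint for the MiddleManager config above (a rough sketch;
# real usage depends on the task types being run).
worker_capacity = 4                  # druid.worker.capacity
peon_heap = 1 * 1024 ** 3            # -Xms1g/-Xmx1g in druid.indexer.runner.javaOpts
buffer_size = 100_000_000            # ...fork.property.druid.processing.buffer.sizeBytes
merge_buffers = 2                    # ...fork.property.druid.processing.numMergeBuffers
proc_threads = 1                     # ...fork.property.druid.processing.numThreads

peon_direct = buffer_size * (proc_threads + merge_buffers + 1)
total = worker_capacity * (peon_heap + peon_direct)
print(f"~{total / 1024 ** 3:.1f} GiB when all {worker_capacity} task slots are busy")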


@ -2,7 +2,7 @@
 -Xms512m
 -Xmx512m
 -XX:+UseG1GC
--XX:MaxDirectMemorySize=512m
+-XX:MaxDirectMemorySize=128m
 -XX:+ExitOnOutOfMemoryError
 -Duser.timezone=UTC
 -Dfile.encoding=UTF-8


@ -0,0 +1 @@
org.apache.druid.cli.Main server router


@ -0,0 +1,34 @@
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
druid.service=druid/router
druid.plaintextPort=8888
# HTTP proxy
druid.router.http.numConnections=50
druid.router.http.readTimeout=PT5M
druid.router.http.numMaxThreads=100
druid.server.http.numThreads=100
# Service discovery
druid.router.defaultBrokerServiceName=druid/broker
druid.router.coordinatorServiceName=druid/coordinator
# Management proxy to coordinator / overlord: required for unified web console.
druid.router.managementProxy.enabled=true
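druid.router.managementProxy.enabled=true lets the Router at port 8888 proxy Coordinator and Overlord APIs, which is what the unified web console served from the Router relies on. A quick liveness check, assuming a local single-machine deployment with the Router on its default port:

# Liveness check against the Router configured above (assumes localhost:8888).
import json
import urllib.request

with urllib.request.urlopen("http://localhost:8888/status") as resp:
    print("router is up, Druid version:", json.load(resp).get("version"))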


@ -0,0 +1,145 @@
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
# Extensions specified in the load list will be loaded by Druid
# We are using local fs for deep storage - not recommended for production - use S3, HDFS, or NFS instead
# We are using local derby for the metadata store - not recommended for production - use MySQL or Postgres instead
# If you specify `druid.extensions.loadList=[]`, Druid won't load any extension from file system.
# If you don't specify `druid.extensions.loadList`, Druid will load all the extensions under root extension directory.
# More info: http://druid.io/docs/latest/operations/including-extensions.html
druid.extensions.loadList=["druid-hdfs-storage", "druid-kafka-indexing-service", "druid-datasketches"]
# If you have a different version of Hadoop, place your Hadoop client jar files in your hadoop-dependencies directory
# and uncomment the line below to point to your directory.
#druid.extensions.hadoopDependenciesDir=/my/dir/hadoop-dependencies
#
# Hostname
#
druid.host=localhost
#
# Logging
#
# Log all runtime properties on startup. Disable to avoid logging properties on startup:
druid.startup.logging.logProperties=true
#
# Zookeeper
#
druid.zk.service.host=localhost
druid.zk.paths.base=/druid
#
# Metadata storage
#
# For Derby server on your Druid Coordinator (only viable in a cluster with a single Coordinator, no fail-over):
druid.metadata.storage.type=derby
druid.metadata.storage.connector.connectURI=jdbc:derby://localhost:1527/var/druid/metadata.db;create=true
druid.metadata.storage.connector.host=localhost
druid.metadata.storage.connector.port=1527
# For MySQL (make sure to include the MySQL JDBC driver on the classpath):
#druid.metadata.storage.type=mysql
#druid.metadata.storage.connector.connectURI=jdbc:mysql://db.example.com:3306/druid
#druid.metadata.storage.connector.user=...
#druid.metadata.storage.connector.password=...
# For PostgreSQL:
#druid.metadata.storage.type=postgresql
#druid.metadata.storage.connector.connectURI=jdbc:postgresql://db.example.com:5432/druid
#druid.metadata.storage.connector.user=...
#druid.metadata.storage.connector.password=...
#
# Deep storage
#
# For local disk (only viable in a cluster if this is a network mount):
druid.storage.type=local
druid.storage.storageDirectory=var/druid/segments
# For HDFS:
#druid.storage.type=hdfs
#druid.storage.storageDirectory=/druid/segments
# For S3:
#druid.storage.type=s3
#druid.storage.bucket=your-bucket
#druid.storage.baseKey=druid/segments
#druid.s3.accessKey=...
#druid.s3.secretKey=...
#
# Indexing service logs
#
# For local disk (only viable in a cluster if this is a network mount):
druid.indexer.logs.type=file
druid.indexer.logs.directory=var/druid/indexing-logs
# For HDFS:
#druid.indexer.logs.type=hdfs
#druid.indexer.logs.directory=/druid/indexing-logs
# For S3:
#druid.indexer.logs.type=s3
#druid.indexer.logs.s3Bucket=your-bucket
#druid.indexer.logs.s3Prefix=druid/indexing-logs
#
# Service discovery
#
druid.selectors.indexing.serviceName=druid/overlord
druid.selectors.coordinator.serviceName=druid/coordinator
#
# Monitoring
#
druid.monitoring.monitors=["org.apache.druid.java.util.metrics.JvmMonitor"]
druid.emitter=noop
druid.emitter.logging.logLevel=info
# Storage type of double columns
# omitting this will lead to doubles being indexed as floats at the storage layer
druid.indexing.doubleStorage=double
#
# Security
#
druid.server.hiddenProperties=["druid.s3.accessKey","druid.s3.secretKey","druid.metadata.storage.connector.password"]
#
# SQL
#
druid.sql.enable=true
#
# Lookups
#
druid.lookup.enableLookupSyncOnStartup=false
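Because druid.sql.enable=true, queries can be sent to Druid's SQL HTTP endpoint as soon as the services are up. A minimal sketch that queries the sys.segments system table through the Broker (assuming the Broker's default port 8082 on localhost; routing the same request through the Router on 8888 would also work):

# Minimal Druid SQL-over-HTTP example (a sketch; assumes a Broker on localhost:8082).
import json
import urllib.request

body = json.dumps({"query": "SELECT COUNT(*) AS num_segments FROM sys.segments"}).encode()
req = urllib.request.Request(
    "http://localhost:8082/druid/v2/sql",
    data=body,
    headers={"Content-Type": "application/json"},
)
with urllib.request.urlopen(req) as resp:
    print(json.load(resp))   # e.g. [{"num_segments": 0}] on a fresh install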


@ -0,0 +1,33 @@
<?xml version="1.0" encoding="UTF-8" ?>
<!--
~ Licensed to the Apache Software Foundation (ASF) under one
~ or more contributor license agreements. See the NOTICE file
~ distributed with this work for additional information
~ regarding copyright ownership. The ASF licenses this file
~ to you under the Apache License, Version 2.0 (the
~ "License"); you may not use this file except in compliance
~ with the License. You may obtain a copy of the License at
~
~ http://www.apache.org/licenses/LICENSE-2.0
~
~ Unless required by applicable law or agreed to in writing,
~ software distributed under the License is distributed on an
~ "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
~ KIND, either express or implied. See the License for the
~ specific language governing permissions and limitations
~ under the License.
-->
<Configuration status="WARN">
<Appenders>
<Console name="Console" target="SYSTEM_OUT">
<PatternLayout pattern="%d{ISO8601} %p [%t] %c - %m%n"/>
</Console>
</Appenders>
<Loggers>
<Root level="info">
<AppenderRef ref="Console"/>
</Root>
</Loggers>
</Configuration>


@ -0,0 +1,10 @@
-server
-Xms512m
-Xmx512m
-XX:MaxDirectMemorySize=768m
-XX:+ExitOnOutOfMemoryError
-XX:+UseG1GC
-Duser.timezone=UTC
-Dfile.encoding=UTF-8
-Djava.io.tmpdir=var/tmp
-Djava.util.logging.manager=org.apache.logging.log4j.jul.LogManager


@ -0,0 +1 @@
org.apache.druid.cli.Main server broker


@ -0,0 +1,10 @@
-server
-Xms256m
-Xmx256m
-XX:+ExitOnOutOfMemoryError
-XX:+UseG1GC
-Duser.timezone=UTC
-Dfile.encoding=UTF-8
-Djava.io.tmpdir=var/tmp
-Djava.util.logging.manager=org.apache.logging.log4j.jul.LogManager
-Dderby.stream.error.file=var/druid/derby.log


@ -0,0 +1 @@
org.apache.druid.cli.Main server coordinator


@ -0,0 +1,33 @@
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
druid.service=druid/coordinator
druid.plaintextPort=8081
druid.coordinator.startDelay=PT10S
druid.coordinator.period=PT5S
# Run the overlord service in the coordinator process
druid.coordinator.asOverlord.enabled=true
druid.coordinator.asOverlord.overlordService=druid/overlord
druid.indexer.queue.startDelay=PT5S
druid.indexer.runner.type=remote
druid.indexer.storage.type=metadata


@ -0,0 +1,10 @@
-server
-Xms512m
-Xmx512m
-XX:MaxDirectMemorySize=1280m
-XX:+ExitOnOutOfMemoryError
-XX:+UseG1GC
-Duser.timezone=UTC
-Dfile.encoding=UTF-8
-Djava.io.tmpdir=var/tmp
-Djava.util.logging.manager=org.apache.logging.log4j.jul.LogManager


@ -0,0 +1 @@
org.apache.druid.cli.Main server historical


@ -24,8 +24,8 @@ druid.plaintextPort=8083
 druid.server.http.numThreads=12
 # Processing threads and buffers
-druid.processing.buffer.sizeBytes=100000000
-druid.processing.numMergeBuffers=1
+druid.processing.buffer.sizeBytes=200000000
+druid.processing.numMergeBuffers=2
 druid.processing.numThreads=2
 druid.processing.tmpDir=var/druid/processing


@ -2,6 +2,7 @@
 -Xms64m
 -Xmx64m
 -XX:+ExitOnOutOfMemoryError
+-XX:+UseG1GC
 -Duser.timezone=UTC
 -Dfile.encoding=UTF-8
 -Djava.io.tmpdir=var/tmp


@ -0,0 +1 @@
org.apache.druid.cli.Main server middleManager


@ -0,0 +1,39 @@
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
druid.service=druid/middleManager
druid.plaintextPort=8091
# Number of tasks per middleManager
druid.worker.capacity=2
# Task launch parameters
druid.indexer.runner.javaOpts=-server -Xms1g -Xmx1g -Duser.timezone=UTC -Dfile.encoding=UTF-8 -XX:+ExitOnOutOfMemoryError -Djava.util.logging.manager=org.apache.logging.log4j.jul.LogManager
druid.indexer.task.baseTaskDir=var/druid/task
# HTTP server threads
druid.server.http.numThreads=12
# Processing threads and buffers on Peons
druid.indexer.fork.property.druid.processing.numMergeBuffers=2
druid.indexer.fork.property.druid.processing.buffer.sizeBytes=100000000
druid.indexer.fork.property.druid.processing.numThreads=1
# Hadoop indexing
druid.indexer.task.hadoopWorkingPath=var/druid/hadoop-tmp


@ -0,0 +1 @@
org.apache.druid.cli.Main server router


@ -0,0 +1,34 @@
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
druid.service=druid/router
druid.plaintextPort=8888
# HTTP proxy
druid.router.http.numConnections=50
druid.router.http.readTimeout=PT5M
druid.router.http.numMaxThreads=100
druid.server.http.numThreads=100
# Service discovery
druid.router.defaultBrokerServiceName=druid/broker
druid.router.coordinatorServiceName=druid/coordinator
# Management proxy to coordinator / overlord: required for unified web console.
druid.router.managementProxy.enabled=true


@ -0,0 +1,145 @@
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
# Extensions specified in the load list will be loaded by Druid
# We are using local fs for deep storage - not recommended for production - use S3, HDFS, or NFS instead
# We are using local derby for the metadata store - not recommended for production - use MySQL or Postgres instead
# If you specify `druid.extensions.loadList=[]`, Druid won't load any extension from file system.
# If you don't specify `druid.extensions.loadList`, Druid will load all the extensions under root extension directory.
# More info: http://druid.io/docs/latest/operations/including-extensions.html
druid.extensions.loadList=["druid-hdfs-storage", "druid-kafka-indexing-service", "druid-datasketches"]
# If you have a different version of Hadoop, place your Hadoop client jar files in your hadoop-dependencies directory
# and uncomment the line below to point to your directory.
#druid.extensions.hadoopDependenciesDir=/my/dir/hadoop-dependencies
#
# Hostname
#
druid.host=localhost
#
# Logging
#
# Log all runtime properties on startup. Disable to avoid logging properties on startup:
druid.startup.logging.logProperties=true
#
# Zookeeper
#
druid.zk.service.host=localhost
druid.zk.paths.base=/druid
#
# Metadata storage
#
# For Derby server on your Druid Coordinator (only viable in a cluster with a single Coordinator, no fail-over):
druid.metadata.storage.type=derby
druid.metadata.storage.connector.connectURI=jdbc:derby://localhost:1527/var/druid/metadata.db;create=true
druid.metadata.storage.connector.host=localhost
druid.metadata.storage.connector.port=1527
# For MySQL (make sure to include the MySQL JDBC driver on the classpath):
#druid.metadata.storage.type=mysql
#druid.metadata.storage.connector.connectURI=jdbc:mysql://db.example.com:3306/druid
#druid.metadata.storage.connector.user=...
#druid.metadata.storage.connector.password=...
# For PostgreSQL:
#druid.metadata.storage.type=postgresql
#druid.metadata.storage.connector.connectURI=jdbc:postgresql://db.example.com:5432/druid
#druid.metadata.storage.connector.user=...
#druid.metadata.storage.connector.password=...
#
# Deep storage
#
# For local disk (only viable in a cluster if this is a network mount):
druid.storage.type=local
druid.storage.storageDirectory=var/druid/segments
# For HDFS:
#druid.storage.type=hdfs
#druid.storage.storageDirectory=/druid/segments
# For S3:
#druid.storage.type=s3
#druid.storage.bucket=your-bucket
#druid.storage.baseKey=druid/segments
#druid.s3.accessKey=...
#druid.s3.secretKey=...
#
# Indexing service logs
#
# For local disk (only viable in a cluster if this is a network mount):
druid.indexer.logs.type=file
druid.indexer.logs.directory=var/druid/indexing-logs
# For HDFS:
#druid.indexer.logs.type=hdfs
#druid.indexer.logs.directory=/druid/indexing-logs
# For S3:
#druid.indexer.logs.type=s3
#druid.indexer.logs.s3Bucket=your-bucket
#druid.indexer.logs.s3Prefix=druid/indexing-logs
#
# Service discovery
#
druid.selectors.indexing.serviceName=druid/overlord
druid.selectors.coordinator.serviceName=druid/coordinator
#
# Monitoring
#
druid.monitoring.monitors=["org.apache.druid.java.util.metrics.JvmMonitor"]
druid.emitter=noop
druid.emitter.logging.logLevel=info
# Storage type of double columns
# omitting this will lead to doubles being indexed as floats at the storage layer
druid.indexing.doubleStorage=double
#
# Security
#
druid.server.hiddenProperties=["druid.s3.accessKey","druid.s3.secretKey","druid.metadata.storage.connector.password"]
#
# SQL
#
druid.sql.enable=true
#
# Lookups
#
druid.lookup.enableLookupSyncOnStartup=false


@ -0,0 +1,33 @@
<?xml version="1.0" encoding="UTF-8" ?>
<!--
~ Licensed to the Apache Software Foundation (ASF) under one
~ or more contributor license agreements. See the NOTICE file
~ distributed with this work for additional information
~ regarding copyright ownership. The ASF licenses this file
~ to you under the Apache License, Version 2.0 (the
~ "License"); you may not use this file except in compliance
~ with the License. You may obtain a copy of the License at
~
~ http://www.apache.org/licenses/LICENSE-2.0
~
~ Unless required by applicable law or agreed to in writing,
~ software distributed under the License is distributed on an
~ "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
~ KIND, either express or implied. See the License for the
~ specific language governing permissions and limitations
~ under the License.
-->
<Configuration status="WARN">
<Appenders>
<Console name="Console" target="SYSTEM_OUT">
<PatternLayout pattern="%d{ISO8601} %p [%t] %c - %m%n"/>
</Console>
</Appenders>
<Loggers>
<Root level="info">
<AppenderRef ref="Console"/>
</Root>
</Loggers>
</Configuration>


@ -0,0 +1,10 @@
-server
-Xms4g
-Xmx4g
-XX:MaxDirectMemorySize=3g
-XX:+ExitOnOutOfMemoryError
-XX:+UseG1GC
-Duser.timezone=UTC
-Dfile.encoding=UTF-8
-Djava.io.tmpdir=var/tmp
-Djava.util.logging.manager=org.apache.logging.log4j.jul.LogManager


@ -0,0 +1 @@
org.apache.druid.cli.Main server broker


@ -21,15 +21,17 @@ druid.service=druid/broker
 druid.plaintextPort=8082
 # HTTP server settings
-druid.server.http.numThreads=60
+druid.server.http.numThreads=50
 # HTTP client settings
-druid.broker.http.numConnections=10
+druid.broker.http.numConnections=40
+druid.broker.http.maxQueuedBytes=5000000
 # Processing threads and buffers
-druid.processing.buffer.sizeBytes=536870912
+druid.processing.buffer.sizeBytes=500000000
 druid.processing.numMergeBuffers=2
 druid.processing.numThreads=1
+druid.processing.tmpDir=var/druid/processing
 # Query cache disabled -- push down caching and merging instead
 druid.broker.cache.useCache=false


@ -1,7 +1,8 @@
 -server
--Xms128m
--Xmx128m
+-Xms6g
+-Xmx6g
 -XX:+ExitOnOutOfMemoryError
+-XX:+UseG1GC
 -Duser.timezone=UTC
 -Dfile.encoding=UTF-8
 -Djava.io.tmpdir=var/tmp


@ -0,0 +1 @@
org.apache.druid.cli.Main server coordinator


@ -0,0 +1,33 @@
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
druid.service=druid/coordinator
druid.plaintextPort=8081
druid.coordinator.startDelay=PT10S
druid.coordinator.period=PT5S
# Run the overlord service in the coordinator process
druid.coordinator.asOverlord.enabled=true
druid.coordinator.asOverlord.overlordService=druid/overlord
druid.indexer.queue.startDelay=PT5S
druid.indexer.runner.type=remote
druid.indexer.storage.type=metadata


@ -0,0 +1,10 @@
-server
-Xms4g
-Xmx4g
-XX:MaxDirectMemorySize=8g
-XX:+ExitOnOutOfMemoryError
-XX:+UseG1GC
-Duser.timezone=UTC
-Dfile.encoding=UTF-8
-Djava.io.tmpdir=var/tmp
-Djava.util.logging.manager=org.apache.logging.log4j.jul.LogManager


@ -0,0 +1 @@
org.apache.druid.cli.Main server historical


@ -0,0 +1,40 @@
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
druid.service=druid/historical
druid.plaintextPort=8083
# HTTP server threads
druid.server.http.numThreads=50
# Processing threads and buffers
druid.processing.buffer.sizeBytes=500000000
druid.processing.numMergeBuffers=2
druid.processing.numThreads=8
druid.processing.tmpDir=var/druid/processing
# Segment storage
druid.segmentCache.locations=[{"path":"var/druid/segment-cache","maxSize":300000000000}]
druid.server.maxSize=300000000000
# Query cache
druid.historical.cache.useCache=true
druid.historical.cache.populateCache=true
druid.cache.type=caffeine
druid.cache.sizeInBytes=256000000


@ -1,7 +1,8 @@
 -server
--Xms64m
--Xmx64m
+-Xms128m
+-Xmx128m
 -XX:+ExitOnOutOfMemoryError
+-XX:+UseG1GC
 -Duser.timezone=UTC
 -Dfile.encoding=UTF-8
 -Djava.io.tmpdir=var/tmp


@ -0,0 +1 @@
org.apache.druid.cli.Main server middleManager


@ -28,11 +28,12 @@ druid.indexer.runner.javaOpts=-server -Xms1g -Xmx1g -Duser.timezone=UTC -Dfile.e
 druid.indexer.task.baseTaskDir=var/druid/task
 # HTTP server threads
-druid.server.http.numThreads=9
+druid.server.http.numThreads=50
 # Processing threads and buffers on Peons
-druid.indexer.fork.property.druid.processing.buffer.sizeBytes=201326592
-druid.indexer.fork.property.druid.processing.numThreads=2
+druid.indexer.fork.property.druid.processing.numMergeBuffers=2
+druid.indexer.fork.property.druid.processing.buffer.sizeBytes=100000000
+druid.indexer.fork.property.druid.processing.numThreads=1
 # Hadoop indexing
 druid.indexer.task.hadoopWorkingPath=var/druid/hadoop-tmp


@ -0,0 +1,11 @@
-server
-Xms512m
-Xmx512m
-XX:+UseG1GC
-XX:MaxDirectMemorySize=128m
-XX:+ExitOnOutOfMemoryError
-XX:+UseG1GC
-Duser.timezone=UTC
-Dfile.encoding=UTF-8
-Djava.io.tmpdir=var/tmp
-Djava.util.logging.manager=org.apache.logging.log4j.jul.LogManager
