# druid/integration-tests/k8s/tiny-cluster.yaml

# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
apiVersion: "druid.apache.org/v1alpha1"
kind: "Druid"
metadata:
  name: tiny-cluster
spec:
  image: druid/cluster:v1
  # Optionally specify an image for all nodes. Can also be specified per node group.
  # imagePullSecrets:
  # - name: tutu
  startScript: /druid.sh
  podLabels:
    environment: stage
    release: alpha
  podAnnotations:
    dummy: k8s_extn_needs_atleast_one_annotation
  readinessProbe:
    httpGet:
      path: /status/health
      port: 8088
  securityContext:
    fsGroup: 0
    runAsUser: 0
    runAsGroup: 0
  containerSecurityContext:
    privileged: true
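  # Running as root in a privileged container is acceptable only for this disposable test cluster.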
  services:
    - spec:
        type: ClusterIP
        clusterIP: None
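  # clusterIP: None makes the Service above headless (no load-balanced virtual IP is allocated);
  # the per-node-group NodePort Services defined under nodes: expose each Druid service to the test harness.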
  commonConfigMountPath: "/opt/druid/conf/druid/cluster/_common"
  jvm.options: |-
    -server
    -XX:MaxDirectMemorySize=10240g
    -Duser.timezone=UTC
    -Dfile.encoding=UTF-8
    -Dlog4j.debug
    -Djava.util.logging.manager=org.apache.logging.log4j.jul.LogManager
  log4j.config: |-
    <?xml version="1.0" encoding="UTF-8" ?>
    <Configuration status="WARN">
      <Appenders>
        <Console name="Console" target="SYSTEM_OUT">
          <PatternLayout pattern="%d{ISO8601} %p [%t] %c - %m%n"/>
        </Console>
      </Appenders>
      <Loggers>
        <Root level="info">
          <AppenderRef ref="Console"/>
        </Root>
      </Loggers>
    </Configuration>
  common.runtime.properties: |
    #
    # Zookeeper-less Druid Cluster
    #
    druid.zk.service.enabled=false
    druid.discovery.type=k8s
    druid.discovery.k8s.clusterIdentifier=druid-it
    druid.serverview.type=http
    druid.coordinator.loadqueuepeon.type=http
    druid.indexer.runner.type=httpRemote
    # Metadata Store
    druid.metadata.storage.type=derby
    druid.metadata.storage.connector.connectURI=jdbc:derby://localhost:1527/var/druid/metadata.db;create=true
    druid.metadata.storage.connector.host=localhost
    druid.metadata.storage.connector.port=1527
    druid.metadata.storage.connector.createTables=true
    # Deep Storage
    druid.storage.type=local
    druid.storage.storageDirectory=/druid/data/deepstorage
    #
    # Extensions
    #
    druid.extensions.loadList=["druid-avro-extensions","druid-hdfs-storage", "druid-kafka-indexing-service", "druid-datasketches", "druid-kubernetes-extensions"]
    #
    # Service discovery
    #
    druid.selectors.indexing.serviceName=druid/overlord
    druid.selectors.coordinator.serviceName=druid/coordinator
    druid.indexer.logs.type=file
    druid.indexer.logs.directory=/druid/data/task-logs
    druid.indexer.task.baseDir=/druid/data/task-base
    druid.lookup.enableLookupSyncOnStartup=false
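    # Derby metadata storage and local deep storage are suitable only for a single-node test
    # cluster like this one; a production cluster would use an external metadata DB and shared deep storage.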
  env:
    - name: POD_NAME
      valueFrom:
        fieldRef:
          fieldPath: metadata.name
    - name: POD_NAMESPACE
      valueFrom:
        fieldRef:
          fieldPath: metadata.namespace
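  # The downward-API variables above identify the pod to the druid-kubernetes-extensions
  # discovery module, which is assumed to read POD_NAME and POD_NAMESPACE.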
  nodes:
    brokers:
      # Optionally specify to run the broker as a Deployment
      # kind: Deployment
      nodeType: "broker"
      # Optionally specify for broker nodes
      # imagePullSecrets:
      # - name: tutu
      druid.port: 8088
      services:
        - spec:
            type: NodePort
            ports:
              - name: broker-service-port
                nodePort: 30100
                port: 8088
                protocol: TCP
                targetPort: 8088
            selector:
              nodeSpecUniqueStr: druid-tiny-cluster-brokers
          metadata:
            name: broker-%s-service
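            # The %s in the Service name above is presumably templated by the druid-operator
            # with a per-node-group identifier; it is left untouched in this manifest.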
        - spec:
            type: ClusterIP
            clusterIP: None
      nodeConfigMountPath: "/opt/druid/conf/druid/cluster/query/broker"
      replicas: 1
      runtime.properties: |
        druid.service=druid/broker
        # HTTP server threads
        druid.broker.http.numConnections=5
        druid.server.http.numThreads=40
        # Processing threads and buffers
        druid.processing.buffer.sizeBytes=25000000
        druid.processing.numThreads=1
        druid.sql.enable=true
      extra.jvm.options: |-
        -Xmx512m
        -Xms512m
      volumeMounts:
        - mountPath: /druid/data
          name: data-volume
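      # REPLACE_VOLUMES below is a placeholder; the integration-test scripts are assumed to
      # substitute a real host path before this manifest is applied.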
      volumes:
        - name: data-volume
          hostPath:
            path: REPLACE_VOLUMES/tmp
      resources:
        requests:
          memory: "800Mi"
        limits:
          memory: "800Mi"
    coordinators:
      # Optionally specify to run the coordinator as a Deployment
      # kind: Deployment
      nodeType: "coordinator"
      druid.port: 8088
      services:
        - spec:
            type: NodePort
            ports:
              - name: coordinator-service-port
                nodePort: 30200
                port: 8088
                protocol: TCP
                targetPort: 8088
            selector:
              nodeSpecUniqueStr: druid-tiny-cluster-coordinators
          metadata:
            name: coordinator-%s-service
        - spec:
            type: ClusterIP
            clusterIP: None
      nodeConfigMountPath: "/opt/druid/conf/druid/cluster/master/coordinator-overlord"
      replicas: 1
      runtime.properties: |
        druid.service=druid/coordinator
        # Coordinator period and start delay
        druid.coordinator.startDelay=PT30S
        druid.coordinator.period=PT30S
        # Configure this coordinator to also run as Overlord
        druid.coordinator.asOverlord.enabled=true
        druid.coordinator.asOverlord.overlordService=druid/overlord
        druid.indexer.queue.startDelay=PT30S
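        # With asOverlord enabled, this Coordinator also acts as the Overlord, so no separate
        # Overlord node group is defined in this cluster.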
      extra.jvm.options: |-
        -Xmx800m
        -Xms800m
      volumeMounts:
        - mountPath: /druid/data
          name: data-volume
      volumes:
        - name: data-volume
          hostPath:
            path: REPLACE_VOLUMES/tmp
      resources:
        requests:
          memory: "1G"
        limits:
          memory: "1G"
    historicals:
      nodeType: "historical"
      druid.port: 8088
      services:
        - spec:
            type: NodePort
            ports:
              - name: historical-service-port
                nodePort: 30300
                port: 8088
                protocol: TCP
                targetPort: 8088
            selector:
              nodeSpecUniqueStr: druid-tiny-cluster-historicals
          metadata:
            name: historical-%s-service
        - spec:
            type: ClusterIP
            clusterIP: None
      nodeConfigMountPath: "/opt/druid/conf/druid/cluster/data/historical"
      replicas: 1
      runtime.properties: |
        druid.service=druid/historical
        druid.processing.buffer.sizeBytes=25000000
        druid.processing.numThreads=2
        # Segment storage
        druid.segmentCache.locations=[{"path":"/druid/data/segments","maxSize":10737418240}]
        druid.server.maxSize=10737418240
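        # 10737418240 bytes = 10 GiB of segment cache, matching druid.server.maxSize above.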
      extra.jvm.options: |-
        -Xmx512m
        -Xms512m
      volumeMounts:
        - mountPath: /druid/data
          name: data-volume
      volumes:
        - name: data-volume
          hostPath:
            path: REPLACE_VOLUMES/tmp
      resources:
        requests:
          memory: "1G"
        limits:
          memory: "1G"
    routers:
      nodeType: "router"
      druid.port: 8088
      services:
        - spec:
            type: NodePort
            ports:
              - name: router-service-port
                nodePort: 30400
                port: 8088
                protocol: TCP
                targetPort: 8088
            selector:
              nodeSpecUniqueStr: druid-tiny-cluster-routers
          metadata:
            name: router-%s-service
        - spec:
            type: ClusterIP
            clusterIP: None
      nodeConfigMountPath: "/opt/druid/conf/druid/cluster/query/router"
      replicas: 1
      runtime.properties: |
        druid.service=druid/router
        druid.plaintextPort=8088
        # HTTP proxy
        druid.router.http.numConnections=50
        druid.router.http.readTimeout=PT5M
        druid.router.http.numMaxThreads=100
        druid.server.http.numThreads=100
        # Service discovery
        druid.router.defaultBrokerServiceName=druid/broker
        druid.router.coordinatorServiceName=druid/coordinator
        # Management proxy to coordinator / overlord: required for unified web console.
        druid.router.managementProxy.enabled=true
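        # The Router hosts the unified web console; the NodePort above exposes it on port 30400 of the Kubernetes node.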
    middlemanagers:
      nodeType: "middleManager"
      nodeConfigMountPath: "/opt/druid/conf/druid/cluster/data/middleManager"
      druid.port: 8088
      services:
        - spec:
            type: NodePort
            ports:
              - name: middlemanager-service-port
                nodePort: 30500
                port: 8088
                protocol: TCP
                targetPort: 8088
            selector:
              nodeSpecUniqueStr: druid-tiny-cluster-middleManagers
          metadata:
            name: middlemanager-%s-service
        - spec:
            type: ClusterIP
            clusterIP: None
      replicas: 1
      runtime.properties: |
        druid.service=druid/middleManager
        druid.worker.capacity=2
        druid.indexer.runner.javaOpts=-server -Xms128m -Xmx128m -XX:MaxDirectMemorySize=256m -Duser.timezone=UTC -Dfile.encoding=UTF-8 -Djava.io.tmpdir=/druid/data/tmp -XX:+ExitOnOutOfMemoryError -Djava.util.logging.manager=org.apache.logging.log4j.jul.LogManager
        druid.indexer.task.baseTaskDir=/druid/data/baseTaskDir
        druid.server.http.numThreads=10
        druid.indexer.fork.property.druid.processing.buffer.sizeBytes=25000000
        druid.indexer.fork.property.druid.processing.numMergeBuffers=2
        druid.indexer.fork.property.druid.processing.numThreads=1
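        # Each of the 2 worker slots forks a separate peon JVM that inherits the javaOpts above
        # (128m heap, 256m direct memory).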
      extra.jvm.options: |-
        -Xmx64m
        -Xms64m
      volumeMounts:
        - mountPath: /druid/data
          name: data-volume
      volumes:
        - name: data-volume
          hostPath:
            path: REPLACE_VOLUMES/tmp
      resources:
        requests:
          memory: "1G"
        limits:
          memory: "1G"