# druid/integration-tests-ex/cases/cluster/Common/environment-configs/common.env
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
LANG=C.UTF-8
LANGUAGE=C.UTF-8
LC_ALL=C.UTF-8
# JAVA OPTS
# -XX:HeapDumpPath is set in the container based on the service and instance.
DRUID_COMMON_JAVA_OPTS=-server -Duser.timezone=UTC -Dfile.encoding=UTF-8 -XX:+UseG1GC -XX:+ExitOnOutOfMemoryError -XX:+HeapDumpOnOutOfMemoryError
DRUID_SERVICE_JAVA_OPTS=
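# Illustrative example (not part of this file): per-service env files typically
# override DRUID_SERVICE_JAVA_OPTS with service-sized JVM settings, for instance:
#   DRUID_SERVICE_JAVA_OPTS=-Xms512m -Xmx512m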
# Debugging. Enabled by default. Always uses port 8000 in the container.
# Mapped to unique ports on the host in Docker Compose.
DEBUG_PORT=
DEBUG_OPTS=-agentlib:jdwp=transport=dt_socket,server=y,suspend=n,address=8000
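# Illustrative example (assumed usage): a docker-compose.yaml service can publish the
# fixed in-container JDWP port 8000 on a unique host port, e.g.
#   ports:
#     - "5005:8000"   # host port 5005 -> container debug port 8000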
# Extra classpath. Standard classpath includes Druid libs, Hadoop config, Druid config.
DRUID_CLASSPATH=
# Instance number, used when two or more services of the same type are running.
DRUID_INSTANCE=
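# Illustrative example (assumed usage): with two Historicals, the compose file could set
#   DRUID_INSTANCE=1   # first historical container
#   DRUID_INSTANCE=2   # second historical container
# so per-instance artifacts such as heap dumps do not collide.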
# Druid configs
# Mostly adapted from the micro-quickstart/_common file.
# Hostname
# druid.host is set on each host by the launch script
# Extensions specified in the load list will be loaded by Druid at runtime.
# The extension jars must be installed as part of Druid, or via the image
# build script.
#
# The launch script creates druid_extensions_loadList by combining two test-specific
# variables: druid_standard_loadList defined here, and druid_test_loadList, defined
# in a docker-compose.yaml file, for any test-specific extensions.
# See compose.md for more details.
druid_standard_loadList=mysql-metadata-storage,druid-it-tools,druid-lookups-cached-global,druid-histogram,druid-datasketches,druid-parquet-extensions,druid-avro-extensions,druid-protobuf-extensions,druid-orc-extensions,druid-kafka-indexing-service,druid-s3-extensions,druid-multi-stage-query
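# Illustrative example (assumed usage): a test-specific docker-compose.yaml can add
# extra extensions via druid_test_loadList, e.g.
#   environment:
#     - druid_test_loadList=druid-azure-extensions
# The launch script merges this with druid_standard_loadList above to produce the
# final druid_extensions_loadList.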
# Location of Hadoop dependencies provided at runtime in the shared directory.
druid_extensions_hadoopDependenciesDir=/shared/hadoop-dependencies
# Logging
druid_startup_logging_logProperties=true
# Zookeeper
# Name of the ZK container, mapped to a host name
druid_zk_service_host=zookeeper
druid_zk_paths_base=/druid
# Metadata storage
# For MySQL (MySQL JDBC installed in $DRUID_HOME/lib)
druid_metadata_storage_type=mysql
druid_metadata_storage_connector_connectURI=jdbc:mysql://metadata/druid
druid_metadata_storage_connector_user=druid
druid_metadata_storage_connector_password=diurd
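# Note (assumption about the launch script): lower-case druid_* variables are translated
# into runtime properties by replacing underscores with dots, e.g.
#   druid_metadata_storage_connector_connectURI -> druid.metadata.storage.connector.connectURI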
# Deep storage
druid_storage_type=local
druid_storage_storageDirectory=/shared/druid/storage
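# Illustrative example (hypothetical override): a cloud deep-storage test could replace
# the local settings above in its test-specific configuration, e.g. for S3:
#   druid_storage_type=s3
#   druid_storage_bucket=<test-bucket>
#   druid_storage_baseKey=druid/segments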
# Indexing service logs
druid_indexer_logs_type=file
druid_indexer_logs_directory=/shared/tasklogs
# Service discovery
druid_selectors_indexing_serviceName=druid/overlord
druid_selectors_coordinator_serviceName=druid/coordinator
# Monitoring
druid_monitoring_monitors=["org.apache.druid.java.util.metrics.JvmMonitor"]
druid_emitter=noop
druid_emitter_logging_logLevel=info
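# Illustrative example (hypothetical): additional monitors can be appended to the JSON
# list above, e.g.
#   druid_monitoring_monitors=["org.apache.druid.java.util.metrics.JvmMonitor","org.apache.druid.server.metrics.QueryCountStatsMonitor"]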
# Storage type of double columns
druid_indexing_doubleStorage=double
# SQL
druid_sql_enable=true
druid_sql_planner_authorizeSystemTablesDirectly=true
# Lookups
druid_lookup_numLookupLoadingThreads=1
# Test-specific
druid_server_http_numThreads=20
# Allow OPTIONS method for ITBasicAuthConfigurationTest.testSystemSchemaAccess
druid_server_http_allowedHttpMethods=["OPTIONS"]
druid_auth_basic_common_maxSyncRetries=20
druid_request_logging_type=slf4j
# TODO: Can these 4 move to the Coordinator config?
druid_coordinator_kill_supervisor_on=true
druid_coordinator_kill_supervisor_period=PT10S
druid_coordinator_kill_supervisor_durationToRetain=PT0M
druid_coordinator_period_metadataStoreManagementPeriod=PT10S
# TODO: Can the following be moved to Overlord? Or removed?
# Testing the legacy config from https://github.com/apache/druid/pull/10267
# Can remove this when the flag is no longer needed
druid_indexer_task_ignoreTimestampSpecForDruidInputSource=true
# TODO: Pass this from the test (AzureDeepStorage)