From 5dadbdf4d051a6843c87a05948cade84eb042a40 Mon Sep 17 00:00:00 2001 From: Paul Rogers Date: Tue, 21 Feb 2023 15:03:02 -0800 Subject: [PATCH] Generate the IT docker-compose.yaml files (#13669) Generate IT docker-compose.sh files Generates test-specific docker-compose.sh files using a simple Python template script. --- .gitignore | 1 + docs/development/build.md | 5 +- integration-tests-ex/cases/cluster.sh | 330 ++++++++------ .../AzureDeepStorage/docker-compose.yaml | 132 ------ .../BatchIndex/docker-compose-indexer.yaml | 98 ---- .../cluster/BatchIndex/docker-compose.yaml | 98 ---- .../cases/cluster/Common/druid.yaml | 14 + .../environment-configs/coordinator.env | 6 + .../GcsDeepStorage/docker-compose.yaml | 155 ------- .../HighAvailability/docker-compose.yaml | 157 ------- .../MultiStageQuery/docker-compose.yaml | 98 ---- .../cluster/S3DeepStorage/docker-compose.yaml | 129 ------ .../testsEx/cluster/DruidClusterClient.java | 5 +- .../druid/testsEx/config/ClusterConfig.java | 5 + .../cases/templates/AzureDeepStorage.py | 43 ++ .../cases/templates/BatchIndex.py | 18 + .../cases/templates/GcsDeepStorage.py | 45 ++ .../cases/templates/HighAvailability.py | 85 ++++ .../cases/templates/MultiStageQuery.py | 26 ++ .../cases/templates/S3DeepStorage.py | 46 ++ .../cases/templates/template.py | 430 ++++++++++++++++++ integration-tests-ex/docs/compose.md | 58 +++ integration-tests-ex/docs/docker.md | 1 - integration-tests-ex/docs/druid-config.md | 2 +- integration-tests-ex/docs/guide.md | 42 ++ integration-tests-ex/image/build-image.sh | 1 - it.sh | 42 +- 27 files changed, 1057 insertions(+), 1015 deletions(-) delete mode 100644 integration-tests-ex/cases/cluster/AzureDeepStorage/docker-compose.yaml delete mode 100644 integration-tests-ex/cases/cluster/BatchIndex/docker-compose-indexer.yaml delete mode 100644 integration-tests-ex/cases/cluster/BatchIndex/docker-compose.yaml delete mode 100644 integration-tests-ex/cases/cluster/GcsDeepStorage/docker-compose.yaml delete mode 
100644 integration-tests-ex/cases/cluster/HighAvailability/docker-compose.yaml delete mode 100644 integration-tests-ex/cases/cluster/MultiStageQuery/docker-compose.yaml delete mode 100644 integration-tests-ex/cases/cluster/S3DeepStorage/docker-compose.yaml create mode 100644 integration-tests-ex/cases/templates/AzureDeepStorage.py create mode 100644 integration-tests-ex/cases/templates/BatchIndex.py create mode 100644 integration-tests-ex/cases/templates/GcsDeepStorage.py create mode 100644 integration-tests-ex/cases/templates/HighAvailability.py create mode 100644 integration-tests-ex/cases/templates/MultiStageQuery.py create mode 100644 integration-tests-ex/cases/templates/S3DeepStorage.py create mode 100644 integration-tests-ex/cases/templates/template.py diff --git a/.gitignore b/.gitignore index d6ecf2b7952..8365e4a3b7c 100644 --- a/.gitignore +++ b/.gitignore @@ -6,6 +6,7 @@ target *.tar.gz *.swp *.swo +*.pyc .classpath .idea .project diff --git a/docs/development/build.md b/docs/development/build.md index b093b2e4f99..15f0689631a 100644 --- a/docs/development/build.md +++ b/docs/development/build.md @@ -38,7 +38,8 @@ make sure it has `/master/` in the URL. ##### Other dependencies -- Distribution builds require Python 3.x and the `pyyaml` module +- Distribution builds require Python 3.x and the `pyyaml` module. +- Integration tests require `pyyaml` version 5.1 or later. ##### Downloading the source @@ -86,7 +87,7 @@ mvn clean install -Phadoop3 To generate distribution with hadoop3 dependencies, run : ```bash -mvn clean install -Papache-release,dist-hadoop3,rat,hadoop3 -DskipTests +mvn clean install -Papache-release,dist-hadoop3,rat,hadoop3 -DskipTests ``` #### Potential issues diff --git a/integration-tests-ex/cases/cluster.sh b/integration-tests-ex/cases/cluster.sh index 3c5bea3f81a..f7c6cb92f0b 100755 --- a/integration-tests-ex/cases/cluster.sh +++ b/integration-tests-ex/cases/cluster.sh @@ -21,16 +21,21 @@ # Maps category names to cluster names. 
The mapping here must match # that in the test category classes when @Cluster is used. +# Fail if any command fails +set -e + # Enable for debugging #set -x export MODULE_DIR=$(cd $(dirname $0) && pwd) function usage { - cat <&2 - exit 1 + usage 1>&2 + exit 1 fi CMD=$1 shift -# All commands need env vars -ENV_FILE=$MODULE_DIR/../image/target/env.sh -if [ ! -f $ENV_FILE ]; then - echo "Please build the Docker test image before testing" 1>&2 - exit 1 -fi +function check_env_file { + export ENV_FILE=$MODULE_DIR/../image/target/env.sh + if [ ! -f $ENV_FILE ]; then + echo "Please build the Docker test image before testing" 1>&2 + exit 1 + fi -source $ENV_FILE + source $ENV_FILE +} function category { - if [ $# -eq 0 ]; then - usage 1>&2 - exit 1 - fi - export CATEGORY=$1 - # The untranslated category is used for the local name of the - # shared folder. + if [ $# -eq 0 ]; then + usage 1>&2 + exit 1 + fi + export CATEGORY=$1 + # The untranslated category is used for the local name of the + # shared folder. - # DRUID_INTEGRATION_TEST_GROUP is used in - # docker-compose files and here. Despite the name, it is the - # name of the cluster configuration we want to run, not the - # test category. Multiple categories can map to the same cluster - # definition. + # DRUID_INTEGRATION_TEST_GROUP is used in + # docker-compose files and here. Despite the name, it is the + # name of the cluster configuration we want to run, not the + # test category. Multiple categories can map to the same cluster + # definition. - # Map from category name to shared cluster definition name. - # Add an entry here if you create a new category that shares - # a definition. 
- case $CATEGORY in - "InputSource") - export DRUID_INTEGRATION_TEST_GROUP=BatchIndex - ;; - "InputFormat") - export DRUID_INTEGRATION_TEST_GROUP=BatchIndex - ;; - "Catalog") - export DRUID_INTEGRATION_TEST_GROUP=BatchIndex - ;; - *) - export DRUID_INTEGRATION_TEST_GROUP=$CATEGORY - ;; - esac + # Map from category name to shared cluster definition name. + # Add an entry here if you create a new category that shares + # a definition. + case $CATEGORY in + "InputSource") + export DRUID_INTEGRATION_TEST_GROUP=BatchIndex + ;; + "InputFormat") + export DRUID_INTEGRATION_TEST_GROUP=BatchIndex + ;; + "Catalog") + export DRUID_INTEGRATION_TEST_GROUP=BatchIndex + ;; + *) + export DRUID_INTEGRATION_TEST_GROUP=$CATEGORY + ;; + esac - export CLUSTER_DIR=$MODULE_DIR/cluster/$DRUID_INTEGRATION_TEST_GROUP - if [ ! -d $CLUSTER_DIR ]; then - echo "Cluster directory $CLUSTER_DIR does not exist." 1>&2 - echo "$USAGE" 1>&2 - exit 1 - fi - - export TARGET_DIR=$MODULE_DIR/target - export SHARED_DIR=$TARGET_DIR/$CATEGORY - export ENV_FILE="$TARGET_DIR/${CATEGORY}.env" + export CLUSTER_DIR=$MODULE_DIR/cluster/$DRUID_INTEGRATION_TEST_GROUP + export TARGET_DIR=$MODULE_DIR/target + export SHARED_DIR=$TARGET_DIR/$CATEGORY + export ENV_FILE="$TARGET_DIR/${CATEGORY}.env" } # Dump lots of information to debug Docker failures when run inside # of a build environment where we can't inspect Docker directly. 
function show_status { - echo "====================================" - ls -l target/shared - echo "docker ps -a" - docker ps -a - # Was: --filter status=exited - for id in $(docker ps -a --format "{{.ID}}"); do - echo "====================================" - echo "Logs for Container ID $id" - docker logs $id | tail -n 20 - done - echo "====================================" + echo "====================================" + ls -l target/shared + echo "docker ps -a" + docker ps -a + # Was: --filter status=exited + for id in $(docker ps -a --format "{{.ID}}"); do + echo "====================================" + echo "Logs for Container ID $id" + docker logs $id | tail -n 20 + done + echo "====================================" } function build_shared_dir { - mkdir -p $SHARED_DIR - # Must start with an empty DB to keep MySQL happy - rm -rf $SHARED_DIR/db - mkdir -p $SHARED_DIR/logs - mkdir -p $SHARED_DIR/tasklogs - mkdir -p $SHARED_DIR/db - mkdir -p $SHARED_DIR/kafka - mkdir -p $SHARED_DIR/resources - cp $MODULE_DIR/assets/log4j2.xml $SHARED_DIR/resources - # Permissions in some build setups are screwed up. See above. The user - # which runs Docker does not have permission to write into the /shared - # directory. Force ownership to allow writing. - chmod -R a+rwx $SHARED_DIR + mkdir -p $SHARED_DIR + # Must start with an empty DB to keep MySQL happy + rm -rf $SHARED_DIR/db + mkdir -p $SHARED_DIR/logs + mkdir -p $SHARED_DIR/tasklogs + mkdir -p $SHARED_DIR/db + mkdir -p $SHARED_DIR/kafka + mkdir -p $SHARED_DIR/resources + cp $MODULE_DIR/assets/log4j2.xml $SHARED_DIR/resources + # Permissions in some build setups are screwed up. See above. The user + # which runs Docker does not have permission to write into the /shared + # directory. Force ownership to allow writing. + chmod -R a+rwx $SHARED_DIR } -# Each test must have a default docker-compose.yaml file which corresponds to using +# Either generate the docker-compose file, or use "static" versions. 
+function docker_file { + + # If a template exists, generate the docker-compose.yaml file. Copy over the Common + # folder. + TEMPLATE_DIR=$MODULE_DIR/templates + TEMPLATE_SCRIPT=${DRUID_INTEGRATION_TEST_GROUP}.py + if [ -f "$TEMPLATE_DIR/$TEMPLATE_SCRIPT" ]; then + export COMPOSE_DIR=$TARGET_DIR/cluster/$DRUID_INTEGRATION_TEST_GROUP + mkdir -p $COMPOSE_DIR + pushd $TEMPLATE_DIR > /dev/null + python3 $TEMPLATE_SCRIPT + popd > /dev/null + cp -r $MODULE_DIR/cluster/Common $TARGET_DIR/cluster + else + # Else, use the existing non-template file in place. + if [ ! -d $CLUSTER_DIR ]; then + echo "Cluster directory $CLUSTER_DIR does not exist." 1>&2 + echo "$USAGE" 1>&2 + exit 1 + fi + export COMPOSE_DIR=$CLUSTER_DIR + choose_static_file + fi +} + +# Each test that uses static (non-generated) docker compose files +# must have a default docker-compose.yaml file which corresponds to using # the MiddleManager (or no indexer). A test can optionally include a second file called # docker-compose-indexer.yaml which uses the Indexer in place of Middle Manager. -function docker_file { - compose_args="" - if [ -n "$USE_INDEXER" ]; then - # Sanity check: USE_INDEXER must be "indexer" or "middleManager" - # if it is set at all. - if [ "$USE_INDEXER" != "indexer" ] && [ "$USE_INDEXER" != "middleManager" ] - then - echo "USE_INDEXER must be 'indexer' or 'middleManager' (is '$USE_INDEXER')" 1>&2 - exit 1 - fi - if [ "$USE_INDEXER" == "indexer" ]; then - compose_file=docker-compose-indexer.yaml - if [ ! -f "$CLUSTER_DIR/$compose_file" ]; then - echo "USE_INDEXER=$USE_INDEXER, but $CLUSTER_DIR/$compose_file is missing" 1>&2 - exit 1 - fi - compose_args="-f $compose_file" - fi - fi - echo $compose_args +function choose_static_file { + export DOCKER_ARGS="" + if [ -n "$USE_INDEXER" ]; then + # Sanity check: USE_INDEXER must be "indexer" or "middleManager" + # if it is set at all. 
+ if [ "$USE_INDEXER" != "indexer" ] && [ "$USE_INDEXER" != "middleManager" ] + then + echo "USE_INDEXER must be 'indexer' or 'middleManager' (it is '$USE_INDEXER')" 1>&2 + exit 1 + fi + if [ "$USE_INDEXER" == "indexer" ]; then + compose_file=docker-compose-indexer.yaml + if [ ! -f "$CLUSTER_DIR/$compose_file" ]; then + echo "USE_INDEXER=$USE_INDEXER, but $CLUSTER_DIR/$compose_file is missing" 1>&2 + exit 1 + fi + export DOCKER_ARGS="-f $compose_file" + fi + fi } +function verify_docker_file { + if [ -f "$CLUSTER_DIR/docker-compose.yaml" ]; then + # Use the existing non-template file in place. + export COMPOSE_DIR=$CLUSTER_DIR + return 0 + fi + + # The docker compose file must have been generated via up + export COMPOSE_DIR=$TARGET_DIR/cluster/$DRUID_INTEGRATION_TEST_GROUP + if [ ! -f "$COMPOSE_DIR/docker-compose.yaml" ]; then + echo "$COMPOSE_DIR/docker-compose.yaml is missing. Is cluster up? Did you do a 'clean' after 'up'?" 1>&2 + fi +} + +# Determine if docker-compose is available. If not, assume Docker supports +# the compose subcommand +set +e +if which docker-compose > /dev/null +then + DOCKER_COMPOSE='docker-compose' +else + DOCKER_COMPOSE='docker compose' +fi +set -e + # Print environment for debugging #env @@ -177,37 +231,57 @@ fi set -e case $CMD in - "-h" ) - usage - ;; - "help" ) - usage - $DOCKER_COMPOSE help - ;; - "up" ) - category $* - echo "Starting cluster $DRUID_INTEGRATION_TEST_GROUP" - build_shared_dir - cd $CLUSTER_DIR - $DOCKER_COMPOSE `docker_file` up -d - # Enable the following for debugging - #show_status - ;; - "status" ) - category $* - cd $CLUSTER_DIR - show_status - ;; - "down" ) - category $* - # Enable the following for debugging - #show_status - cd $CLUSTER_DIR - $DOCKER_COMPOSE `docker_file` $CMD - ;; - "*" ) - category $* - cd $CLUSTER_DIR - $DOCKER_COMPOSE `docker_file` $CMD - ;; + "-h" ) + usage + ;; + "help" ) + usage + $DOCKER_COMPOSE help + ;; + "prepare" ) + check_env_file + category $* + build_shared_dir + docker_file + ;; 
+ "gen" ) + category $* + build_shared_dir + docker_file + echo "Generated file is in $COMPOSE_DIR" + ;; + "up" ) + check_env_file + category $* + echo "Starting cluster $DRUID_INTEGRATION_TEST_GROUP" + build_shared_dir + docker_file + cd $COMPOSE_DIR + $DOCKER_COMPOSE $DOCKER_ARGS up -d + # Enable the following for debugging + #show_status + ;; + "status" ) + check_env_file + category $* + docker_file + cd $COMPOSE_DIR + show_status + ;; + "down" ) + check_env_file + category $* + # Enable the following for debugging + #show_status + verify_docker_file + cd $COMPOSE_DIR + $DOCKER_COMPOSE $DOCKER_ARGS $CMD + ;; + "*" ) + check_env_file + category $* + verify_docker_file + cd $COMPOSE_DIR + $DOCKER_COMPOSE $DOCKER_ARGS $CMD + ;; esac diff --git a/integration-tests-ex/cases/cluster/AzureDeepStorage/docker-compose.yaml b/integration-tests-ex/cases/cluster/AzureDeepStorage/docker-compose.yaml deleted file mode 100644 index 75d9b046297..00000000000 --- a/integration-tests-ex/cases/cluster/AzureDeepStorage/docker-compose.yaml +++ /dev/null @@ -1,132 +0,0 @@ -# Licensed to the Apache Software Foundation (ASF) under one or more -# contributor license agreements. See the NOTICE file distributed with -# this work for additional information regarding copyright ownership. -# The ASF licenses this file to You under the Apache License, Version 2.0 -# (the "License"); you may not use this file except in compliance with -# the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# ------------------------------------------------------------------------- - -# Cluster for the Azure deep storage test. 
-# -# Required env vars: -# -# AZURE_ACCOUNT -# AZURE_KEY -# AZURE_CONTAINER - -networks: - druid-it-net: - name: druid-it-net - ipam: - config: - - subnet: 172.172.172.0/24 - -services: - zookeeper: - extends: - file: ../Common/dependencies.yaml - service: zookeeper - - metadata: - extends: - file: ../Common/dependencies.yaml - service: metadata - - coordinator: - extends: - file: ../Common/druid.yaml - service: coordinator - container_name: coordinator - environment: - - DRUID_INTEGRATION_TEST_GROUP=${DRUID_INTEGRATION_TEST_GROUP} - - druid_test_loadList=druid-azure-extensions - - druid_azure_account=${AZURE_ACCOUNT} - - druid_azure_key=${AZURE_KEY} - - druid_azure_container=${AZURE_CONTAINER} - # The frequency with which the coordinator polls the database - # for changes. The DB population code has to wait at least this - # long for the coordinator to notice changes. - - druid_manager_segments_pollDuration=PT5S - - druid_coordinator_period=PT10S - depends_on: - - zookeeper - - metadata - - overlord: - extends: - file: ../Common/druid.yaml - service: overlord - container_name: overlord - environment: - - DRUID_INTEGRATION_TEST_GROUP=${DRUID_INTEGRATION_TEST_GROUP} - - druid_test_loadList=druid-azure-extensions - - druid_azure_account=${AZURE_ACCOUNT} - - druid_azure_key=${AZURE_KEY} - - druid_azure_container=${AZURE_CONTAINER} - depends_on: - - zookeeper - - metadata - - broker: - extends: - file: ../Common/druid.yaml - service: broker - environment: - - DRUID_INTEGRATION_TEST_GROUP=${DRUID_INTEGRATION_TEST_GROUP} - - druid_test_loadList=druid-azure-extensions - - druid_azure_account=${AZURE_ACCOUNT} - - druid_azure_key=${AZURE_KEY} - - druid_azure_container=${AZURE_CONTAINER} - depends_on: - - zookeeper - - router: - extends: - file: ../Common/druid.yaml - service: router - environment: - - DRUID_INTEGRATION_TEST_GROUP=${DRUID_INTEGRATION_TEST_GROUP} - - druid_test_loadList=druid-azure-extensions - - druid_azure_account=${AZURE_ACCOUNT} - - 
druid_azure_key=${AZURE_KEY} - - druid_azure_container=${AZURE_CONTAINER} - depends_on: - - zookeeper - - historical: - extends: - file: ../Common/druid.yaml - service: historical - environment: - - DRUID_INTEGRATION_TEST_GROUP=${DRUID_INTEGRATION_TEST_GROUP} - - druid_test_loadList=druid-azure-extensions - - druid_azure_account=${AZURE_ACCOUNT} - - druid_azure_key=${AZURE_KEY} - - druid_azure_container=${AZURE_CONTAINER} - depends_on: - - zookeeper - - indexer: - extends: - file: ../Common/druid.yaml - service: indexer - environment: - - DRUID_INTEGRATION_TEST_GROUP=${DRUID_INTEGRATION_TEST_GROUP} - - druid_test_loadList=druid-azure-extensions - - druid_storage_type=azure - - druid_azure_account=${AZURE_ACCOUNT} - - druid_azure_key=${AZURE_KEY} - - druid_azure_container=${AZURE_CONTAINER} - volumes: - # Test data - - ../data:/resources - depends_on: - - zookeeper diff --git a/integration-tests-ex/cases/cluster/BatchIndex/docker-compose-indexer.yaml b/integration-tests-ex/cases/cluster/BatchIndex/docker-compose-indexer.yaml deleted file mode 100644 index f8235db6255..00000000000 --- a/integration-tests-ex/cases/cluster/BatchIndex/docker-compose-indexer.yaml +++ /dev/null @@ -1,98 +0,0 @@ -# Licensed to the Apache Software Foundation (ASF) under one or more -# contributor license agreements. See the NOTICE file distributed with -# this work for additional information regarding copyright ownership. -# The ASF licenses this file to You under the Apache License, Version 2.0 -# (the "License"); you may not use this file except in compliance with -# the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
- -networks: - druid-it-net: - name: druid-it-net - ipam: - config: - - subnet: 172.172.172.0/24 - -services: - zookeeper: - extends: - file: ../Common/dependencies.yaml - service: zookeeper - - metadata: - extends: - file: ../Common/dependencies.yaml - service: metadata - - coordinator: - extends: - file: ../Common/druid.yaml - service: coordinator - container_name: coordinator - environment: - - DRUID_INTEGRATION_TEST_GROUP=${DRUID_INTEGRATION_TEST_GROUP} - # The frequency with which the coordinator polls the database - # for changes. The DB population code has to wait at least this - # long for the coordinator to notice changes. - - druid_manager_segments_pollDuration=PT5S - - druid_coordinator_period=PT10S - depends_on: - - zookeeper - - metadata - - overlord: - extends: - file: ../Common/druid.yaml - service: overlord - container_name: overlord - environment: - - DRUID_INTEGRATION_TEST_GROUP=${DRUID_INTEGRATION_TEST_GROUP} - depends_on: - - zookeeper - - metadata - - broker: - extends: - file: ../Common/druid.yaml - service: broker - environment: - - DRUID_INTEGRATION_TEST_GROUP=${DRUID_INTEGRATION_TEST_GROUP} - depends_on: - - zookeeper - - router: - extends: - file: ../Common/druid.yaml - service: router - environment: - - DRUID_INTEGRATION_TEST_GROUP=${DRUID_INTEGRATION_TEST_GROUP} - depends_on: - - zookeeper - - historical: - extends: - file: ../Common/druid.yaml - service: historical - environment: - - DRUID_INTEGRATION_TEST_GROUP=${DRUID_INTEGRATION_TEST_GROUP} - depends_on: - - zookeeper - - indexer: - extends: - file: ../Common/druid.yaml - service: indexer - environment: - - DRUID_INTEGRATION_TEST_GROUP=${DRUID_INTEGRATION_TEST_GROUP} - volumes: - # Test data - - ../../resources:/resources - depends_on: - - zookeeper diff --git a/integration-tests-ex/cases/cluster/BatchIndex/docker-compose.yaml b/integration-tests-ex/cases/cluster/BatchIndex/docker-compose.yaml deleted file mode 100644 index 7778c1d3d05..00000000000 --- 
a/integration-tests-ex/cases/cluster/BatchIndex/docker-compose.yaml +++ /dev/null @@ -1,98 +0,0 @@ -# Licensed to the Apache Software Foundation (ASF) under one or more -# contributor license agreements. See the NOTICE file distributed with -# this work for additional information regarding copyright ownership. -# The ASF licenses this file to You under the Apache License, Version 2.0 -# (the "License"); you may not use this file except in compliance with -# the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -networks: - druid-it-net: - name: druid-it-net - ipam: - config: - - subnet: 172.172.172.0/24 - -services: - zookeeper: - extends: - file: ../Common/dependencies.yaml - service: zookeeper - - metadata: - extends: - file: ../Common/dependencies.yaml - service: metadata - - coordinator: - extends: - file: ../Common/druid.yaml - service: coordinator - container_name: coordinator - environment: - - DRUID_INTEGRATION_TEST_GROUP=${DRUID_INTEGRATION_TEST_GROUP} - # The frequency with which the coordinator polls the database - # for changes. The DB population code has to wait at least this - # long for the coordinator to notice changes. 
- - druid_manager_segments_pollDuration=PT5S - - druid_coordinator_period=PT10S - depends_on: - - zookeeper - - metadata - - overlord: - extends: - file: ../Common/druid.yaml - service: overlord - container_name: overlord - environment: - - DRUID_INTEGRATION_TEST_GROUP=${DRUID_INTEGRATION_TEST_GROUP} - depends_on: - - zookeeper - - metadata - - broker: - extends: - file: ../Common/druid.yaml - service: broker - environment: - - DRUID_INTEGRATION_TEST_GROUP=${DRUID_INTEGRATION_TEST_GROUP} - depends_on: - - zookeeper - - router: - extends: - file: ../Common/druid.yaml - service: router - environment: - - DRUID_INTEGRATION_TEST_GROUP=${DRUID_INTEGRATION_TEST_GROUP} - depends_on: - - zookeeper - - historical: - extends: - file: ../Common/druid.yaml - service: historical - environment: - - DRUID_INTEGRATION_TEST_GROUP=${DRUID_INTEGRATION_TEST_GROUP} - depends_on: - - zookeeper - - middlemanager: - extends: - file: ../Common/druid.yaml - service: middlemanager - environment: - - DRUID_INTEGRATION_TEST_GROUP=${DRUID_INTEGRATION_TEST_GROUP} - volumes: - # Test data - - ../../resources:/resources - depends_on: - - zookeeper diff --git a/integration-tests-ex/cases/cluster/Common/druid.yaml b/integration-tests-ex/cases/cluster/Common/druid.yaml index bd5caad2232..c76482924c4 100644 --- a/integration-tests-ex/cases/cluster/Common/druid.yaml +++ b/integration-tests-ex/cases/cluster/Common/druid.yaml @@ -61,6 +61,8 @@ services: - environment-configs/common.env - environment-configs/overlord.env - ${OVERRIDE_ENV} + environment: + - DRUID_INTEGRATION_TEST_GROUP=${DRUID_INTEGRATION_TEST_GROUP} coordinator: image: ${DRUID_IT_IMAGE_NAME} @@ -78,6 +80,8 @@ services: - environment-configs/common.env - environment-configs/coordinator.env - ${OVERRIDE_ENV} + environment: + - DRUID_INTEGRATION_TEST_GROUP=${DRUID_INTEGRATION_TEST_GROUP} historical: image: ${DRUID_IT_IMAGE_NAME} @@ -95,6 +99,8 @@ services: - environment-configs/common.env - environment-configs/historical.env - 
${OVERRIDE_ENV} + environment: + - DRUID_INTEGRATION_TEST_GROUP=${DRUID_INTEGRATION_TEST_GROUP} middlemanager: image: ${DRUID_IT_IMAGE_NAME} @@ -124,6 +130,8 @@ services: - environment-configs/common.env - environment-configs/middlemanager.env - ${OVERRIDE_ENV} + environment: + - DRUID_INTEGRATION_TEST_GROUP=${DRUID_INTEGRATION_TEST_GROUP} indexer: image: ${DRUID_IT_IMAGE_NAME} @@ -141,6 +149,8 @@ services: - environment-configs/common.env - environment-configs/indexer.env - ${OVERRIDE_ENV} + environment: + - DRUID_INTEGRATION_TEST_GROUP=${DRUID_INTEGRATION_TEST_GROUP} broker: image: ${DRUID_IT_IMAGE_NAME} @@ -158,6 +168,8 @@ services: - environment-configs/common.env - environment-configs/broker.env - ${OVERRIDE_ENV} + environment: + - DRUID_INTEGRATION_TEST_GROUP=${DRUID_INTEGRATION_TEST_GROUP} router: image: ${DRUID_IT_IMAGE_NAME} @@ -175,3 +187,5 @@ services: - environment-configs/common.env - environment-configs/router.env - ${OVERRIDE_ENV} + environment: + - DRUID_INTEGRATION_TEST_GROUP=${DRUID_INTEGRATION_TEST_GROUP} diff --git a/integration-tests-ex/cases/cluster/Common/environment-configs/coordinator.env b/integration-tests-ex/cases/cluster/Common/environment-configs/coordinator.env index fa20cf6ccaf..7a0b2ae3995 100644 --- a/integration-tests-ex/cases/cluster/Common/environment-configs/coordinator.env +++ b/integration-tests-ex/cases/cluster/Common/environment-configs/coordinator.env @@ -37,3 +37,9 @@ druid_coordinator_period_indexingPeriod=PT180000S # 2x indexing period so that kill period is valid druid_coordinator_kill_period=PT360000S druid_coordinator_period=PT1S + +# The frequency with which the coordinator polls the database +# for changes. The DB population code has to wait at least this +# long for the coordinator to notice changes. 
+druid_manager_segments_pollDuration=PT5S +druid_coordinator_period=PT10S diff --git a/integration-tests-ex/cases/cluster/GcsDeepStorage/docker-compose.yaml b/integration-tests-ex/cases/cluster/GcsDeepStorage/docker-compose.yaml deleted file mode 100644 index 3b024104b72..00000000000 --- a/integration-tests-ex/cases/cluster/GcsDeepStorage/docker-compose.yaml +++ /dev/null @@ -1,155 +0,0 @@ -# Licensed to the Apache Software Foundation (ASF) under one or more -# contributor license agreements. See the NOTICE file distributed with -# this work for additional information regarding copyright ownership. -# The ASF licenses this file to You under the Apache License, Version 2.0 -# (the "License"); you may not use this file except in compliance with -# the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# ------------------------------------------------------------------------- - -# Cluster for the Google Cluster Storage (GCS) deep storage test. -# -# Required env vars: -# -# GOOGLE_BUCKET -# GOOGLE_PREFIX -# GOOGLE_APPLICATION_CREDENTIALS - must point to a file that holds the Google -# credentials. Mounted into each Druid container. 
- -networks: - druid-it-net: - name: druid-it-net - ipam: - config: - - subnet: 172.172.172.0/24 - -services: - zookeeper: - extends: - file: ../Common/dependencies.yaml - service: zookeeper - - metadata: - extends: - file: ../Common/dependencies.yaml - service: metadata - - coordinator: - extends: - file: ../Common/druid.yaml - service: coordinator - container_name: coordinator - environment: - - DRUID_INTEGRATION_TEST_GROUP=${DRUID_INTEGRATION_TEST_GROUP} - - druid_test_loadList=druid-google-extensions - - druid_storage_type=google - - druid_google_bucket=${GOOGLE_BUCKET} - - druid_google_prefix=${GOOGLE_PREFIX} - - GOOGLE_APPLICATION_CREDENTIALS=/resources/credentials.json - # The frequency with which the coordinator polls the database - # for changes. The DB population code has to wait at least this - # long for the coordinator to notice changes. - - druid_manager_segments_pollDuration=PT5S - - druid_coordinator_period=PT10S - volumes: - # Mount credentials file - - ${GOOGLE_APPLICATION_CREDENTIALS}:/resources/credentials.json - depends_on: - - zookeeper - - metadata - - overlord: - extends: - file: ../Common/druid.yaml - service: overlord - container_name: overlord - environment: - - DRUID_INTEGRATION_TEST_GROUP=${DRUID_INTEGRATION_TEST_GROUP} - - druid_test_loadList=druid-google-extensions - - druid_storage_type=google - - druid_google_bucket=${GOOGLE_BUCKET} - - druid_google_prefix=${GOOGLE_PREFIX} - - GOOGLE_APPLICATION_CREDENTIALS=/resources/credentials.json - volumes: - # Mount credentials file - - ${GOOGLE_APPLICATION_CREDENTIALS}:/resources/credentials.json - depends_on: - - zookeeper - - metadata - - broker: - extends: - file: ../Common/druid.yaml - service: broker - environment: - - DRUID_INTEGRATION_TEST_GROUP=${DRUID_INTEGRATION_TEST_GROUP} - - druid_test_loadList=druid-google-extensions - - druid_storage_type=google - - druid_google_bucket=${GOOGLE_BUCKET} - - druid_google_prefix=${GOOGLE_PREFIX} - - 
GOOGLE_APPLICATION_CREDENTIALS=/resources/credentials.json - volumes: - # Mount credentials file - - ${GOOGLE_APPLICATION_CREDENTIALS}:/resources/credentials.json - depends_on: - - zookeeper - - router: - extends: - file: ../Common/druid.yaml - service: router - environment: - - DRUID_INTEGRATION_TEST_GROUP=${DRUID_INTEGRATION_TEST_GROUP} - - druid_test_loadList=druid-google-extensions - - druid_storage_type=google - - druid_google_bucket=${GOOGLE_BUCKET} - - druid_google_prefix=${GOOGLE_PREFIX} - - GOOGLE_APPLICATION_CREDENTIALS=/resources/credentials.json - volumes: - # Mount credentials file - - ${GOOGLE_APPLICATION_CREDENTIALS}:/resources/credentials.json - depends_on: - - zookeeper - - historical: - extends: - file: ../Common/druid.yaml - service: historical - environment: - - DRUID_INTEGRATION_TEST_GROUP=${DRUID_INTEGRATION_TEST_GROUP} - - druid_test_loadList=druid-google-extensions - - druid_storage_type=google - - druid_google_bucket=${GOOGLE_BUCKET} - - druid_google_prefix=${GOOGLE_PREFIX} - - GOOGLE_APPLICATION_CREDENTIALS=/resources/credentials.json - volumes: - # Mount credentials file - - ${GOOGLE_APPLICATION_CREDENTIALS}:/resources/credentials.json - depends_on: - - zookeeper - - indexer: - extends: - file: ../Common/druid.yaml - service: indexer - environment: - - DRUID_INTEGRATION_TEST_GROUP=${DRUID_INTEGRATION_TEST_GROUP} - - druid_test_loadList=druid-google-extensions - - druid_storage_type=google - - druid_google_bucket=${GOOGLE_BUCKET} - - druid_google_prefix=${GOOGLE_PREFIX} - - GOOGLE_APPLICATION_CREDENTIALS=/resources/credentials.json - volumes: - # Mount credentials file - - ${GOOGLE_APPLICATION_CREDENTIALS}:/resources/credentials.json - # Test data - - ../data:/resources - depends_on: - - zookeeper diff --git a/integration-tests-ex/cases/cluster/HighAvailability/docker-compose.yaml b/integration-tests-ex/cases/cluster/HighAvailability/docker-compose.yaml deleted file mode 100644 index bcecf4d9eee..00000000000 --- 
a/integration-tests-ex/cases/cluster/HighAvailability/docker-compose.yaml +++ /dev/null @@ -1,157 +0,0 @@ -# Licensed to the Apache Software Foundation (ASF) under one or more -# contributor license agreements. See the NOTICE file distributed with -# this work for additional information regarding copyright ownership. -# The ASF licenses this file to You under the Apache License, Version 2.0 -# (the "License"); you may not use this file except in compliance with -# the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -networks: - druid-it-net: - name: druid-it-net - ipam: - config: - - subnet: 172.172.172.0/24 - -services: - zookeeper: - extends: - file: ../Common/dependencies.yaml - service: zookeeper - - metadata: - extends: - file: ../Common/dependencies.yaml - service: metadata - - coordinator-one: - extends: - file: ../Common/druid.yaml - service: coordinator - container_name: coordinator-one - environment: - - DRUID_INTEGRATION_TEST_GROUP=${DRUID_INTEGRATION_TEST_GROUP} - - DRUID_INSTANCE=one - # The frequency with which the coordinator polls the database - # for changes. The DB population code has to wait at least this - # long for the coordinator to notice changes. - - druid_manager_segments_pollDuration=PT5S - - druid_coordinator_period=PT10S - - druid_host=coordinator-one - depends_on: - - zookeeper - - metadata - - # The second Coordinator (and Overlord) cannot extend - # The base service: they need distinct ports. 
- coordinator-two: - image: ${DRUID_IT_IMAGE_NAME} - container_name: coordinator-two - networks: - druid-it-net: - ipv4_address: 172.172.172.120 - ports: - - 18081:8081 - - 18281:8281 - - 15006:8000 - volumes: - - ${SHARED_DIR}:/shared - env_file: - - ../Common/environment-configs/common.env - - ../Common/environment-configs/coordinator.env - environment: - - DRUID_INTEGRATION_TEST_GROUP=${DRUID_INTEGRATION_TEST_GROUP} - - DRUID_INSTANCE=two - - druid_manager_segments_pollDuration=PT5S - - druid_coordinator_period=PT10S - - druid_host=coordinator-two - depends_on: - - zookeeper - - metadata - - overlord-one: - extends: - file: ../Common/druid.yaml - service: overlord - container_name: overlord-one - environment: - - DRUID_INTEGRATION_TEST_GROUP=${DRUID_INTEGRATION_TEST_GROUP} - - DRUID_INSTANCE=one - - druid_host=overlord-one - depends_on: - - zookeeper - - metadata - - overlord-two: - image: ${DRUID_IT_IMAGE_NAME} - container_name: overlord-two - networks: - druid-it-net: - ipv4_address: 172.172.172.110 - ports: - - 18090:8090 - - 18290:8290 - - 15009:8000 - volumes: - - ${SHARED_DIR}:/shared - env_file: - - ../Common/environment-configs/common.env - - ../Common/environment-configs/overlord.env - environment: - - DRUID_INTEGRATION_TEST_GROUP=${DRUID_INTEGRATION_TEST_GROUP} - - DRUID_INSTANCE=two - - druid_host=overlord-two - depends_on: - - zookeeper - - metadata - - broker: - extends: - file: ../Common/druid.yaml - service: broker - environment: - - DRUID_INTEGRATION_TEST_GROUP=${DRUID_INTEGRATION_TEST_GROUP} - depends_on: - - zookeeper - - router: - extends: - file: ../Common/druid.yaml - service: router - environment: - - DRUID_INTEGRATION_TEST_GROUP=${DRUID_INTEGRATION_TEST_GROUP} - depends_on: - - zookeeper - - # The custom node role has no base definition. Also, there is - # no environment file: the needed environment settings are - # given here. 
- custom-node-role: - image: ${DRUID_IT_IMAGE_NAME} - container_name: custom-node-role - networks: - druid-it-net: - ipv4_address: 172.172.172.90 - ports: - - 50011:50011 - - 9301:9301 - - 9501:9501 - - 5010:8000 - volumes: - - ${SHARED_DIR}:/shared - env_file: - - ../Common/environment-configs/common.env - environment: - - DRUID_INTEGRATION_TEST_GROUP=${DRUID_INTEGRATION_TEST_GROUP} - - DRUID_SERVICE=custom-node-role - - SERVICE_DRUID_JAVA_OPTS=-Xmx64m -Xms64m - - druid_host=custom-node-role - - druid_auth_basic_common_cacheDirectory=/tmp/authCache/custom_node_role - - druid_server_https_crlPath=/tls/revocations.crl diff --git a/integration-tests-ex/cases/cluster/MultiStageQuery/docker-compose.yaml b/integration-tests-ex/cases/cluster/MultiStageQuery/docker-compose.yaml deleted file mode 100644 index da658b25db3..00000000000 --- a/integration-tests-ex/cases/cluster/MultiStageQuery/docker-compose.yaml +++ /dev/null @@ -1,98 +0,0 @@ -# Licensed to the Apache Software Foundation (ASF) under one or more -# contributor license agreements. See the NOTICE file distributed with -# this work for additional information regarding copyright ownership. -# The ASF licenses this file to You under the Apache License, Version 2.0 -# (the "License"); you may not use this file except in compliance with -# the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
- -networks: - druid-it-net: - name: druid-it-net - ipam: - config: - - subnet: 172.172.172.0/24 - -services: - zookeeper: - extends: - file: ../Common/dependencies.yaml - service: zookeeper - - metadata: - extends: - file: ../Common/dependencies.yaml - service: metadata - - coordinator: - extends: - file: ../Common/druid.yaml - service: coordinator - container_name: coordinator - environment: - - DRUID_INTEGRATION_TEST_GROUP=${DRUID_INTEGRATION_TEST_GROUP} - - druid_manager_segments_pollDuration=PT5S - - druid_coordinator_period=PT10S - depends_on: - - zookeeper - - metadata - - overlord: - extends: - file: ../Common/druid.yaml - service: overlord - container_name: overlord - environment: - - DRUID_INTEGRATION_TEST_GROUP=${DRUID_INTEGRATION_TEST_GROUP} - depends_on: - - zookeeper - - metadata - - broker: - extends: - file: ../Common/druid.yaml - service: broker - environment: - - DRUID_INTEGRATION_TEST_GROUP=${DRUID_INTEGRATION_TEST_GROUP} - depends_on: - - zookeeper - - router: - extends: - file: ../Common/druid.yaml - service: router - environment: - - DRUID_INTEGRATION_TEST_GROUP=${DRUID_INTEGRATION_TEST_GROUP} - depends_on: - - zookeeper - - historical: - extends: - file: ../Common/druid.yaml - service: historical - environment: - - DRUID_INTEGRATION_TEST_GROUP=${DRUID_INTEGRATION_TEST_GROUP} - depends_on: - - zookeeper - - indexer: - extends: - file: ../Common/druid.yaml - service: indexer - environment: - - DRUID_INTEGRATION_TEST_GROUP=${DRUID_INTEGRATION_TEST_GROUP} - - druid_msq_intermediate_storage_enable=true - - druid_msq_intermediate_storage_type=local - - druid_msq_intermediate_storage_basePath=/shared/durablestorage/ - volumes: - # Test data - - ../../resources:/resources - depends_on: - - zookeeper diff --git a/integration-tests-ex/cases/cluster/S3DeepStorage/docker-compose.yaml b/integration-tests-ex/cases/cluster/S3DeepStorage/docker-compose.yaml deleted file mode 100644 index b7a3d745594..00000000000 --- 
a/integration-tests-ex/cases/cluster/S3DeepStorage/docker-compose.yaml +++ /dev/null @@ -1,129 +0,0 @@ -# Licensed to the Apache Software Foundation (ASF) under one or more -# contributor license agreements. See the NOTICE file distributed with -# this work for additional information regarding copyright ownership. -# The ASF licenses this file to You under the Apache License, Version 2.0 -# (the "License"); you may not use this file except in compliance with -# the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# ------------------------------------------------------------------------- - -# Cluster for the S3 deep storage test. -# -# Required env vars: -# -# AWS_REGION -# AWS_ACCESS_KEY_ID -# AWS_SECRET_ACCESS_KEY - -networks: - druid-it-net: - name: druid-it-net - ipam: - config: - - subnet: 172.172.172.0/24 - -services: - zookeeper: - extends: - file: ../Common/dependencies.yaml - service: zookeeper - - metadata: - extends: - file: ../Common/dependencies.yaml - service: metadata - - coordinator: - extends: - file: ../Common/druid.yaml - service: coordinator - container_name: coordinator - environment: - - DRUID_INTEGRATION_TEST_GROUP=${DRUID_INTEGRATION_TEST_GROUP} - # The frequency with which the coordinator polls the database - # for changes. The DB population code has to wait at least this - # long for the coordinator to notice changes. 
- - druid_manager_segments_pollDuration=PT5S - - druid_coordinator_period=PT10S - - AWS_REGION=${AWS_REGION} - - druid_s3_accessKey=${AWS_ACCESS_KEY_ID} - - druid_s3_secretKey=${AWS_SECRET_ACCESS_KEY} - depends_on: - - zookeeper - - metadata - - overlord: - extends: - file: ../Common/druid.yaml - service: overlord - container_name: overlord - environment: - - DRUID_INTEGRATION_TEST_GROUP=${DRUID_INTEGRATION_TEST_GROUP} - - AWS_REGION=${AWS_REGION} - - druid_s3_accessKey=${AWS_ACCESS_KEY_ID} - - druid_s3_secretKey=${AWS_SECRET_ACCESS_KEY} - depends_on: - - zookeeper - - metadata - - broker: - extends: - file: ../Common/druid.yaml - service: broker - environment: - - DRUID_INTEGRATION_TEST_GROUP=${DRUID_INTEGRATION_TEST_GROUP} - - AWS_REGION=${AWS_REGION} - - druid_s3_accessKey=${AWS_ACCESS_KEY_ID} - - druid_s3_secretKey=${AWS_SECRET_ACCESS_KEY} - depends_on: - - zookeeper - - router: - extends: - file: ../Common/druid.yaml - service: router - environment: - - DRUID_INTEGRATION_TEST_GROUP=${DRUID_INTEGRATION_TEST_GROUP} - - AWS_REGION=${AWS_REGION} - - druid_s3_accessKey=${AWS_ACCESS_KEY_ID} - - druid_s3_secretKey=${AWS_SECRET_ACCESS_KEY} - depends_on: - - zookeeper - - historical: - extends: - file: ../Common/druid.yaml - service: historical - environment: - - DRUID_INTEGRATION_TEST_GROUP=${DRUID_INTEGRATION_TEST_GROUP} - - AWS_REGION=${AWS_REGION} - - druid_s3_accessKey=${AWS_ACCESS_KEY_ID} - - druid_s3_secretKey=${AWS_SECRET_ACCESS_KEY} - depends_on: - - zookeeper - - indexer: - extends: - file: ../Common/druid.yaml - service: indexer - environment: - - DRUID_INTEGRATION_TEST_GROUP=${DRUID_INTEGRATION_TEST_GROUP} - - druid_storage_type=s3 - - druid_storage_bucket=${DRUID_CLOUD_BUCKET} - # Using DRUID_CLOUD_PATH env as baseKey as well. 
- - druid_storage_baseKey=${DRUID_CLOUD_PATH} - - druid_s3_accessKey=${AWS_ACCESS_KEY_ID} - - druid_s3_secretKey=${AWS_SECRET_ACCESS_KEY} - - AWS_REGION=${AWS_REGION} - volumes: - # Test data - - ../data:/resources - depends_on: - - zookeeper diff --git a/integration-tests-ex/cases/src/test/java/org/apache/druid/testsEx/cluster/DruidClusterClient.java b/integration-tests-ex/cases/src/test/java/org/apache/druid/testsEx/cluster/DruidClusterClient.java index 82d31718795..9e2b0ec7278 100644 --- a/integration-tests-ex/cases/src/test/java/org/apache/druid/testsEx/cluster/DruidClusterClient.java +++ b/integration-tests-ex/cases/src/test/java/org/apache/druid/testsEx/cluster/DruidClusterClient.java @@ -32,6 +32,7 @@ import org.apache.druid.java.util.http.client.response.StatusResponseHandler; import org.apache.druid.java.util.http.client.response.StatusResponseHolder; import org.apache.druid.server.DruidNode; import org.apache.druid.testing.guice.TestClient; +import org.apache.druid.testsEx.config.ClusterConfig; import org.apache.druid.testsEx.config.ResolvedConfig; import org.apache.druid.testsEx.config.ResolvedDruidService; import org.apache.druid.testsEx.config.ResolvedService.ResolvedInstance; @@ -335,8 +336,8 @@ public class DruidClusterClient */ public void validate() { - RE exception = new RE("Just building for the stack trace"); - log.info(exception, "Starting cluster validation"); + log.info("Starting cluster validation"); + log.info("This cluster uses " + (ClusterConfig.isIndexer() ? 
"Indexer" : "Middle Manager")); for (ResolvedDruidService service : config.requireDruid().values()) { for (ResolvedInstance instance : service.requireInstances()) { validateInstance(service, instance); diff --git a/integration-tests-ex/cases/src/test/java/org/apache/druid/testsEx/config/ClusterConfig.java b/integration-tests-ex/cases/src/test/java/org/apache/druid/testsEx/config/ClusterConfig.java index 7c030d9ab4e..e8d277a41ba 100644 --- a/integration-tests-ex/cases/src/test/java/org/apache/druid/testsEx/config/ClusterConfig.java +++ b/integration-tests-ex/cases/src/test/java/org/apache/druid/testsEx/config/ClusterConfig.java @@ -170,6 +170,11 @@ public class ClusterConfig return new ResolvedConfig(clusterName, resolveIncludes(), configTags); } + public static boolean isIndexer() + { + return "indexer".equals(System.getenv("USE_INDEXER")); + } + /** * Create the set of configuration tags for this run. At present, the only options * are "middleManager" or "indexer" corresponding to the value of the diff --git a/integration-tests-ex/cases/templates/AzureDeepStorage.py b/integration-tests-ex/cases/templates/AzureDeepStorage.py new file mode 100644 index 00000000000..3c893832a3a --- /dev/null +++ b/integration-tests-ex/cases/templates/AzureDeepStorage.py @@ -0,0 +1,43 @@ +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+# See the License for the specific language governing permissions and +# limitations under the License. + +from template import BaseTemplate, generate + +class Template(BaseTemplate): + + def gen_header_comment(self): + self.emit(''' +# Cluster for the Azure deep storage test. +# +# Required env vars: +# +# AZURE_ACCOUNT +# AZURE_KEY +# AZURE_CONTAINER + +''') + + def extend_druid_service(self, service): + self.add_env(service, 'druid_test_loadList', 'druid-azure-extensions') + self.add_property(service, 'druid.storage.type', 'azure') + self.add_property(service, 'druid.azure.account', '${AZURE_ACCOUNT}') + self.add_property(service, 'druid.azure.key', '${AZURE_KEY}') + self.add_property(service, 'druid.azure.container', '${AZURE_CONTAINER}') + + # This test uses different data than the default. + def define_data_dir(self, service): + self.add_volume(service, '../data', '/resources') + +generate(__file__, Template()) diff --git a/integration-tests-ex/cases/templates/BatchIndex.py b/integration-tests-ex/cases/templates/BatchIndex.py new file mode 100644 index 00000000000..27f8acdbffb --- /dev/null +++ b/integration-tests-ex/cases/templates/BatchIndex.py @@ -0,0 +1,18 @@ +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +from template import BaseTemplate, generate + +generate(__file__, BaseTemplate()) diff --git a/integration-tests-ex/cases/templates/GcsDeepStorage.py b/integration-tests-ex/cases/templates/GcsDeepStorage.py new file mode 100644 index 00000000000..7f91ce021e7 --- /dev/null +++ b/integration-tests-ex/cases/templates/GcsDeepStorage.py @@ -0,0 +1,45 @@ +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from template import BaseTemplate, generate + +class Template(BaseTemplate): + + def gen_header_comment(self): + self.emit(''' +# Cluster for the Google Cloud Storage (GCS) deep storage test. +# +# Required env vars: +# +# GOOGLE_BUCKET +# GOOGLE_PREFIX +# GOOGLE_APPLICATION_CREDENTIALS - must point to a file that holds the Google +# credentials. Mounted into each Druid container.
+ +''') + + def extend_druid_service(self, service): + self.add_env(service, 'druid_test_loadList', 'druid-google-extensions') + self.add_property(service, 'druid.storage.type', 'google') + self.add_property(service, 'druid.google.bucket', '${GOOGLE_BUCKET}') + self.add_property(service, 'druid.google.prefix', '${GOOGLE_PREFIX}') + self.add_env(service, 'GOOGLE_APPLICATION_CREDENTIALS', '/resources/credentials.json') + self.add_volume(service, '${GOOGLE_APPLICATION_CREDENTIALS}', '/resources/credentials.json') + + # This test uses different data than the default. + def define_data_dir(self, service): + self.add_volume(service, '../data', '/resources') + +generate(__file__, Template()) diff --git a/integration-tests-ex/cases/templates/HighAvailability.py b/integration-tests-ex/cases/templates/HighAvailability.py new file mode 100644 index 00000000000..b00fc0fa1f2 --- /dev/null +++ b/integration-tests-ex/cases/templates/HighAvailability.py @@ -0,0 +1,85 @@ +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from template import BaseTemplate, generate +from template import COORDINATOR, ZOO_KEEPER, METADATA, OVERLORD + +# The second Coordinator (and Overlord) cannot extend +# The base service: they need distinct ports. 
+class Template(BaseTemplate): + + def define_coordinator(self): + self.define_coordinator_one() + self.define_coordinator_two() + + def define_coordinator_one(self): + service_name = COORDINATOR + '-one' + service = self.define_master_service(service_name, COORDINATOR) + service['container_name'] = service_name + self.add_env(service, 'DRUID_INSTANCE', 'one') + self.add_env(service, 'druid_host', service_name) + service['container_name'] = service_name + + def define_coordinator_two(self): + service_name = COORDINATOR + '-two' + service = self.define_full_service(service_name, COORDINATOR, 120) + service['container_name'] = service_name + self.add_env(service, 'DRUID_INSTANCE', 'two') + self.add_env(service, 'druid_host', service_name) + service['ports'] = [ '18081:8081', '18281:8281', '15006:8000' ] + self.add_depends(service, [ ZOO_KEEPER, METADATA ] ) + + def define_overlord(self): + self.define_overlord_one() + self.define_overlord_two() + + def define_overlord_one(self): + service_name = OVERLORD + '-one' + service = self.define_master_service(service_name, OVERLORD) + service['container_name'] = service_name + self.add_env(service, 'DRUID_INSTANCE', 'one') + self.add_env(service, 'druid_host', service_name) + + def define_overlord_two(self): + service_name = OVERLORD + '-two' + service = self.define_full_service(service_name, OVERLORD, 110) + service['container_name'] = service_name + self.add_env(service, 'DRUID_INSTANCE', 'two') + self.add_env(service, 'druid_host', service_name) + service['ports'] = [ '18090:8090', '18290:8290', '15009:8000' ] + self.add_depends(service, [ ZOO_KEEPER, METADATA ] ) + + # No indexer in this cluster + def define_indexer(self): + pass + + # No historical in this cluster + def define_historical(self): + pass + + # The custom node role has no base definition. Also, there is + # no environment file: the needed environment settings are + # given here. 
+ def define_custom_services(self): + service_name = 'custom-node-role' + service = self.define_full_service(service_name, None, 90) + service['container_name'] = service_name + self.add_env(service, 'DRUID_SERVICE', service_name) + self.add_env(service, 'SERVICE_DRUID_JAVA_OPTS', '-Xmx64m -Xms64m') + self.add_env(service, 'druid_host', service_name) + service['ports'] = [ '50011:50011', '9301:9301', '9501:9501', '5010:8000' ] + self.add_depends(service, [ ZOO_KEEPER ] ) + +generate(__file__, Template()) diff --git a/integration-tests-ex/cases/templates/MultiStageQuery.py b/integration-tests-ex/cases/templates/MultiStageQuery.py new file mode 100644 index 00000000000..bb88aa6de2d --- /dev/null +++ b/integration-tests-ex/cases/templates/MultiStageQuery.py @@ -0,0 +1,26 @@ +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +from template import BaseTemplate, generate + +class Template(BaseTemplate): + + def define_indexer(self): + service = super().define_indexer() + self.add_property(service, 'druid.msq.intermediate.storage.enable', 'true') + self.add_property(service, 'druid.msq.intermediate.storage.type', 'local') + self.add_property(service, 'druid.msq.intermediate.storage.basePath', '/shared/durablestorage/') + +generate(__file__, Template()) diff --git a/integration-tests-ex/cases/templates/S3DeepStorage.py b/integration-tests-ex/cases/templates/S3DeepStorage.py new file mode 100644 index 00000000000..9fb85ca6a7f --- /dev/null +++ b/integration-tests-ex/cases/templates/S3DeepStorage.py @@ -0,0 +1,46 @@ +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from template import BaseTemplate, generate + +class Template(BaseTemplate): + + def gen_header_comment(self): + self.emit(''' +# Cluster for the S3 deep storage test. 
+# +# Required env vars: +# +# DRUID_CLOUD_BUCKET +# DRUID_CLOUD_PATH +# AWS_REGION +# AWS_ACCESS_KEY_ID +# AWS_SECRET_ACCESS_KEY + +''') + + def extend_druid_service(self, service): + self.add_property(service, 'druid.storage.type', 's3') + self.add_property(service, 'druid.s3.accessKey', '${AWS_ACCESS_KEY_ID}') + self.add_property(service, 'druid.s3.secretKey', '${AWS_SECRET_ACCESS_KEY}') + self.add_property(service, 'druid.storage.bucket', '${DRUID_CLOUD_BUCKET}') + self.add_property(service, 'druid.storage.baseKey', '${DRUID_CLOUD_PATH}') + self.add_env(service, 'AWS_REGION', '${AWS_REGION}') + + # This test uses different data than the default. + def define_data_dir(self, service): + self.add_volume(service, '../data', '/resources') + +generate(__file__, Template()) diff --git a/integration-tests-ex/cases/templates/template.py b/integration-tests-ex/cases/templates/template.py new file mode 100644 index 00000000000..1be24ab032e --- /dev/null +++ b/integration-tests-ex/cases/templates/template.py @@ -0,0 +1,430 @@ +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +''' +Generates a docker-compose.yaml file from a test-specific template. 
Each +test template either uses the base template directly, or extends the template +to customize bits of the cluster. + +Since the cluster is defined as YAML, the cluster definition is built up +internally as a Python data structure made up of maps, arrays and scalars. +PyYAML does the grunt work of converting the data structure to the YAML file. +''' + +import yaml, os, os.path +from pathlib import Path + +# Constants used frequently in the template. + +DRUID_NETWORK = 'druid-it-net' +DRUID_SUBNET = '172.172.172' +ZOO_KEEPER = 'zookeeper' +METADATA = 'metadata' +COORDINATOR = 'coordinator' +OVERLORD = 'overlord' +ROUTER = 'router' +BROKER = 'broker' +HISTORICAL = 'historical' +INDEXER = 'indexer' +MIDDLE_MANAGER = 'middlemanager' + +def generate(template_path, template): + ''' + Main routine to generate a docker-compose file from a script with the + given template_path, using the template class given. The template path is + a convenient way to locate directories in the file system using information + that Python itself provides. + ''' + + # Compute the cluster (test category) name from the template path which + # we assume to be module//