From 4a74c5adccf71bc5098da06638bc5dcd3473914b Mon Sep 17 00:00:00 2001 From: Jihoon Son Date: Wed, 5 Jan 2022 23:33:04 -0800 Subject: [PATCH] Use Druid's extension loading for integration test instead of maven (#12095) * Use Druid's extension loading for integration test instead of maven * fix maven command * override config path * load input format extensions and kafka by default; add prepopulated-data group * all docker-composes are overridable * fix s3 configs * override config for all * fix docker_compose_args * fix security tests * turn off debug logs for overlord api calls * clean up stuff * revert docker-compose.yml * fix override config for query error test; fix circular dependency in docker compose * add back some dependencies in docker compose * new maven profile for integration test * example file filter --- .travis.yml | 32 ++--- distribution/pom.xml | 107 +++++++++++++++++ .../assembly/integration-test-assembly.xml | 109 ++++++++++++++++++ docs/development/modules.md | 12 +- integration-tests/README.md | 21 ++-- integration-tests/docker/Dockerfile | 3 +- .../docker/docker-compose.base.yml | 12 ++ ...cker-compose.custom-coordinator-duties.yml | 103 ----------------- .../docker-compose.high-availability.yml | 1 + .../docker/docker-compose.ldap-security.yml | 2 +- .../docker/docker-compose.override-env.yml | 109 ------------------ .../docker-compose.query-error-test.yml | 1 + .../docker-compose.query-retry-test.yml | 1 + .../docker-compose.shuffle-deep-store.yml | 105 ----------------- integration-tests/docker/docker-compose.yml | 2 +- integration-tests/docker/druid.sh | 14 +-- .../docker/environment-configs/common | 3 +- .../common-custom-coordinator-duties | 81 ------------- .../docker/environment-configs/common-ldap | 9 +- .../common-shuffle-deep-store | 81 ------------- .../docker/environment-configs/empty-config | 18 +++ .../test-groups/custom-coordinator-duties | 30 +++++ .../test-groups/prepopulated-data | 26 +++++ 
.../test-groups/shuffle-deep-store | 23 ++++ integration-tests/pom.xml | 36 +++--- .../script/copy_resources_template.sh | 43 ++----- .../script/docker_compose_args.sh | 108 ++++++++--------- .../script/docker_run_cluster.sh | 4 +- .../clients/OverlordResourceTestClient.java | 28 ++--- .../druid/tests/query/ITSqlCancelTest.java | 4 +- integration-tests/stop_cluster.sh | 3 +- 31 files changed, 472 insertions(+), 659 deletions(-) create mode 100644 distribution/src/assembly/integration-test-assembly.xml delete mode 100644 integration-tests/docker/docker-compose.custom-coordinator-duties.yml delete mode 100644 integration-tests/docker/docker-compose.override-env.yml delete mode 100644 integration-tests/docker/docker-compose.shuffle-deep-store.yml delete mode 100644 integration-tests/docker/environment-configs/common-custom-coordinator-duties delete mode 100644 integration-tests/docker/environment-configs/common-shuffle-deep-store create mode 100644 integration-tests/docker/environment-configs/empty-config create mode 100644 integration-tests/docker/environment-configs/test-groups/custom-coordinator-duties create mode 100644 integration-tests/docker/environment-configs/test-groups/prepopulated-data create mode 100644 integration-tests/docker/environment-configs/test-groups/shuffle-deep-store diff --git a/.travis.yml b/.travis.yml index de38a02fb58..b8d8a03a888 100644 --- a/.travis.yml +++ b/.travis.yml @@ -420,7 +420,7 @@ jobs: - docker env: TESTNG_GROUPS='-Dgroups=batch-index' JVM_RUNTIME='-Djvm.runtime=8' USE_INDEXER='middleManager' script: &run_integration_test - - ${MVN} verify -pl integration-tests -P integration-tests ${TESTNG_GROUPS} ${JVM_RUNTIME} -Dit.indexer=${USE_INDEXER} ${MAVEN_SKIP} + - ${MVN} verify -pl integration-tests -P integration-tests ${TESTNG_GROUPS} ${JVM_RUNTIME} -Dit.indexer=${USE_INDEXER} ${MAVEN_SKIP} -Doverride.config.path=${OVERRIDE_CONFIG_PATH} after_failure: &integration_test_diags - for v in ~/shared/logs/*.log ; do echo $v logtail 
======================== ; tail -100 $v ; @@ -475,11 +475,11 @@ jobs: - <<: *integration_perfect_rollup_parallel_batch_index name: "(Compile=openjdk8, Run=openjdk8) perfect rollup parallel batch index integration test with deep storage as intermediate store" - env: TESTNG_GROUPS='-Dgroups=shuffle-deep-store' JVM_RUNTIME='-Djvm.runtime=8' USE_INDEXER='middleManager' + env: TESTNG_GROUPS='-Dgroups=shuffle-deep-store' JVM_RUNTIME='-Djvm.runtime=8' USE_INDEXER='middleManager' OVERRIDE_CONFIG_PATH='./environment-configs/test-groups/shuffle-deep-store' - <<: *integration_perfect_rollup_parallel_batch_index name: "(Compile=openjdk8, Run=openjdk8) perfect rollup parallel batch index integration test with deep storage as intermediate store with indexer" - env: TESTNG_GROUPS='-Dgroups=shuffle-deep-store' JVM_RUNTIME='-Djvm.runtime=8' USE_INDEXER='indexer' + env: TESTNG_GROUPS='-Dgroups=shuffle-deep-store' JVM_RUNTIME='-Djvm.runtime=8' USE_INDEXER='indexer' OVERRIDE_CONFIG_PATH='./environment-configs/test-groups/shuffle-deep-store' - &integration_kafka_index name: "(Compile=openjdk8, Run=openjdk8) kafka index integration test" @@ -496,7 +496,7 @@ jobs: - <<: *integration_kafka_index name: "(Compile=openjdk8, Run=openjdk8) custom coordinator duties integration test" - env: TESTNG_GROUPS='-Dgroups=custom-coordinator-duties' JVM_RUNTIME='-Djvm.runtime=8' USE_INDEXER='middleManager' + env: TESTNG_GROUPS='-Dgroups=custom-coordinator-duties' JVM_RUNTIME='-Djvm.runtime=8' USE_INDEXER='middleManager' OVERRIDE_CONFIG_PATH='./environment-configs/test-groups/custom-coordinator-duties' - &integration_kafka_index_slow name: "(Compile=openjdk8, Run=openjdk8) kafka index integration test slow" @@ -551,7 +551,7 @@ jobs: stage: Tests - phase 2 jdk: openjdk8 services: *integration_test_services - env: TESTNG_GROUPS='-Dgroups=query' JVM_RUNTIME='-Djvm.runtime=8' USE_INDEXER='middleManager' + env: TESTNG_GROUPS='-Dgroups=query' JVM_RUNTIME='-Djvm.runtime=8' USE_INDEXER='middleManager' 
OVERRIDE_CONFIG_PATH='./environment-configs/test-groups/prepopulated-data' script: *run_integration_test after_failure: *integration_test_diags @@ -560,7 +560,7 @@ jobs: stage: Tests - phase 2 jdk: openjdk8 services: *integration_test_services - env: TESTNG_GROUPS='-Dgroups=query-retry' JVM_RUNTIME='-Djvm.runtime=8' USE_INDEXER='middleManager' + env: TESTNG_GROUPS='-Dgroups=query-retry' JVM_RUNTIME='-Djvm.runtime=8' USE_INDEXER='middleManager' OVERRIDE_CONFIG_PATH='./environment-configs/test-groups/prepopulated-data' script: *run_integration_test after_failure: *integration_test_diags @@ -569,7 +569,7 @@ jobs: stage: Tests - phase 2 jdk: openjdk8 services: *integration_test_services - env: TESTNG_GROUPS='-Dgroups=query-error' JVM_RUNTIME='-Djvm.runtime=8' USE_INDEXER='middleManager' + env: TESTNG_GROUPS='-Dgroups=query-error' JVM_RUNTIME='-Djvm.runtime=8' USE_INDEXER='middleManager' OVERRIDE_CONFIG_PATH='./environment-configs/test-groups/prepopulated-data' script: *run_integration_test after_failure: *integration_test_diags @@ -578,7 +578,7 @@ jobs: stage: Tests - phase 2 jdk: openjdk8 services: *integration_test_services - env: TESTNG_GROUPS='-Dgroups=security' JVM_RUNTIME='-Djvm.runtime=8' USE_INDEXER='middleManager' + env: TESTNG_GROUPS='-Dgroups=security' JVM_RUNTIME='-Djvm.runtime=8' USE_INDEXER='middleManager' OVERRIDE_CONFIG_PATH='./environment-configs/test-groups/prepopulated-data' script: *run_integration_test after_failure: *integration_test_diags @@ -642,12 +642,12 @@ jobs: - <<: *integration_tests name: "(Compile=openjdk8, Run=openjdk8) leadership and high availability integration tests" jdk: openjdk8 - env: TESTNG_GROUPS='-Dgroups=high-availability' JVM_RUNTIME='-Djvm.runtime=8' USE_INDEXER='middleManager' + env: TESTNG_GROUPS='-Dgroups=high-availability' JVM_RUNTIME='-Djvm.runtime=8' USE_INDEXER='middleManager' OVERRIDE_CONFIG_PATH='./environment-configs/test-groups/prepopulated-data' - <<: *integration_query name: "(Compile=openjdk8, Run=openjdk8) 
query integration test (mariaDB)" jdk: openjdk8 - env: TESTNG_GROUPS='-Dgroups=query' USE_INDEXER='middleManager' MYSQL_DRIVER_CLASSNAME='org.mariadb.jdbc.Driver' + env: TESTNG_GROUPS='-Dgroups=query' USE_INDEXER='middleManager' MYSQL_DRIVER_CLASSNAME='org.mariadb.jdbc.Driver' OVERRIDE_CONFIG_PATH='./environment-configs/test-groups/prepopulated-data' # END - Integration tests for Compile with Java 8 and Run with Java 8 @@ -675,22 +675,22 @@ jobs: - <<: *integration_query name: "(Compile=openjdk8, Run=openjdk11) query integration test" jdk: openjdk8 - env: TESTNG_GROUPS='-Dgroups=query' JVM_RUNTIME='-Djvm.runtime=11' USE_INDEXER='middleManager' + env: TESTNG_GROUPS='-Dgroups=query' JVM_RUNTIME='-Djvm.runtime=11' USE_INDEXER='middleManager' OVERRIDE_CONFIG_PATH='./environment-configs/test-groups/prepopulated-data' - <<: *integration_query_retry name: "(Compile=openjdk8, Run=openjdk11) query retry integration test for missing segments" jdk: openjdk8 - env: TESTNG_GROUPS='-Dgroups=query-retry' JVM_RUNTIME='-Djvm.runtime=11' USE_INDEXER='middleManager' + env: TESTNG_GROUPS='-Dgroups=query-retry' JVM_RUNTIME='-Djvm.runtime=11' USE_INDEXER='middleManager' OVERRIDE_CONFIG_PATH='./environment-configs/test-groups/prepopulated-data' - <<: *integration_query_error name: "(Compile=openjdk8, Run=openjdk11) query error integration test for missing segments" jdk: openjdk8 - env: TESTNG_GROUPS='-Dgroups=query-error' JVM_RUNTIME='-Djvm.runtime=11' USE_INDEXER='middleManager' + env: TESTNG_GROUPS='-Dgroups=query-error' JVM_RUNTIME='-Djvm.runtime=11' USE_INDEXER='middleManager' OVERRIDE_CONFIG_PATH='./environment-configs/test-groups/prepopulated-data' - <<: *integration_security name: "(Compile=openjdk8, Run=openjdk11) security integration test" jdk: openjdk8 - env: TESTNG_GROUPS='-Dgroups=security' JVM_RUNTIME='-Djvm.runtime=11' USE_INDEXER='middleManager' + env: TESTNG_GROUPS='-Dgroups=security' JVM_RUNTIME='-Djvm.runtime=11' USE_INDEXER='middleManager' 
OVERRIDE_CONFIG_PATH='./environment-configs/test-groups/prepopulated-data' - <<: *integration_ldap_security name: "(Compile=openjdk8, Run=openjdk11) ldap security integration test" @@ -720,12 +720,12 @@ jobs: - <<: *integration_tests name: "(Compile=openjdk8, Run=openjdk11) leadership and high availability integration tests" jdk: openjdk8 - env: TESTNG_GROUPS='-Dgroups=high-availability' JVM_RUNTIME='-Djvm.runtime=11' USE_INDEXER='middleManager' + env: TESTNG_GROUPS='-Dgroups=high-availability' JVM_RUNTIME='-Djvm.runtime=11' USE_INDEXER='middleManager' OVERRIDE_CONFIG_PATH='./environment-configs/test-groups/prepopulated-data' - <<: *integration_query name: "(Compile=openjdk8, Run=openjdk11) query integration test (mariaDB)" jdk: openjdk8 - env: TESTNG_GROUPS='-Dgroups=query' JVM_RUNTIME='-Djvm.runtime=11' USE_INDEXER='middleManager' MYSQL_DRIVER_CLASSNAME='org.mariadb.jdbc.Driver' + env: TESTNG_GROUPS='-Dgroups=query' JVM_RUNTIME='-Djvm.runtime=11' USE_INDEXER='middleManager' MYSQL_DRIVER_CLASSNAME='org.mariadb.jdbc.Driver' OVERRIDE_CONFIG_PATH='./environment-configs/test-groups/prepopulated-data' # END - Integration tests for Compile with Java 8 and Run with Java 11 diff --git a/distribution/pom.xml b/distribution/pom.xml index ad6fd650270..405a2e3eda1 100644 --- a/distribution/pom.xml +++ b/distribution/pom.xml @@ -622,5 +622,112 @@ + + integration-test + + false + + + + + org.codehaus.mojo + exec-maven-plugin + + + pull-deps + package + + exec + + + java + + -classpath + + -Ddruid.extensions.loadList=[] + -Ddruid.extensions.directory=${project.build.directory}/extensions + + + -Ddruid.extensions.hadoopDependenciesDir=${project.build.directory}/hadoop-dependencies + + org.apache.druid.cli.Main + tools + pull-deps + --clean + --defaultVersion + ${project.parent.version} + -l + ${settings.localRepository} + -h + org.apache.hadoop:hadoop-client:${hadoop.compile.version} + -c + org.apache.druid.extensions:druid-avro-extensions + -c + 
org.apache.druid.extensions:druid-azure-extensions + -c + org.apache.druid.extensions:druid-datasketches + -c + org.apache.druid.extensions:druid-hdfs-storage + -c + org.apache.druid.extensions:druid-histogram + -c + org.apache.druid.extensions:druid-kafka-indexing-service + -c + org.apache.druid.extensions:druid-kinesis-indexing-service + -c + org.apache.druid.extensions:druid-lookups-cached-global + -c + org.apache.druid.extensions:druid-protobuf-extensions + -c + org.apache.druid.extensions:mysql-metadata-storage + -c + org.apache.druid.extensions:druid-orc-extensions + -c + org.apache.druid.extensions:druid-parquet-extensions + -c + org.apache.druid.extensions:postgresql-metadata-storage + -c + org.apache.druid.extensions:druid-s3-extensions + -c + org.apache.druid.extensions:druid-ec2-extensions + -c + org.apache.druid.extensions:druid-google-extensions + -c + org.apache.druid.extensions:simple-client-sslcontext + -c + org.apache.druid.extensions:druid-basic-security + -c + org.apache.druid:druid-integration-tests + -c + org.apache.druid.extensions:druid-testing-tools + ${druid.distribution.pulldeps.opts} + + + + + + + org.apache.maven.plugins + maven-assembly-plugin + + + distro-assembly + package + + single + + + apache-druid-${project.version}-integration-test + posix + + src/assembly/integration-test-assembly.xml + + + + + + + + diff --git a/distribution/src/assembly/integration-test-assembly.xml b/distribution/src/assembly/integration-test-assembly.xml new file mode 100644 index 00000000000..6963311a83e --- /dev/null +++ b/distribution/src/assembly/integration-test-assembly.xml @@ -0,0 +1,109 @@ + + + + + bin + + dir + + false + + + ${project.build.directory}/extensions + + */* + + extensions + + + + ${project.build.directory}/hadoop-dependencies + + */*/* + + hadoop-dependencies + + + ../examples/conf + + * + */* + */*/* + */*/*/* + */*/*/*/* + + conf + + + ../examples/quickstart/ + + * + + quickstart + + + ../examples/quickstart/tutorial + + 
wikiticker-2015-09-12-sampled.json.gz + + quickstart/tutorial + + + ../examples/quickstart/tutorial/hadoop + + * + + quickstart/tutorial/hadoop + + + ../examples/quickstart/tutorial/hadoop/docker + + * + + quickstart/tutorial/hadoop/docker + + + ../examples/bin + + * + + 744 + bin + + + + + false + true + lib + + org.apache.druid:extensions-distribution + + + + true + + org.apache.druid:extensions-distribution + + + + diff --git a/docs/development/modules.md b/docs/development/modules.md index 50e2a97f77b..2d5f6a82382 100644 --- a/docs/development/modules.md +++ b/docs/development/modules.md @@ -344,11 +344,10 @@ The duties will be grouped into multiple groups as per the elements in list `dru All duties in the same group will have the same run period configured by `druid.coordinator..period`. Currently, there is a single thread running the duties sequentially for each group. -For example, see `KillSupervisorsCustomDuty` for a custom coordinator duty implementation and `common-custom-coordinator-duties` -integration test group which loads `KillSupervisorsCustomDuty` using the configs set in `integration-tests/docker/environment-configs/common-custom-coordinator-duties`. -The relevant configs in `integration-tests/docker/environment-configs/common-custom-coordinator-duties` are as follows: -(The configs create a custom coordinator duty group called `cleanupMetadata` which runs a custom coordinator duty called `killSupervisors` every 10 seconds. -The custom coordinator duty `killSupervisors` also has a config called `retainDuration` which is set to 0 minute) +For example, see `KillSupervisorsCustomDuty` for a custom coordinator duty implementation and the `custom-coordinator-duties` +integration test group which loads `KillSupervisorsCustomDuty` using the configs set in `integration-tests/docker/environment-configs/test-groups/custom-coordinator-duties`. +This config file adds the configs below to enable a custom coordinator duty. 
+ ``` druid.coordinator.dutyGroups=["cleanupMetadata"] druid.coordinator.cleanupMetadata.duties=["killSupervisors"] @@ -356,6 +355,9 @@ druid.coordinator.cleanupMetadata.duty.killSupervisors.retainDuration=PT0M druid.coordinator.cleanupMetadata.period=PT10S ``` +These configurations create a custom coordinator duty group called `cleanupMetadata` which runs a custom coordinator duty called `killSupervisors` every 10 seconds. +The custom coordinator duty `killSupervisors` also has a config called `retainDuration` which is set to 0 minute. + ### Routing data through a HTTP proxy for your extension You can add the ability for the `HttpClient` of your extension to connect through an HTTP proxy. diff --git a/integration-tests/README.md b/integration-tests/README.md index 332f743b544..0b64dd30f1a 100644 --- a/integration-tests/README.md +++ b/integration-tests/README.md @@ -103,7 +103,7 @@ Druid routers for security group integration test (permissive tls, no client aut - To start Druid cluster with override configs ```bash - OVERRIDE_ENV= docker-compose -f docker-compose.override-env.yml up + OVERRIDE_ENV= docker-compose -f docker-compose.yml up ``` - To start tests against Hadoop @@ -127,31 +127,30 @@ Druid routers for security group integration test (permissive tls, no client aut - docker-compose.base.yml - Base file that defines all containers for integration test + Base file that defines all containers for integration testing - docker-compose.yml - Defines Druid cluster with default configuration that is used for running integration tests. + Defines a Druid cluster with default configuration that is used for running integration tests. ```bash docker-compose -f docker-compose.yml up - // DRUID_INTEGRATION_TEST_GROUP - this variable is used in Druid docker container for "security" and "query" test group. Use next docker-compose if you want to run security/query tests. 
- DRUID_INTEGRATION_TEST_GROUP=security docker-compose -f docker-compose.yml up + # DRUID_INTEGRATION_TEST_GROUP - an environment variable that specifies the integration test group to run. + DRUID_INTEGRATION_TEST_GROUP=batch-index docker-compose -f docker-compose.yml up ``` -- docker-compose.override-env.yml - - Defines Druid cluster with default configuration plus any additional and/or overriden configurations from override-env file. + You can change the default configuration using a custom configuration file. The settings in the file will override + the default settings if they conflict. They will be appended to the default configuration otherwise. ```bash - // OVERRIDE_ENV - variable that must contains path to Druid configuration file - OVERRIDE_ENV=./environment-configs/override-examples/s3 docker-compose -f docker-compose.override-env.yml up + # OVERRIDE_ENV - an environment variable that specifies the custom configuration file path. + OVERRIDE_ENV=./environment-configs/test-groups/prepopulated-data DRUID_INTEGRATION_TEST_GROUP=query docker-compose -f docker-compose.yml up ``` - docker-compose.security.yml Defines three additional Druid router services with permissive tls, no client auth tls, and custom check tls respectively. -This is meant to be use together with docker-compose.yml or docker-compose.override-env.yml and is only needed for the "security" group integration test. + This is meant to be used together with docker-compose.yml and is only needed for the "security" group integration test. 
```bash docker-compose -f docker-compose.yml -f docker-compose.security.yml up diff --git a/integration-tests/docker/Dockerfile b/integration-tests/docker/Dockerfile index b8ef7d5c45e..d1b6a4fcdb3 100644 --- a/integration-tests/docker/Dockerfile +++ b/integration-tests/docker/Dockerfile @@ -46,6 +46,7 @@ RUN find /var/lib/mysql -type f -exec touch {} \; && /etc/init.d/mysql start \ # Add Druid jars ADD lib/* /usr/local/druid/lib/ +COPY extensions/ /usr/local/druid/extensions/ # Download the MySQL Java connector # target path must match the exact path referenced in environment-configs/common @@ -65,7 +66,7 @@ RUN wget -q "https://packages.confluent.io/maven/io/confluent/kafka-protobuf-pro # Add sample data # touch is needed because OverlayFS's copy-up operation breaks POSIX standards. See https://github.com/docker/for-linux/issues/72. RUN find /var/lib/mysql -type f -exec touch {} \; && service mysql start \ - && java -cp "/usr/local/druid/lib/*" -Ddruid.metadata.storage.type=mysql -Ddruid.metadata.mysql.driver.driverClassName=$MYSQL_DRIVER_CLASSNAME org.apache.druid.cli.Main tools metadata-init --connectURI="jdbc:mysql://localhost:3306/druid" --user=druid --password=diurd \ + && java -cp "/usr/local/druid/lib/*" -Ddruid.extensions.directory=/usr/local/druid/extensions -Ddruid.extensions.loadList='["mysql-metadata-storage"]' -Ddruid.metadata.storage.type=mysql -Ddruid.metadata.mysql.driver.driverClassName=$MYSQL_DRIVER_CLASSNAME org.apache.druid.cli.Main tools metadata-init --connectURI="jdbc:mysql://localhost:3306/druid" --user=druid --password=diurd \ && /etc/init.d/mysql stop ADD test-data /test-data diff --git a/integration-tests/docker/docker-compose.base.yml b/integration-tests/docker/docker-compose.base.yml index 82870d02f02..2f60212bc66 100644 --- a/integration-tests/docker/docker-compose.base.yml +++ b/integration-tests/docker/docker-compose.base.yml @@ -89,6 +89,7 @@ services: env_file: - ./environment-configs/common - ./environment-configs/overlord + - 
${OVERRIDE_ENV} druid-overlord-two: image: druid/cluster @@ -108,6 +109,7 @@ services: env_file: - ./environment-configs/common - ./environment-configs/overlord + - ${OVERRIDE_ENV} ### coordinators druid-coordinator: @@ -128,6 +130,7 @@ services: env_file: - ./environment-configs/common - ./environment-configs/coordinator + - ${OVERRIDE_ENV} druid-coordinator-two: image: druid/cluster @@ -147,6 +150,7 @@ services: env_file: - ./environment-configs/common - ./environment-configs/coordinator + - ${OVERRIDE_ENV} ### historicals druid-historical: @@ -167,6 +171,7 @@ services: env_file: - ./environment-configs/common - ./environment-configs/historical + - ${OVERRIDE_ENV} ### middle managers druid-middlemanager: @@ -200,6 +205,7 @@ services: env_file: - ./environment-configs/common - ./environment-configs/middlemanager + - ${OVERRIDE_ENV} ### indexers druid-indexer: @@ -221,6 +227,7 @@ services: env_file: - ./environment-configs/common - ./environment-configs/indexer + - ${OVERRIDE_ENV} ### brokers druid-broker: @@ -241,6 +248,7 @@ services: env_file: - ./environment-configs/common - ./environment-configs/broker + - ${OVERRIDE_ENV} ### routers druid-router: @@ -261,6 +269,7 @@ services: env_file: - ./environment-configs/common - ./environment-configs/router + - ${OVERRIDE_ENV} druid-router-permissive-tls: image: druid/cluster @@ -279,6 +288,7 @@ services: env_file: - ./environment-configs/common - ./environment-configs/router-permissive-tls + - ${OVERRIDE_ENV} druid-router-no-client-auth-tls: image: druid/cluster @@ -297,6 +307,7 @@ services: env_file: - ./environment-configs/common - ./environment-configs/router-no-client-auth-tls + - ${OVERRIDE_ENV} druid-router-custom-check-tls: image: druid/cluster @@ -315,6 +326,7 @@ services: env_file: - ./environment-configs/common - ./environment-configs/router-custom-check-tls + - ${OVERRIDE_ENV} ### optional supporting infra diff --git a/integration-tests/docker/docker-compose.custom-coordinator-duties.yml 
b/integration-tests/docker/docker-compose.custom-coordinator-duties.yml deleted file mode 100644 index f4ecfcba993..00000000000 --- a/integration-tests/docker/docker-compose.custom-coordinator-duties.yml +++ /dev/null @@ -1,103 +0,0 @@ -# Licensed to the Apache Software Foundation (ASF) under one or more -# contributor license agreements. See the NOTICE file distributed with -# this work for additional information regarding copyright ownership. -# The ASF licenses this file to You under the Apache License, Version 2.0 -# (the "License"); you may not use this file except in compliance with -# the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
- -version: "2.2" -services: - druid-zookeeper-kafka: - extends: - file: docker-compose.base.yml - service: druid-zookeeper-kafka - - druid-metadata-storage: - extends: - file: docker-compose.base.yml - service: druid-metadata-storage - environment: - - DRUID_INTEGRATION_TEST_GROUP=${DRUID_INTEGRATION_TEST_GROUP} - depends_on: - - druid-zookeeper-kafka - - druid-coordinator: - extends: - file: docker-compose.base.yml - service: druid-coordinator - env_file: - - ./environment-configs/common-custom-coordinator-duties - environment: - - DRUID_INTEGRATION_TEST_GROUP=${DRUID_INTEGRATION_TEST_GROUP} - depends_on: - - druid-metadata-storage - - druid-zookeeper-kafka - - druid-overlord: - extends: - file: docker-compose.base.yml - service: druid-overlord - environment: - - DRUID_INTEGRATION_TEST_GROUP=${DRUID_INTEGRATION_TEST_GROUP} - depends_on: - - druid-coordinator - - druid-metadata-storage - - druid-zookeeper-kafka - - druid-historical: - extends: - file: docker-compose.base.yml - service: druid-historical - environment: - - DRUID_INTEGRATION_TEST_GROUP=${DRUID_INTEGRATION_TEST_GROUP} - depends_on: - - druid-zookeeper-kafka - - druid-middlemanager: - extends: - file: docker-compose.base.yml - service: druid-middlemanager - environment: - - DRUID_INTEGRATION_TEST_GROUP=${DRUID_INTEGRATION_TEST_GROUP} - depends_on: - - druid-zookeeper-kafka - - druid-overlord - - druid-broker: - extends: - file: docker-compose.base.yml - service: druid-broker - environment: - - DRUID_INTEGRATION_TEST_GROUP=${DRUID_INTEGRATION_TEST_GROUP} - depends_on: - - druid-coordinator - - druid-zookeeper-kafka - - druid-middlemanager - - druid-historical - - druid-router: - extends: - file: docker-compose.base.yml - service: druid-router - environment: - - DRUID_INTEGRATION_TEST_GROUP=${DRUID_INTEGRATION_TEST_GROUP} - depends_on: - - druid-zookeeper-kafka - - druid-coordinator - - druid-broker - - druid-overlord - -networks: - druid-it-net: - name: druid-it-net - ipam: - config: - - subnet: 
172.172.172.0/24 \ No newline at end of file diff --git a/integration-tests/docker/docker-compose.high-availability.yml b/integration-tests/docker/docker-compose.high-availability.yml index 36ef57a199a..68049452d97 100644 --- a/integration-tests/docker/docker-compose.high-availability.yml +++ b/integration-tests/docker/docker-compose.high-availability.yml @@ -138,6 +138,7 @@ services: - druid_server_https_crlPath=/tls/revocations.crl env_file: - ./environment-configs/common + - ${OVERRIDE_ENV} depends_on: - druid-zookeeper-kafka - druid-coordinator diff --git a/integration-tests/docker/docker-compose.ldap-security.yml b/integration-tests/docker/docker-compose.ldap-security.yml index 53fdb1df5c0..e700edc5b94 100644 --- a/integration-tests/docker/docker-compose.ldap-security.yml +++ b/integration-tests/docker/docker-compose.ldap-security.yml @@ -56,6 +56,7 @@ services: - druid-openldap - druid-metadata-storage - druid-zookeeper-kafka + - druid-overlord druid-overlord: extends: @@ -68,7 +69,6 @@ services: - DRUID_INTEGRATION_TEST_GROUP=${DRUID_INTEGRATION_TEST_GROUP} depends_on: - druid-openldap - - druid-coordinator - druid-metadata-storage - druid-zookeeper-kafka diff --git a/integration-tests/docker/docker-compose.override-env.yml b/integration-tests/docker/docker-compose.override-env.yml deleted file mode 100644 index 13820387d03..00000000000 --- a/integration-tests/docker/docker-compose.override-env.yml +++ /dev/null @@ -1,109 +0,0 @@ -# Licensed to the Apache Software Foundation (ASF) under one or more -# contributor license agreements. See the NOTICE file distributed with -# this work for additional information regarding copyright ownership. -# The ASF licenses this file to You under the Apache License, Version 2.0 -# (the "License"); you may not use this file except in compliance with -# the License. 
You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -version: "2.2" -services: - druid-zookeeper-kafka: - extends: - file: docker-compose.base.yml - service: druid-zookeeper-kafka - - druid-metadata-storage: - extends: - file: docker-compose.base.yml - service: druid-metadata-storage - depends_on: - - druid-zookeeper-kafka - - druid-overlord: - extends: - file: docker-compose.base.yml - service: druid-overlord - env_file: - - ${OVERRIDE_ENV} - environment: - - DRUID_INTEGRATION_TEST_GROUP=${DRUID_INTEGRATION_TEST_GROUP} - depends_on: - - druid-metadata-storage - - druid-zookeeper-kafka - - druid-coordinator: - extends: - file: docker-compose.base.yml - service: druid-coordinator - env_file: - - ${OVERRIDE_ENV} - environment: - - DRUID_INTEGRATION_TEST_GROUP=${DRUID_INTEGRATION_TEST_GROUP} - depends_on: - - druid-overlord - - druid-metadata-storage - - druid-zookeeper-kafka - - druid-historical: - extends: - file: docker-compose.base.yml - service: druid-historical - env_file: - - ${OVERRIDE_ENV} - environment: - - DRUID_INTEGRATION_TEST_GROUP=${DRUID_INTEGRATION_TEST_GROUP} - depends_on: - - druid-zookeeper-kafka - - druid-middlemanager: - extends: - file: docker-compose.base.yml - service: druid-middlemanager - env_file: - - ${OVERRIDE_ENV} - environment: - - DRUID_INTEGRATION_TEST_GROUP=${DRUID_INTEGRATION_TEST_GROUP} - depends_on: - - druid-zookeeper-kafka - - druid-overlord - - druid-broker: - extends: - file: docker-compose.base.yml - service: druid-broker - env_file: - - ${OVERRIDE_ENV} - environment: - - DRUID_INTEGRATION_TEST_GROUP=${DRUID_INTEGRATION_TEST_GROUP} - depends_on: - - 
druid-zookeeper-kafka - - druid-middlemanager - - druid-historical - - druid-router: - extends: - file: docker-compose.base.yml - service: druid-router - env_file: - - ${OVERRIDE_ENV} - environment: - - DRUID_INTEGRATION_TEST_GROUP=${DRUID_INTEGRATION_TEST_GROUP} - depends_on: - - druid-zookeeper-kafka - - druid-coordinator - - druid-broker - -networks: - druid-it-net: - name: druid-it-net - ipam: - config: - - subnet: 172.172.172.0/24 \ No newline at end of file diff --git a/integration-tests/docker/docker-compose.query-error-test.yml b/integration-tests/docker/docker-compose.query-error-test.yml index 6d1d0521fdd..5dc175829ad 100644 --- a/integration-tests/docker/docker-compose.query-error-test.yml +++ b/integration-tests/docker/docker-compose.query-error-test.yml @@ -88,6 +88,7 @@ services: env_file: - ./environment-configs/common - ./environment-configs/historical-for-query-error-test + - ${OVERRIDE_ENV} environment: - DRUID_INTEGRATION_TEST_GROUP=${DRUID_INTEGRATION_TEST_GROUP} depends_on: diff --git a/integration-tests/docker/docker-compose.query-retry-test.yml b/integration-tests/docker/docker-compose.query-retry-test.yml index bcea3383acd..fbaaf07250f 100644 --- a/integration-tests/docker/docker-compose.query-retry-test.yml +++ b/integration-tests/docker/docker-compose.query-retry-test.yml @@ -97,6 +97,7 @@ services: env_file: - ./environment-configs/common - ./environment-configs/historical-for-query-error-test + - ${OVERRIDE_ENV} environment: - DRUID_INTEGRATION_TEST_GROUP=${DRUID_INTEGRATION_TEST_GROUP} depends_on: diff --git a/integration-tests/docker/docker-compose.shuffle-deep-store.yml b/integration-tests/docker/docker-compose.shuffle-deep-store.yml deleted file mode 100644 index cf2670ed1f2..00000000000 --- a/integration-tests/docker/docker-compose.shuffle-deep-store.yml +++ /dev/null @@ -1,105 +0,0 @@ -# Licensed to the Apache Software Foundation (ASF) under one or more -# contributor license agreements. 
See the NOTICE file distributed with -# this work for additional information regarding copyright ownership. -# The ASF licenses this file to You under the Apache License, Version 2.0 -# (the "License"); you may not use this file except in compliance with -# the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -version: "2.2" -services: - druid-zookeeper-kafka: - extends: - file: docker-compose.base.yml - service: druid-zookeeper-kafka - - druid-metadata-storage: - extends: - file: docker-compose.base.yml - service: druid-metadata-storage - environment: - - DRUID_INTEGRATION_TEST_GROUP=${DRUID_INTEGRATION_TEST_GROUP} - depends_on: - - druid-zookeeper-kafka - - druid-coordinator: - extends: - file: docker-compose.base.yml - service: druid-coordinator - environment: - - DRUID_INTEGRATION_TEST_GROUP=${DRUID_INTEGRATION_TEST_GROUP} - depends_on: - - druid-metadata-storage - - druid-zookeeper-kafka - - druid-overlord: - extends: - file: docker-compose.base.yml - service: druid-overlord - env_file: - - ./environment-configs/common-shuffle-deep-store - environment: - - DRUID_INTEGRATION_TEST_GROUP=${DRUID_INTEGRATION_TEST_GROUP} - depends_on: - - druid-coordinator - - druid-metadata-storage - - druid-zookeeper-kafka - - druid-historical: - extends: - file: docker-compose.base.yml - service: druid-historical - environment: - - DRUID_INTEGRATION_TEST_GROUP=${DRUID_INTEGRATION_TEST_GROUP} - depends_on: - - druid-zookeeper-kafka - - druid-middlemanager: - extends: - file: docker-compose.base.yml - service: druid-middlemanager - env_file: - - ./environment-configs/common-shuffle-deep-store - environment: - - 
DRUID_INTEGRATION_TEST_GROUP=${DRUID_INTEGRATION_TEST_GROUP} - depends_on: - - druid-zookeeper-kafka - - druid-overlord - - druid-broker: - extends: - file: docker-compose.base.yml - service: druid-broker - environment: - - DRUID_INTEGRATION_TEST_GROUP=${DRUID_INTEGRATION_TEST_GROUP} - depends_on: - - druid-coordinator - - druid-zookeeper-kafka - - druid-middlemanager - - druid-historical - - druid-router: - extends: - file: docker-compose.base.yml - service: druid-router - environment: - - DRUID_INTEGRATION_TEST_GROUP=${DRUID_INTEGRATION_TEST_GROUP} - depends_on: - - druid-zookeeper-kafka - - druid-coordinator - - druid-broker - - druid-overlord - -networks: - druid-it-net: - name: druid-it-net - ipam: - config: - - subnet: 172.172.172.0/24 \ No newline at end of file diff --git a/integration-tests/docker/docker-compose.yml b/integration-tests/docker/docker-compose.yml index d6df30c72fe..277d4b27cbe 100644 --- a/integration-tests/docker/docker-compose.yml +++ b/integration-tests/docker/docker-compose.yml @@ -36,6 +36,7 @@ services: environment: - DRUID_INTEGRATION_TEST_GROUP=${DRUID_INTEGRATION_TEST_GROUP} depends_on: + - druid-overlord - druid-metadata-storage - druid-zookeeper-kafka @@ -46,7 +47,6 @@ services: environment: - DRUID_INTEGRATION_TEST_GROUP=${DRUID_INTEGRATION_TEST_GROUP} depends_on: - - druid-coordinator - druid-metadata-storage - druid-zookeeper-kafka diff --git a/integration-tests/docker/druid.sh b/integration-tests/docker/druid.sh index adece180602..eb13fe8b527 100755 --- a/integration-tests/docker/druid.sh +++ b/integration-tests/docker/druid.sh @@ -88,17 +88,8 @@ setupData() if [ "$DRUID_INTEGRATION_TEST_GROUP" = "query" ] || [ "$DRUID_INTEGRATION_TEST_GROUP" = "query-retry" ] || [ "$DRUID_INTEGRATION_TEST_GROUP" = "query-error" ] || [ "$DRUID_INTEGRATION_TEST_GROUP" = "high-availability" ] || [ "$DRUID_INTEGRATION_TEST_GROUP" = "security" ] || [ "$DRUID_INTEGRATION_TEST_GROUP" = "ldap-security" ] || [ "$DRUID_INTEGRATION_TEST_GROUP" = 
"upgrade" ]; then # touch is needed because OverlayFS's copy-up operation breaks POSIX standards. See https://github.com/docker/for-linux/issues/72. find /var/lib/mysql -type f -exec touch {} \; && service mysql start \ - && cat /test-data/${DRUID_INTEGRATION_TEST_GROUP}-sample-data.sql | mysql -u root druid && /etc/init.d/mysql stop - # below s3 credentials needed to access the pre-existing s3 bucket - setKey $DRUID_SERVICE druid.s3.accessKey AKIAT2GGLKKJQCMG64V4 - setKey $DRUID_SERVICE druid.s3.secretKey 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 - if [ "$DRUID_INTEGRATION_TEST_GROUP" = "query-retry" ] || [ "$DRUID_INTEGRATION_TEST_GROUP" = "query-error" ] || [ "$DRUID_INTEGRATION_TEST_GROUP" = "high-availability" ]; then - setKey $DRUID_SERVICE druid.extensions.loadList [\"druid-s3-extensions\",\"druid-integration-tests\"] - else - setKey $DRUID_SERVICE druid.extensions.loadList [\"druid-s3-extensions\"] - fi - # The region of the sample data s3 blobs needed for these test groups - export AWS_REGION=us-east-1 + && cat /test-data/${DRUID_INTEGRATION_TEST_GROUP}-sample-data.sql | mysql -u root druid \ + && /etc/init.d/mysql stop fi if [ "$MYSQL_DRIVER_CLASSNAME" != "com.mysql.jdbc.Driver" ] ; then @@ -114,4 +105,3 @@ setupData() && /etc/init.d/mysql stop fi } - diff --git a/integration-tests/docker/environment-configs/common b/integration-tests/docker/environment-configs/common index 49f83d415c0..592246bc948 100644 --- a/integration-tests/docker/environment-configs/common +++ b/integration-tests/docker/environment-configs/common @@ -26,7 +26,8 @@ COMMON_DRUID_JAVA_OPTS=-Duser.timezone=UTC -Dfile.encoding=UTF-8 -Dlog4j.configu DRUID_DEP_LIB_DIR=/shared/hadoop_xml:/shared/docker/lib/*:/usr/local/druid/lib/mysql-connector-java.jar # Druid configs -druid_extensions_loadList=[] 
+druid_extensions_loadList=["mysql-metadata-storage","druid-basic-security","simple-client-sslcontext","druid-testing-tools","druid-lookups-cached-global","druid-histogram","druid-datasketches","druid-parquet-extensions","druid-avro-extensions","druid-protobuf-extensions","druid-orc-extensions","druid-kafka-indexing-service"] +druid_startup_logging_logProperties=true druid_extensions_directory=/shared/docker/extensions druid_auth_authenticator_basic_authorizerName=basic druid_auth_authenticator_basic_initialAdminPassword=priest diff --git a/integration-tests/docker/environment-configs/common-custom-coordinator-duties b/integration-tests/docker/environment-configs/common-custom-coordinator-duties deleted file mode 100644 index e6bddd658f6..00000000000 --- a/integration-tests/docker/environment-configs/common-custom-coordinator-duties +++ /dev/null @@ -1,81 +0,0 @@ -# -# Licensed to the Apache Software Foundation (ASF) under one -# or more contributor license agreements. See the NOTICE file -# distributed with this work for additional information -# regarding copyright ownership. The ASF licenses this file -# to you under the Apache License, Version 2.0 (the -# "License"); you may not use this file except in compliance -# with the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, -# software distributed under the License is distributed on an -# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -# KIND, either express or implied. See the License for the -# specific language governing permissions and limitations -# under the License. 
-# - -LANG=C.UTF-8 -LANGUAGE=C.UTF-8 -LC_ALL=C.UTF-8 - -# JAVA OPTS -COMMON_DRUID_JAVA_OPTS=-Duser.timezone=UTC -Dfile.encoding=UTF-8 -Dlog4j.configurationFile=/shared/docker/lib/log4j2.xml -XX:+ExitOnOutOfMemoryError -XX:+HeapDumpOnOutOfMemoryError -XX:HeapDumpPath=/tmp -DRUID_DEP_LIB_DIR=/shared/hadoop_xml:/shared/docker/lib/*:/usr/local/druid/lib/mysql-connector-java.jar - -# Druid configs -druid_extensions_loadList=[] -druid_extensions_directory=/shared/docker/extensions -druid_auth_authenticator_basic_authorizerName=basic -druid_auth_authenticator_basic_initialAdminPassword=priest -druid_auth_authenticator_basic_initialInternalClientPassword=warlock -druid_auth_authenticator_basic_type=basic -druid_auth_authenticatorChain=["basic"] -druid_auth_authorizer_basic_type=basic -druid_auth_authorizers=["basic"] -druid_client_https_certAlias=druid -druid_client_https_keyManagerPassword=druid123 -druid_client_https_keyStorePassword=druid123 -druid_client_https_keyStorePath=/tls/server.jks -druid_client_https_protocol=TLSv1.2 -druid_client_https_trustStoreAlgorithm=PKIX -druid_client_https_trustStorePassword=druid123 -druid_client_https_trustStorePath=/tls/truststore.jks -druid_enableTlsPort=true -druid_escalator_authorizerName=basic -druid_escalator_internalClientPassword=warlock -druid_escalator_internalClientUsername=druid_system -druid_escalator_type=basic -druid_lookup_numLookupLoadingThreads=1 -druid_server_http_numThreads=20 -# Allow OPTIONS method for ITBasicAuthConfigurationTest.testSystemSchemaAccess -druid_server_http_allowedHttpMethods=["OPTIONS"] -druid_server_https_certAlias=druid -druid_server_https_keyManagerPassword=druid123 -druid_server_https_keyStorePassword=druid123 -druid_server_https_keyStorePath=/tls/server.jks -druid_server_https_keyStoreType=jks -druid_server_https_requireClientCertificate=true -druid_server_https_trustStoreAlgorithm=PKIX -druid_server_https_trustStorePassword=druid123 -druid_server_https_trustStorePath=/tls/truststore.jks 
-druid_server_https_validateHostnames=true -druid_zk_service_host=druid-zookeeper-kafka -druid_auth_basic_common_maxSyncRetries=20 -druid_indexer_logs_directory=/shared/tasklogs -druid_sql_enable=true -druid_extensions_hadoopDependenciesDir=/shared/hadoop-dependencies -druid_request_logging_type=slf4j - -# Testing the legacy config from https://github.com/apache/druid/pull/10267 -# Can remove this when the flag is no longer needed -druid_indexer_task_ignoreTimestampSpecForDruidInputSource=true - -#Testing kill supervisor custom coordinator duty -druid_coordinator_kill_supervisor_on=false -druid_coordinator_dutyGroups=["cleanupMetadata"] -druid_coordinator_cleanupMetadata_duties=["killSupervisors"] -druid_coordinator_cleanupMetadata_duty_killSupervisors_retainDuration=PT0M -druid_coordinator_cleanupMetadata_period=PT10S \ No newline at end of file diff --git a/integration-tests/docker/environment-configs/common-ldap b/integration-tests/docker/environment-configs/common-ldap index 243f09d66d6..418ae108660 100644 --- a/integration-tests/docker/environment-configs/common-ldap +++ b/integration-tests/docker/environment-configs/common-ldap @@ -20,13 +20,14 @@ LANG=C.UTF-8 LANGUAGE=C.UTF-8 LC_ALL=C.UTF-8 +AWS_REGION=us-east-1 # JAVA OPTS COMMON_DRUID_JAVA_OPTS=-Duser.timezone=UTC -Dfile.encoding=UTF-8 -Dlog4j.configurationFile=/shared/docker/lib/log4j2.xml -XX:+ExitOnOutOfMemoryError -XX:+HeapDumpOnOutOfMemoryError -XX:HeapDumpPath=/tmp DRUID_DEP_LIB_DIR=/shared/hadoop_xml:/shared/docker/lib/*:/usr/local/druid/lib/mysql-connector-java.jar # Druid configs -druid_extensions_loadList=[] +druid_extensions_loadList=["mysql-metadata-storage","druid-s3-extensions","druid-basic-security","simple-client-sslcontext","druid-testing-tools","druid-lookups-cached-global","druid-histogram","druid-datasketches"] druid_extensions_directory=/shared/docker/extensions druid_auth_authenticator_ldap_authorizerName=ldapauth druid_auth_authenticator_ldap_initialAdminPassword=priest @@ -77,4 
+78,8 @@ druid_auth_basic_common_maxSyncRetries=20 druid_indexer_logs_directory=/shared/tasklogs druid_sql_enable=true druid_extensions_hadoopDependenciesDir=/shared/hadoop-dependencies -druid_request_logging_type=slf4j \ No newline at end of file +druid_request_logging_type=slf4j + +# Setting s3 credentials and region to use pre-populated data for testing. +druid_s3_accessKey=AKIAT2GGLKKJQCMG64V4 +druid_s3_secretKey=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 diff --git a/integration-tests/docker/environment-configs/common-shuffle-deep-store b/integration-tests/docker/environment-configs/common-shuffle-deep-store deleted file mode 100644 index 30117bf369e..00000000000 --- a/integration-tests/docker/environment-configs/common-shuffle-deep-store +++ /dev/null @@ -1,81 +0,0 @@ -# -# Licensed to the Apache Software Foundation (ASF) under one -# or more contributor license agreements. See the NOTICE file -# distributed with this work for additional information -# regarding copyright ownership. The ASF licenses this file -# to you under the Apache License, Version 2.0 (the -# "License"); you may not use this file except in compliance -# with the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, -# software distributed under the License is distributed on an -# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -# KIND, either express or implied. See the License for the -# specific language governing permissions and limitations -# under the License. 
-# - -LANG=C.UTF-8 -LANGUAGE=C.UTF-8 -LC_ALL=C.UTF-8 - -# JAVA OPTS -COMMON_DRUID_JAVA_OPTS=-Duser.timezone=UTC -Dfile.encoding=UTF-8 -Dlog4j.configurationFile=/shared/docker/lib/log4j2.xml -XX:+ExitOnOutOfMemoryError -XX:+HeapDumpOnOutOfMemoryError -XX:HeapDumpPath=/tmp -DRUID_DEP_LIB_DIR=/shared/hadoop_xml:/shared/docker/lib/*:/usr/local/druid/lib/mysql-connector-java.jar - -# Druid configs -druid_extensions_loadList=[] -druid_extensions_directory=/shared/docker/extensions -druid_auth_authenticator_basic_authorizerName=basic -druid_auth_authenticator_basic_initialAdminPassword=priest -druid_auth_authenticator_basic_initialInternalClientPassword=warlock -druid_auth_authenticator_basic_type=basic -druid_auth_authenticatorChain=["basic"] -druid_auth_authorizer_basic_type=basic -druid_auth_authorizers=["basic"] -druid_client_https_certAlias=druid -druid_client_https_keyManagerPassword=druid123 -druid_client_https_keyStorePassword=druid123 -druid_client_https_keyStorePath=/tls/server.jks -druid_client_https_protocol=TLSv1.2 -druid_client_https_trustStoreAlgorithm=PKIX -druid_client_https_trustStorePassword=druid123 -druid_client_https_trustStorePath=/tls/truststore.jks -druid_enableTlsPort=true -druid_escalator_authorizerName=basic -druid_escalator_internalClientPassword=warlock -druid_escalator_internalClientUsername=druid_system -druid_escalator_type=basic -druid_lookup_numLookupLoadingThreads=1 -druid_server_http_numThreads=20 -# Allow OPTIONS method for ITBasicAuthConfigurationTest.testSystemSchemaAccess -druid_server_http_allowedHttpMethods=["OPTIONS"] -druid_server_https_certAlias=druid -druid_server_https_keyManagerPassword=druid123 -druid_server_https_keyStorePassword=druid123 -druid_server_https_keyStorePath=/tls/server.jks -druid_server_https_keyStoreType=jks -druid_server_https_requireClientCertificate=true -druid_server_https_trustStoreAlgorithm=PKIX -druid_server_https_trustStorePassword=druid123 -druid_server_https_trustStorePath=/tls/truststore.jks 
-druid_server_https_validateHostnames=true -druid_zk_service_host=druid-zookeeper-kafka -druid_auth_basic_common_maxSyncRetries=20 -druid_indexer_logs_directory=/shared/tasklogs -druid_sql_enable=true -druid_extensions_hadoopDependenciesDir=/shared/hadoop-dependencies -druid_request_logging_type=slf4j -druid_coordinator_kill_supervisor_on=true -druid_coordinator_kill_supervisor_period=PT10S -druid_coordinator_kill_supervisor_durationToRetain=PT0M -druid_coordinator_period_metadataStoreManagementPeriod=PT10S - -# Testing the legacy config from https://github.com/apache/druid/pull/10267 -# Can remove this when the flag is no longer needed -druid_indexer_task_ignoreTimestampSpecForDruidInputSource=true -# Test with deep storage as intermediate location to store shuffle data -# Local deep storage will be used here -druid_processing_intermediaryData_storage_type=deepstore diff --git a/integration-tests/docker/environment-configs/empty-config b/integration-tests/docker/environment-configs/empty-config new file mode 100644 index 00000000000..fe95886d5c1 --- /dev/null +++ b/integration-tests/docker/environment-configs/empty-config @@ -0,0 +1,18 @@ +# +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. 
+# diff --git a/integration-tests/docker/environment-configs/test-groups/custom-coordinator-duties b/integration-tests/docker/environment-configs/test-groups/custom-coordinator-duties new file mode 100644 index 00000000000..0fd17a49617 --- /dev/null +++ b/integration-tests/docker/environment-configs/test-groups/custom-coordinator-duties @@ -0,0 +1,30 @@ +# +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. 
+# + +druid_extensions_loadList=["druid-kafka-indexing-service","mysql-metadata-storage","druid-s3-extensions","druid-basic-security","simple-client-sslcontext","druid-testing-tools","druid-lookups-cached-global","druid-histogram","druid-datasketches"] + +druid_coordinator_period_metadataStoreManagementPeriod=PT1H +druid_sql_planner_authorizeSystemTablesDirectly=false + +#Testing kill supervisor custom coordinator duty +druid_coordinator_kill_supervisor_on=false +druid_coordinator_dutyGroups=["cleanupMetadata"] +druid_coordinator_cleanupMetadata_duties=["killSupervisors"] +druid_coordinator_cleanupMetadata_duty_killSupervisors_retainDuration=PT0M +druid_coordinator_cleanupMetadata_period=PT10S diff --git a/integration-tests/docker/environment-configs/test-groups/prepopulated-data b/integration-tests/docker/environment-configs/test-groups/prepopulated-data new file mode 100644 index 00000000000..acce51db6d6 --- /dev/null +++ b/integration-tests/docker/environment-configs/test-groups/prepopulated-data @@ -0,0 +1,26 @@ +# +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. 
+# + +AWS_REGION=us-east-1 + +druid_extensions_loadList=["mysql-metadata-storage","druid-s3-extensions","druid-basic-security","simple-client-sslcontext","druid-testing-tools","druid-lookups-cached-global","druid-histogram","druid-datasketches","druid-integration-tests"] + +# Setting s3 credentials and region to use pre-populated data for testing. +druid_s3_accessKey=AKIAT2GGLKKJQCMG64V4 +druid_s3_secretKey=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 diff --git a/integration-tests/docker/environment-configs/test-groups/shuffle-deep-store b/integration-tests/docker/environment-configs/test-groups/shuffle-deep-store new file mode 100644 index 00000000000..70a6a65940c --- /dev/null +++ b/integration-tests/docker/environment-configs/test-groups/shuffle-deep-store @@ -0,0 +1,23 @@ +# +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. 
+# + +# Test with deep storage as intermediate location to store shuffle data +# Local deep storage will be used here +druid_extensions_loadList=["mysql-metadata-storage","druid-basic-security","simple-client-sslcontext","druid-testing-tools","druid-lookups-cached-global","druid-histogram","druid-datasketches"] +druid_processing_intermediaryData_storage_type=deepstore diff --git a/integration-tests/pom.xml b/integration-tests/pom.xml index 9985a022c91..2e58be2fa23 100644 --- a/integration-tests/pom.xml +++ b/integration-tests/pom.xml @@ -95,13 +95,13 @@ com.amazonaws aws-java-sdk-s3 ${aws.sdk.version} - runtime + provided org.apache.druid.extensions druid-orc-extensions ${project.parent.version} - runtime + provided javax.servlet @@ -117,49 +117,49 @@ org.apache.druid.extensions druid-parquet-extensions ${project.parent.version} - runtime + provided org.apache.druid.extensions druid-avro-extensions ${project.parent.version} - runtime + provided org.apache.druid.extensions druid-protobuf-extensions ${project.parent.version} - runtime + provided org.apache.druid.extensions druid-s3-extensions ${project.parent.version} - runtime + provided org.apache.druid.extensions druid-kinesis-indexing-service ${project.parent.version} - runtime + provided org.apache.druid.extensions druid-azure-extensions ${project.parent.version} - runtime + provided org.apache.druid.extensions druid-google-extensions ${project.parent.version} - runtime + provided org.apache.druid.extensions druid-hdfs-storage ${project.parent.version} - runtime + provided com.amazonaws @@ -171,19 +171,19 @@ org.apache.druid.extensions druid-datasketches ${project.parent.version} - runtime + provided org.apache.druid.extensions druid-histogram ${project.parent.version} - runtime + provided org.apache.druid druid-aws-common ${project.parent.version} - runtime + provided org.apache.druid @@ -204,7 +204,7 @@ org.apache.druid.extensions mysql-metadata-storage ${project.parent.version} - runtime + provided 
org.apache.druid.extensions @@ -226,13 +226,13 @@ org.apache.druid.extensions druid-lookups-cached-global ${project.parent.version} - runtime + provided org.apache.druid.extensions druid-testing-tools ${project.parent.version} - runtime + provided org.apache.druid.extensions @@ -301,7 +301,7 @@ org.apache.logging.log4j log4j-slf4j-impl - runtime + provided com.google.code.findbugs @@ -602,6 +602,7 @@ ${apache.kafka.version} ${zookeeper.version} ${hadoop.compile.version} + ${parent.version} ${project.basedir}/build_run_cluster.sh @@ -618,6 +619,7 @@ ${groups} ${override.config.path} ${it.indexer} + ${parent.version} ${project.basedir}/stop_cluster.sh diff --git a/integration-tests/script/copy_resources_template.sh b/integration-tests/script/copy_resources_template.sh index 3f6eba9b085..87db2d1cbbd 100755 --- a/integration-tests/script/copy_resources_template.sh +++ b/integration-tests/script/copy_resources_template.sh @@ -27,9 +27,15 @@ cp -r client_tls docker/client_tls rm -rf $SHARED_DIR/docker mkdir -p $SHARED_DIR cp -R docker $SHARED_DIR/docker -mvn -B dependency:copy-dependencies -DoutputDirectory=$SHARED_DIR/docker/lib -# Make directories if they dont exist +pushd ../ +rm -rf distribution/target/apache-druid-$DRUID_VERSION-integration-test-bin +mvn -DskipTests -T1C -Danimal.sniffer.skip=true -Dcheckstyle.skip=true -Ddruid.console.skip=true -Denforcer.skip=true -Dforbiddenapis.skip=true -Dmaven.javadoc.skip=true -Dpmd.skip=true -Dspotbugs.skip=true install -Pintegration-test +mv distribution/target/apache-druid-$DRUID_VERSION-integration-test-bin/lib $SHARED_DIR/docker/lib +mv distribution/target/apache-druid-$DRUID_VERSION-integration-test-bin/extensions $SHARED_DIR/docker/extensions +popd + +# Make directories if they don't exist mkdir -p $SHARED_DIR/hadoop_xml mkdir -p $SHARED_DIR/hadoop-dependencies mkdir -p $SHARED_DIR/logs @@ -40,37 +46,8 @@ mkdir -p $SHARED_DIR/docker/credentials # install logging config cp src/main/resources/log4j2.xml 
$SHARED_DIR/docker/lib/log4j2.xml -# copy the integration test jar, it provides test-only extension implementations -cp target/druid-integration-tests*.jar $SHARED_DIR/docker/lib - -# move extensions into a seperate extension folder -# For druid-integration-tests -mkdir -p $SHARED_DIR/docker/extensions/druid-integration-tests -# We don't want to copy tests jar. -cp $SHARED_DIR/docker/lib/druid-integration-tests-*[^s].jar $SHARED_DIR/docker/extensions/druid-integration-tests -# For druid-s3-extensions -mkdir -p $SHARED_DIR/docker/extensions/druid-s3-extensions -mv $SHARED_DIR/docker/lib/druid-s3-extensions-* $SHARED_DIR/docker/extensions/druid-s3-extensions -# For druid-azure-extensions -mkdir -p $SHARED_DIR/docker/extensions/druid-azure-extensions -mv $SHARED_DIR/docker/lib/druid-azure-extensions-* $SHARED_DIR/docker/extensions/druid-azure-extensions -# For druid-google-extensions -mkdir -p $SHARED_DIR/docker/extensions/druid-google-extensions -mv $SHARED_DIR/docker/lib/druid-google-extensions-* $SHARED_DIR/docker/extensions/druid-google-extensions -# For druid-hdfs-storage -mkdir -p $SHARED_DIR/docker/extensions/druid-hdfs-storage -mv $SHARED_DIR/docker/lib/druid-hdfs-storage-* $SHARED_DIR/docker/extensions/druid-hdfs-storage -# For druid-kinesis-indexing-service -mkdir -p $SHARED_DIR/docker/extensions/druid-kinesis-indexing-service -mv $SHARED_DIR/docker/lib/druid-kinesis-indexing-service-* $SHARED_DIR/docker/extensions/druid-kinesis-indexing-service -# For druid-parquet-extensions -# Using cp so that this extensions is included when running Druid without loadList and as a option for the loadList -mkdir -p $SHARED_DIR/docker/extensions/druid-parquet-extensions -cp $SHARED_DIR/docker/lib/druid-parquet-extensions-* $SHARED_DIR/docker/extensions/druid-parquet-extensions -# For druid-orc-extensions -# Using cp so that this extensions is included when running Druid without loadList and as a option for the loadList -mkdir -p 
$SHARED_DIR/docker/extensions/druid-orc-extensions -cp $SHARED_DIR/docker/lib/druid-orc-extensions-* $SHARED_DIR/docker/extensions/druid-orc-extensions +# Extensions for testing are pulled while creating a binary. +# See the 'integration-test' profile in $ROOT/distribution/pom.xml. # Pull Hadoop dependency if needed if [ -n "$DRUID_INTEGRATION_TEST_START_HADOOP_DOCKER" ] && [ "$DRUID_INTEGRATION_TEST_START_HADOOP_DOCKER" == true ] diff --git a/integration-tests/script/docker_compose_args.sh b/integration-tests/script/docker_compose_args.sh index 06f786bc0bf..ac0f9533c15 100644 --- a/integration-tests/script/docker_compose_args.sh +++ b/integration-tests/script/docker_compose_args.sh @@ -20,75 +20,59 @@ set -e # for a given test group getComposeArgs() { - if [ -z "$DRUID_INTEGRATION_TEST_OVERRIDE_CONFIG_PATH" ] + # Sanity check: DRUID_INTEGRATION_TEST_INDEXER must be "indexer" or "middleManager" + if [ "$DRUID_INTEGRATION_TEST_INDEXER" != "indexer" ] && [ "$DRUID_INTEGRATION_TEST_INDEXER" != "middleManager" ] then - # Sanity check: DRUID_INTEGRATION_TEST_INDEXER must be "indexer" or "middleManager" - if [ "$DRUID_INTEGRATION_TEST_INDEXER" != "indexer" ] && [ "$DRUID_INTEGRATION_TEST_INDEXER" != "middleManager" ] + echo "DRUID_INTEGRATION_TEST_INDEXER must be 'indexer' or 'middleManager' (is '$DRUID_INTEGRATION_TEST_INDEXER')" + exit 1 + fi + if [ "$DRUID_INTEGRATION_TEST_INDEXER" = "indexer" ] + then + # Sanity check: cannot combine CliIndexer tests with security, query-retry tests + if [ "$DRUID_INTEGRATION_TEST_GROUP" = "security" ] || [ "$DRUID_INTEGRATION_TEST_GROUP" = "ldap-security" ] || [ "$DRUID_INTEGRATION_TEST_GROUP" = "query-retry" ] || [ "$DRUID_INTEGRATION_TEST_GROUP" = "query-error" ] || [ "$DRUID_INTEGRATION_TEST_GROUP" = "high-availability" ] then - echo "DRUID_INTEGRATION_TEST_INDEXER must be 'indexer' or 'middleManager' (is '$DRUID_INTEGRATION_TEST_INDEXER')" + echo "Cannot run test group '$DRUID_INTEGRATION_TEST_GROUP' with CliIndexer" exit 1 - fi 
- if [ "$DRUID_INTEGRATION_TEST_INDEXER" = "indexer" ] - then - # Sanity check: cannot combine CliIndexer tests with security, query-retry tests - if [ "$DRUID_INTEGRATION_TEST_GROUP" = "security" ] || [ "$DRUID_INTEGRATION_TEST_GROUP" = "ldap-security" ] || [ "$DRUID_INTEGRATION_TEST_GROUP" = "query-retry" ] || [ "$DRUID_INTEGRATION_TEST_GROUP" = "query-error" ] || [ "$DRUID_INTEGRATION_TEST_GROUP" = "high-availability" ] - then - echo "Cannot run test group '$DRUID_INTEGRATION_TEST_GROUP' with CliIndexer" - exit 1 - elif [ "$DRUID_INTEGRATION_TEST_GROUP" = "kafka-data-format" ] - then - # Replace MiddleManager with Indexer + schema registry container - echo "-f ${DOCKERDIR}/docker-compose.cli-indexer.yml -f ${DOCKERDIR}/docker-compose.schema-registry-indexer.yml" - else - # Replace MiddleManager with Indexer - echo "-f ${DOCKERDIR}/docker-compose.cli-indexer.yml" - fi - elif [ "$DRUID_INTEGRATION_TEST_GROUP" = "security" ] - then - # default + additional druid router (custom-check-tls, permissive-tls, no-client-auth-tls) - echo "-f ${DOCKERDIR}/docker-compose.yml -f ${DOCKERDIR}/docker-compose.security.yml" - elif [ "$DRUID_INTEGRATION_TEST_GROUP" = "ldap-security" ] - then - # default + additional druid router (custom-check-tls, permissive-tls, no-client-auth-tls) - echo "-f ${DOCKERDIR}/docker-compose.yml -f ${DOCKERDIR}/docker-compose.ldap-security.yml" - elif [ "$DRUID_INTEGRATION_TEST_GROUP" = "query-retry" ] - then - # default + additional historical modified for query retry test - # See CliHistoricalForQueryRetryTest. - echo "-f ${DOCKERDIR}/docker-compose.query-retry-test.yml" - elif [ "$DRUID_INTEGRATION_TEST_GROUP" = "query-error" ] - then - # default + additional historical modified for query error test - # See CliHistoricalForQueryRetryTest. 
- echo "-f ${DOCKERDIR}/docker-compose.query-error-test.yml" - elif [ "$DRUID_INTEGRATION_TEST_GROUP" = "custom-coordinator-duties" ] - then - # default + custom for Coordinator to enable custom coordinator duties - echo "-f ${DOCKERDIR}/docker-compose.custom-coordinator-duties.yml" - elif [ "$DRUID_INTEGRATION_TEST_GROUP" = "high-availability" ] - then - # the 'high availability' test cluster with multiple coordinators and overlords - echo "-f ${DOCKERDIR}/docker-compose.high-availability.yml" elif [ "$DRUID_INTEGRATION_TEST_GROUP" = "kafka-data-format" ] then - # default + schema registry container - echo "-f ${DOCKERDIR}/docker-compose.yml -f ${DOCKERDIR}/docker-compose.schema-registry.yml" - elif [ "$DRUID_INTEGRATION_TEST_GROUP" = "shuffle-deep-store" ] - then - # default + schema registry container - echo "-f ${DOCKERDIR}/docker-compose.shuffle-deep-store.yml" + # Replace MiddleManager with Indexer + schema registry container + echo "-f ${DOCKERDIR}/docker-compose.cli-indexer.yml -f ${DOCKERDIR}/docker-compose.schema-registry-indexer.yml" else - # default - echo "-f ${DOCKERDIR}/docker-compose.yml" + # Replace MiddleManager with Indexer + echo "-f ${DOCKERDIR}/docker-compose.cli-indexer.yml" fi + elif [ "$DRUID_INTEGRATION_TEST_GROUP" = "security" ] + then + # default + additional druid router (custom-check-tls, permissive-tls, no-client-auth-tls) + echo "-f ${DOCKERDIR}/docker-compose.yml -f ${DOCKERDIR}/docker-compose.security.yml" + elif [ "$DRUID_INTEGRATION_TEST_GROUP" = "ldap-security" ] + then + # default + additional druid router (custom-check-tls, permissive-tls, no-client-auth-tls) + echo "-f ${DOCKERDIR}/docker-compose.yml -f ${DOCKERDIR}/docker-compose.ldap-security.yml" + elif [ "$DRUID_INTEGRATION_TEST_GROUP" = "query-retry" ] + then + # default + additional historical modified for query retry test + # See CliHistoricalForQueryRetryTest. 
+ echo "-f ${DOCKERDIR}/docker-compose.query-retry-test.yml" + elif [ "$DRUID_INTEGRATION_TEST_GROUP" = "query-error" ] + then + # default + additional historical modified for query error test + # See CliHistoricalForQueryRetryTest. + echo "-f ${DOCKERDIR}/docker-compose.query-error-test.yml" + elif [ "$DRUID_INTEGRATION_TEST_GROUP" = "high-availability" ] + then + # the 'high availability' test cluster with multiple coordinators and overlords + echo "-f ${DOCKERDIR}/docker-compose.high-availability.yml" + elif [ "$DRUID_INTEGRATION_TEST_GROUP" = "kafka-data-format" ] + then + # default + schema registry container + echo "-f ${DOCKERDIR}/docker-compose.yml -f ${DOCKERDIR}/docker-compose.schema-registry.yml" + elif [ "$DRUID_INTEGRATION_TEST_GROUP" = "kinesis-data-format" ] + then + # default + with override config + schema registry container + echo "-f ${DOCKERDIR}/docker-compose.yml -f ${DOCKERDIR}/docker-compose.schema-registry.yml" else - if [ "$DRUID_INTEGRATION_TEST_GROUP" = "kinesis-data-format" ] - then - # default + with override config + schema registry container - echo "-f ${DOCKERDIR}/docker-compose.override-env.yml -f ${DOCKERDIR}/docker-compose.schema-registry.yml" - else - # with override config - echo "-f ${DOCKERDIR}/docker-compose.override-env.yml" - fi + # default + echo "-f ${DOCKERDIR}/docker-compose.yml" fi } diff --git a/integration-tests/script/docker_run_cluster.sh b/integration-tests/script/docker_run_cluster.sh index 23a1e8276ad..03eb735d2f1 100755 --- a/integration-tests/script/docker_run_cluster.sh +++ b/integration-tests/script/docker_run_cluster.sh @@ -47,9 +47,11 @@ fi if [ -z "$DRUID_INTEGRATION_TEST_OVERRIDE_CONFIG_PATH" ] then # Start Druid cluster - docker-compose $(getComposeArgs) up -d + echo "Starting cluster with empty config" + OVERRIDE_ENV=environment-configs/empty-config docker-compose $(getComposeArgs) up -d else # run druid cluster with override config + echo "Starting cluster with a config file at 
$DRUID_INTEGRATION_TEST_OVERRIDE_CONFIG_PATH" OVERRIDE_ENV=$DRUID_INTEGRATION_TEST_OVERRIDE_CONFIG_PATH docker-compose $(getComposeArgs) up -d fi } diff --git a/integration-tests/src/main/java/org/apache/druid/testing/clients/OverlordResourceTestClient.java b/integration-tests/src/main/java/org/apache/druid/testing/clients/OverlordResourceTestClient.java index a4932d1d533..271ac3d3e55 100644 --- a/integration-tests/src/main/java/org/apache/druid/testing/clients/OverlordResourceTestClient.java +++ b/integration-tests/src/main/java/org/apache/druid/testing/clients/OverlordResourceTestClient.java @@ -104,7 +104,7 @@ public class OverlordResourceTestClient response.getContent(), JacksonUtils.TYPE_REFERENCE_MAP_STRING_STRING ); String taskID = responseData.get("task"); - LOG.info("Submitted task with TaskID[%s]", taskID); + LOG.debug("Submitted task with TaskID[%s]", taskID); return taskID; }, Predicates.alwaysTrue(), @@ -127,7 +127,7 @@ public class OverlordResourceTestClient StringUtils.urlEncode(taskID) ) ); - LOG.info("Index status response" + response.getContent()); + LOG.debug("Index status response" + response.getContent()); TaskStatusResponse taskStatusResponse = jsonMapper.readValue( response.getContent(), new TypeReference() @@ -180,7 +180,7 @@ public class OverlordResourceTestClient HttpMethod.GET, StringUtils.format("%s%s", getIndexerURL(), identifier) ); - LOG.info("Tasks %s response %s", identifier, response.getContent()); + LOG.debug("Tasks %s response %s", identifier, response.getContent()); return jsonMapper.readValue( response.getContent(), new TypeReference>() { @@ -199,7 +199,7 @@ public class OverlordResourceTestClient HttpMethod.GET, StringUtils.format("%stask/%s", getIndexerURL(), taskId) ); - LOG.info("Task %s response %s", taskId, response.getContent()); + LOG.debug("Task %s response %s", taskId, response.getContent()); return jsonMapper.readValue( response.getContent(), new TypeReference() { @@ -380,7 +380,7 @@ public class 
OverlordResourceTestClient response.getContent(), JacksonUtils.TYPE_REFERENCE_MAP_STRING_STRING ); String id = responseData.get("id"); - LOG.info("Submitted supervisor with id[%s]", id); + LOG.debug("Submitted supervisor with id[%s]", id); return id; } catch (Exception e) { @@ -409,7 +409,7 @@ public class OverlordResourceTestClient response.getContent() ); } - LOG.info("Shutdown supervisor with id[%s]", id); + LOG.debug("Shutdown supervisor with id[%s]", id); } catch (ISE e) { throw e; @@ -440,7 +440,7 @@ public class OverlordResourceTestClient response.getContent() ); } - LOG.info("Terminate supervisor with id[%s]", id); + LOG.debug("Terminate supervisor with id[%s]", id); } catch (ISE e) { throw e; @@ -471,7 +471,7 @@ public class OverlordResourceTestClient response.getContent() ); } - LOG.info("Shutdown task with id[%s]", id); + LOG.debug("Shutdown task with id[%s]", id); } catch (ISE e) { throw e; @@ -511,7 +511,7 @@ public class OverlordResourceTestClient JacksonUtils.TYPE_REFERENCE_MAP_STRING_OBJECT ); String state = (String) payload.get("state"); - LOG.info("Supervisor id[%s] has state [%s]", id, state); + LOG.debug("Supervisor id[%s] has state [%s]", id, state); return SupervisorStateManager.BasicState.valueOf(state); } catch (ISE e) { @@ -543,7 +543,7 @@ public class OverlordResourceTestClient response.getContent() ); } - LOG.info("Suspended supervisor with id[%s]", id); + LOG.debug("Suspended supervisor with id[%s]", id); } catch (ISE e) { throw e; @@ -574,7 +574,7 @@ public class OverlordResourceTestClient response.getContent() ); } - LOG.info("stats supervisor with id[%s]", id); + LOG.debug("stats supervisor with id[%s]", id); } catch (ISE e) { throw e; @@ -605,7 +605,7 @@ public class OverlordResourceTestClient response.getContent() ); } - LOG.info("get supervisor health with id[%s]", id); + LOG.debug("get supervisor health with id[%s]", id); } catch (ISE e) { throw e; @@ -636,7 +636,7 @@ public class OverlordResourceTestClient response.getContent() 
); } - LOG.info("Resumed supervisor with id[%s]", id); + LOG.debug("Resumed supervisor with id[%s]", id); } catch (ISE e) { throw e; @@ -667,7 +667,7 @@ public class OverlordResourceTestClient response.getContent() ); } - LOG.info("Reset supervisor with id[%s]", id); + LOG.debug("Reset supervisor with id[%s]", id); } catch (ISE e) { throw e; diff --git a/integration-tests/src/test/java/org/apache/druid/tests/query/ITSqlCancelTest.java b/integration-tests/src/test/java/org/apache/druid/tests/query/ITSqlCancelTest.java index 905dcfed011..e8a3fe5c875 100644 --- a/integration-tests/src/test/java/org/apache/druid/tests/query/ITSqlCancelTest.java +++ b/integration-tests/src/test/java/org/apache/druid/tests/query/ITSqlCancelTest.java @@ -101,7 +101,7 @@ public class ITSqlCancelTest 1000 ); if (!responseStatus.equals(HttpResponseStatus.ACCEPTED)) { - throw new RE("Failed to cancel query [%s]", queryId); + throw new RE("Failed to cancel query [%s]. Response code was [%s]", queryId, responseStatus); } for (Future queryResponseFuture : queryResponseFutures) { @@ -141,7 +141,7 @@ public class ITSqlCancelTest final StatusResponseHolder queryResponse = queryResponseFuture.get(30, TimeUnit.SECONDS); if (!queryResponse.getStatus().equals(HttpResponseStatus.OK)) { - throw new ISE("Query is not canceled after cancel request"); + throw new ISE("Cancel request failed with status[%s] and content[%s]", queryResponse.getStatus(), queryResponse.getContent()); } } } diff --git a/integration-tests/stop_cluster.sh b/integration-tests/stop_cluster.sh index 6fdbeb26f8c..b88fb91a8fb 100755 --- a/integration-tests/stop_cluster.sh +++ b/integration-tests/stop_cluster.sh @@ -26,6 +26,7 @@ then exit 0 fi +rm -rf $(dirname "$0")/../apache-druid-$DRUID_VERSION # stop hadoop container if it exists (can't use docker-compose down because it shares network) HADOOP_CONTAINER="$(docker ps -aq -f name=druid-it-hadoop)" @@ -38,7 +39,7 @@ fi # bring down using the same compose args we started with if [ -z 
"$DRUID_INTEGRATION_TEST_OVERRIDE_CONFIG_PATH" ] then - docker-compose $(getComposeArgs) down + OVERRIDE_ENV=environment-configs/empty-config docker-compose $(getComposeArgs) down else OVERRIDE_ENV=$DRUID_INTEGRATION_TEST_OVERRIDE_CONFIG_PATH docker-compose $(getComposeArgs) down fi