Use Druid's extension loading for integration test instead of maven (#12095)

* Use Druid's extension loading for integration test instead of maven

* fix maven command

* override config path

* load input format extensions and kafka by default; add prepopulated-data group

* all docker-composes are overridable

* fix s3 configs

* override config for all

* fix docker_compose_args

* fix security tests

* turn off debug logs for overlord api calls

* clean up stuff

* revert docker-compose.yml

* fix override config for query error test; fix circular dependency in docker compose

* add back some dependencies in docker compose

* new maven profile for integration test

* example file filter
Jihoon Son 2022-01-05 23:33:04 -08:00 committed by GitHub
parent 6846622080
commit 4a74c5adcc
31 changed files with 472 additions and 659 deletions
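As a point of reference for the CI changes below, here is a minimal sketch of how one of these integration-test jobs would now be launched locally, assuming the same variables the Travis jobs set (the group name and config path are the shuffle-deep-store example from this change):

```bash
# Sketch only: mirrors the updated `script` step in .travis.yml. The new
# -Doverride.config.path flag points the Docker cluster at a per-group config
# under integration-tests/docker/environment-configs/test-groups/.
mvn verify -pl integration-tests -P integration-tests \
  -Dgroups=shuffle-deep-store \
  -Djvm.runtime=8 \
  -Dit.indexer=middleManager \
  -Doverride.config.path=./environment-configs/test-groups/shuffle-deep-store
```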


@ -420,7 +420,7 @@ jobs:
- docker
env: TESTNG_GROUPS='-Dgroups=batch-index' JVM_RUNTIME='-Djvm.runtime=8' USE_INDEXER='middleManager'
script: &run_integration_test
- ${MVN} verify -pl integration-tests -P integration-tests ${TESTNG_GROUPS} ${JVM_RUNTIME} -Dit.indexer=${USE_INDEXER} ${MAVEN_SKIP}
- ${MVN} verify -pl integration-tests -P integration-tests ${TESTNG_GROUPS} ${JVM_RUNTIME} -Dit.indexer=${USE_INDEXER} ${MAVEN_SKIP} -Doverride.config.path=${OVERRIDE_CONFIG_PATH}
after_failure: &integration_test_diags
- for v in ~/shared/logs/*.log ; do
echo $v logtail ======================== ; tail -100 $v ;
@ -475,11 +475,11 @@ jobs:
- <<: *integration_perfect_rollup_parallel_batch_index
name: "(Compile=openjdk8, Run=openjdk8) perfect rollup parallel batch index integration test with deep storage as intermediate store"
env: TESTNG_GROUPS='-Dgroups=shuffle-deep-store' JVM_RUNTIME='-Djvm.runtime=8' USE_INDEXER='middleManager'
env: TESTNG_GROUPS='-Dgroups=shuffle-deep-store' JVM_RUNTIME='-Djvm.runtime=8' USE_INDEXER='middleManager' OVERRIDE_CONFIG_PATH='./environment-configs/test-groups/shuffle-deep-store'
- <<: *integration_perfect_rollup_parallel_batch_index
name: "(Compile=openjdk8, Run=openjdk8) perfect rollup parallel batch index integration test with deep storage as intermediate store with indexer"
env: TESTNG_GROUPS='-Dgroups=shuffle-deep-store' JVM_RUNTIME='-Djvm.runtime=8' USE_INDEXER='indexer'
env: TESTNG_GROUPS='-Dgroups=shuffle-deep-store' JVM_RUNTIME='-Djvm.runtime=8' USE_INDEXER='indexer' OVERRIDE_CONFIG_PATH='./environment-configs/test-groups/shuffle-deep-store'
- &integration_kafka_index
name: "(Compile=openjdk8, Run=openjdk8) kafka index integration test"
@ -496,7 +496,7 @@ jobs:
- <<: *integration_kafka_index
name: "(Compile=openjdk8, Run=openjdk8) custom coordinator duties integration test"
env: TESTNG_GROUPS='-Dgroups=custom-coordinator-duties' JVM_RUNTIME='-Djvm.runtime=8' USE_INDEXER='middleManager'
env: TESTNG_GROUPS='-Dgroups=custom-coordinator-duties' JVM_RUNTIME='-Djvm.runtime=8' USE_INDEXER='middleManager' OVERRIDE_CONFIG_PATH='./environment-configs/test-groups/custom-coordinator-duties'
- &integration_kafka_index_slow
name: "(Compile=openjdk8, Run=openjdk8) kafka index integration test slow"
@ -551,7 +551,7 @@ jobs:
stage: Tests - phase 2
jdk: openjdk8
services: *integration_test_services
env: TESTNG_GROUPS='-Dgroups=query' JVM_RUNTIME='-Djvm.runtime=8' USE_INDEXER='middleManager'
env: TESTNG_GROUPS='-Dgroups=query' JVM_RUNTIME='-Djvm.runtime=8' USE_INDEXER='middleManager' OVERRIDE_CONFIG_PATH='./environment-configs/test-groups/prepopulated-data'
script: *run_integration_test
after_failure: *integration_test_diags
@ -560,7 +560,7 @@ jobs:
stage: Tests - phase 2
jdk: openjdk8
services: *integration_test_services
env: TESTNG_GROUPS='-Dgroups=query-retry' JVM_RUNTIME='-Djvm.runtime=8' USE_INDEXER='middleManager'
env: TESTNG_GROUPS='-Dgroups=query-retry' JVM_RUNTIME='-Djvm.runtime=8' USE_INDEXER='middleManager' OVERRIDE_CONFIG_PATH='./environment-configs/test-groups/prepopulated-data'
script: *run_integration_test
after_failure: *integration_test_diags
@ -569,7 +569,7 @@ jobs:
stage: Tests - phase 2
jdk: openjdk8
services: *integration_test_services
env: TESTNG_GROUPS='-Dgroups=query-error' JVM_RUNTIME='-Djvm.runtime=8' USE_INDEXER='middleManager'
env: TESTNG_GROUPS='-Dgroups=query-error' JVM_RUNTIME='-Djvm.runtime=8' USE_INDEXER='middleManager' OVERRIDE_CONFIG_PATH='./environment-configs/test-groups/prepopulated-data'
script: *run_integration_test
after_failure: *integration_test_diags
@ -578,7 +578,7 @@ jobs:
stage: Tests - phase 2
jdk: openjdk8
services: *integration_test_services
env: TESTNG_GROUPS='-Dgroups=security' JVM_RUNTIME='-Djvm.runtime=8' USE_INDEXER='middleManager'
env: TESTNG_GROUPS='-Dgroups=security' JVM_RUNTIME='-Djvm.runtime=8' USE_INDEXER='middleManager' OVERRIDE_CONFIG_PATH='./environment-configs/test-groups/prepopulated-data'
script: *run_integration_test
after_failure: *integration_test_diags
@ -642,12 +642,12 @@ jobs:
- <<: *integration_tests
name: "(Compile=openjdk8, Run=openjdk8) leadership and high availability integration tests"
jdk: openjdk8
env: TESTNG_GROUPS='-Dgroups=high-availability' JVM_RUNTIME='-Djvm.runtime=8' USE_INDEXER='middleManager'
env: TESTNG_GROUPS='-Dgroups=high-availability' JVM_RUNTIME='-Djvm.runtime=8' USE_INDEXER='middleManager' OVERRIDE_CONFIG_PATH='./environment-configs/test-groups/prepopulated-data'
- <<: *integration_query
name: "(Compile=openjdk8, Run=openjdk8) query integration test (mariaDB)"
jdk: openjdk8
env: TESTNG_GROUPS='-Dgroups=query' USE_INDEXER='middleManager' MYSQL_DRIVER_CLASSNAME='org.mariadb.jdbc.Driver'
env: TESTNG_GROUPS='-Dgroups=query' USE_INDEXER='middleManager' MYSQL_DRIVER_CLASSNAME='org.mariadb.jdbc.Driver' OVERRIDE_CONFIG_PATH='./environment-configs/test-groups/prepopulated-data'
# END - Integration tests for Compile with Java 8 and Run with Java 8
@ -675,22 +675,22 @@ jobs:
- <<: *integration_query
name: "(Compile=openjdk8, Run=openjdk11) query integration test"
jdk: openjdk8
env: TESTNG_GROUPS='-Dgroups=query' JVM_RUNTIME='-Djvm.runtime=11' USE_INDEXER='middleManager'
env: TESTNG_GROUPS='-Dgroups=query' JVM_RUNTIME='-Djvm.runtime=11' USE_INDEXER='middleManager' OVERRIDE_CONFIG_PATH='./environment-configs/test-groups/prepopulated-data'
- <<: *integration_query_retry
name: "(Compile=openjdk8, Run=openjdk11) query retry integration test for missing segments"
jdk: openjdk8
env: TESTNG_GROUPS='-Dgroups=query-retry' JVM_RUNTIME='-Djvm.runtime=11' USE_INDEXER='middleManager'
env: TESTNG_GROUPS='-Dgroups=query-retry' JVM_RUNTIME='-Djvm.runtime=11' USE_INDEXER='middleManager' OVERRIDE_CONFIG_PATH='./environment-configs/test-groups/prepopulated-data'
- <<: *integration_query_error
name: "(Compile=openjdk8, Run=openjdk11) query error integration test for missing segments"
jdk: openjdk8
env: TESTNG_GROUPS='-Dgroups=query-error' JVM_RUNTIME='-Djvm.runtime=11' USE_INDEXER='middleManager'
env: TESTNG_GROUPS='-Dgroups=query-error' JVM_RUNTIME='-Djvm.runtime=11' USE_INDEXER='middleManager' OVERRIDE_CONFIG_PATH='./environment-configs/test-groups/prepopulated-data'
- <<: *integration_security
name: "(Compile=openjdk8, Run=openjdk11) security integration test"
jdk: openjdk8
env: TESTNG_GROUPS='-Dgroups=security' JVM_RUNTIME='-Djvm.runtime=11' USE_INDEXER='middleManager'
env: TESTNG_GROUPS='-Dgroups=security' JVM_RUNTIME='-Djvm.runtime=11' USE_INDEXER='middleManager' OVERRIDE_CONFIG_PATH='./environment-configs/test-groups/prepopulated-data'
- <<: *integration_ldap_security
name: "(Compile=openjdk8, Run=openjdk11) ldap security integration test"
@ -720,12 +720,12 @@ jobs:
- <<: *integration_tests
name: "(Compile=openjdk8, Run=openjdk11) leadership and high availability integration tests"
jdk: openjdk8
env: TESTNG_GROUPS='-Dgroups=high-availability' JVM_RUNTIME='-Djvm.runtime=11' USE_INDEXER='middleManager'
env: TESTNG_GROUPS='-Dgroups=high-availability' JVM_RUNTIME='-Djvm.runtime=11' USE_INDEXER='middleManager' OVERRIDE_CONFIG_PATH='./environment-configs/test-groups/prepopulated-data'
- <<: *integration_query
name: "(Compile=openjdk8, Run=openjdk11) query integration test (mariaDB)"
jdk: openjdk8
env: TESTNG_GROUPS='-Dgroups=query' JVM_RUNTIME='-Djvm.runtime=11' USE_INDEXER='middleManager' MYSQL_DRIVER_CLASSNAME='org.mariadb.jdbc.Driver'
env: TESTNG_GROUPS='-Dgroups=query' JVM_RUNTIME='-Djvm.runtime=11' USE_INDEXER='middleManager' MYSQL_DRIVER_CLASSNAME='org.mariadb.jdbc.Driver' OVERRIDE_CONFIG_PATH='./environment-configs/test-groups/prepopulated-data'
# END - Integration tests for Compile with Java 8 and Run with Java 11


@ -622,5 +622,112 @@
</plugins>
</build>
</profile>
<profile>
<id>integration-test</id>
<activation>
<activeByDefault>false</activeByDefault>
</activation>
<build>
<plugins>
<plugin>
<groupId>org.codehaus.mojo</groupId>
<artifactId>exec-maven-plugin</artifactId>
<executions>
<execution>
<id>pull-deps</id>
<phase>package</phase>
<goals>
<goal>exec</goal>
</goals>
<configuration>
<executable>java</executable>
<arguments>
<argument>-classpath</argument>
<classpath />
<argument>-Ddruid.extensions.loadList=[]</argument>
<argument>-Ddruid.extensions.directory=${project.build.directory}/extensions
</argument>
<argument>
-Ddruid.extensions.hadoopDependenciesDir=${project.build.directory}/hadoop-dependencies
</argument>
<argument>org.apache.druid.cli.Main</argument>
<argument>tools</argument>
<argument>pull-deps</argument>
<argument>--clean</argument>
<argument>--defaultVersion</argument>
<argument>${project.parent.version}</argument>
<argument>-l</argument>
<argument>${settings.localRepository}</argument>
<argument>-h</argument>
<argument>org.apache.hadoop:hadoop-client:${hadoop.compile.version}</argument>
<argument>-c</argument>
<argument>org.apache.druid.extensions:druid-avro-extensions</argument>
<argument>-c</argument>
<argument>org.apache.druid.extensions:druid-azure-extensions</argument>
<argument>-c</argument>
<argument>org.apache.druid.extensions:druid-datasketches</argument>
<argument>-c</argument>
<argument>org.apache.druid.extensions:druid-hdfs-storage</argument>
<argument>-c</argument>
<argument>org.apache.druid.extensions:druid-histogram</argument>
<argument>-c</argument>
<argument>org.apache.druid.extensions:druid-kafka-indexing-service</argument>
<argument>-c</argument>
<argument>org.apache.druid.extensions:druid-kinesis-indexing-service</argument>
<argument>-c</argument>
<argument>org.apache.druid.extensions:druid-lookups-cached-global</argument>
<argument>-c</argument>
<argument>org.apache.druid.extensions:druid-protobuf-extensions</argument>
<argument>-c</argument>
<argument>org.apache.druid.extensions:mysql-metadata-storage</argument>
<argument>-c</argument>
<argument>org.apache.druid.extensions:druid-orc-extensions</argument>
<argument>-c</argument>
<argument>org.apache.druid.extensions:druid-parquet-extensions</argument>
<argument>-c</argument>
<argument>org.apache.druid.extensions:postgresql-metadata-storage</argument>
<argument>-c</argument>
<argument>org.apache.druid.extensions:druid-s3-extensions</argument>
<argument>-c</argument>
<argument>org.apache.druid.extensions:druid-ec2-extensions</argument>
<argument>-c</argument>
<argument>org.apache.druid.extensions:druid-google-extensions</argument>
<argument>-c</argument>
<argument>org.apache.druid.extensions:simple-client-sslcontext</argument>
<argument>-c</argument>
<argument>org.apache.druid.extensions:druid-basic-security</argument>
<argument>-c</argument>
<argument>org.apache.druid:druid-integration-tests</argument>
<argument>-c</argument>
<argument>org.apache.druid.extensions:druid-testing-tools</argument>
<argument>${druid.distribution.pulldeps.opts}</argument>
</arguments>
</configuration>
</execution>
</executions>
</plugin>
<plugin>
<groupId>org.apache.maven.plugins</groupId>
<artifactId>maven-assembly-plugin</artifactId>
<executions>
<execution>
<id>distro-assembly</id>
<phase>package</phase>
<goals>
<goal>single</goal>
</goals>
<configuration>
<finalName>apache-druid-${project.version}-integration-test</finalName>
<tarLongFileMode>posix</tarLongFileMode>
<descriptors>
<descriptor>src/assembly/integration-test-assembly.xml</descriptor>
</descriptors>
</configuration>
</execution>
</executions>
</plugin>
</plugins>
</build>
</profile>
</profiles>
</project>
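For orientation, the `exec-maven-plugin` execution in the new `integration-test` profile amounts to a `pull-deps` run along these lines; a sketch only, with the classpath, version, and repository path shown as placeholders and only a few of the `-c` coordinates repeated:

```bash
# Roughly what the pull-deps execution above runs; one -c flag per extension
# listed in the profile (abbreviated here).
java -classpath "<druid-classpath>" \
  -Ddruid.extensions.loadList=[] \
  -Ddruid.extensions.directory=distribution/target/extensions \
  -Ddruid.extensions.hadoopDependenciesDir=distribution/target/hadoop-dependencies \
  org.apache.druid.cli.Main tools pull-deps --clean \
  --defaultVersion <druid-version> \
  -l ~/.m2/repository \
  -h org.apache.hadoop:hadoop-client:<hadoop-version> \
  -c org.apache.druid.extensions:druid-kafka-indexing-service \
  -c org.apache.druid.extensions:druid-s3-extensions \
  -c org.apache.druid:druid-integration-tests
  # ...remaining extensions omitted
```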


@ -0,0 +1,109 @@
<?xml version="1.0"?>
<!--
~ Licensed to the Apache Software Foundation (ASF) under one
~ or more contributor license agreements. See the NOTICE file
~ distributed with this work for additional information
~ regarding copyright ownership. The ASF licenses this file
~ to you under the Apache License, Version 2.0 (the
~ "License"); you may not use this file except in compliance
~ with the License. You may obtain a copy of the License at
~
~ http://www.apache.org/licenses/LICENSE-2.0
~
~ Unless required by applicable law or agreed to in writing,
~ software distributed under the License is distributed on an
~ "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
~ KIND, either express or implied. See the License for the
~ specific language governing permissions and limitations
~ under the License.
-->
<assembly xmlns="http://maven.apache.org/ASSEMBLY/2.0.0"
xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
xsi:schemaLocation="http://maven.apache.org/ASSEMBLY/2.0.0 http://maven.apache.org/xsd/assembly-2.0.0.xsd">
<id>bin</id>
<formats>
<format>dir</format>
</formats>
<includeBaseDirectory>false</includeBaseDirectory>
<fileSets>
<fileSet>
<directory>${project.build.directory}/extensions</directory>
<includes>
<include>*/*</include>
</includes>
<outputDirectory>extensions</outputDirectory>
</fileSet>
<fileSet>
<directory>${project.build.directory}/hadoop-dependencies</directory>
<includes>
<include>*/*/*</include>
</includes>
<outputDirectory>hadoop-dependencies</outputDirectory>
</fileSet>
<fileSet>
<directory>../examples/conf</directory>
<includes>
<include>*</include>
<include>*/*</include>
<include>*/*/*</include>
<include>*/*/*/*</include>
<include>*/*/*/*/*</include>
</includes>
<outputDirectory>conf</outputDirectory>
</fileSet>
<fileSet>
<directory>../examples/quickstart/</directory>
<includes>
<include>*</include>
</includes>
<outputDirectory>quickstart</outputDirectory>
</fileSet>
<fileSet>
<directory>../examples/quickstart/tutorial</directory>
<includes>
<include>wikiticker-2015-09-12-sampled.json.gz</include>
</includes>
<outputDirectory>quickstart/tutorial</outputDirectory>
</fileSet>
<fileSet>
<directory>../examples/quickstart/tutorial/hadoop</directory>
<includes>
<include>*</include>
</includes>
<outputDirectory>quickstart/tutorial/hadoop</outputDirectory>
</fileSet>
<fileSet>
<directory>../examples/quickstart/tutorial/hadoop/docker</directory>
<includes>
<include>*</include>
</includes>
<outputDirectory>quickstart/tutorial/hadoop/docker</outputDirectory>
</fileSet>
<fileSet>
<directory>../examples/bin</directory>
<includes>
<include>*</include>
</includes>
<fileMode>744</fileMode>
<outputDirectory>bin</outputDirectory>
</fileSet>
</fileSets>
<dependencySets>
<dependencySet>
<useProjectArtifact>false</useProjectArtifact>
<useTransitiveDependencies>true</useTransitiveDependencies>
<outputDirectory>lib</outputDirectory>
<excludes>
<exclude>org.apache.druid:extensions-distribution</exclude>
</excludes>
</dependencySet>
<dependencySet>
<unpack>true</unpack>
<includes>
<include>org.apache.druid:extensions-distribution</include>
</includes>
</dependencySet>
</dependencySets>
</assembly>
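Together with the profile above, this assembly descriptor yields an exploded integration-test distribution. A sketch of the build and the expected output location, assuming the assembly id `bin` is appended to the configured final name as usual:

```bash
# Build the integration-test distribution; build_run_cluster.sh (further down
# in this change) runs essentially this with various check-skipping flags.
mvn -DskipTests -Pintegration-test install
# The assembly format is "dir", so the result is a plain directory:
ls distribution/target/apache-druid-<version>-integration-test-bin/
# bin/  conf/  extensions/  hadoop-dependencies/  lib/  quickstart/
```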


@ -344,11 +344,10 @@ The duties will be grouped into multiple groups as per the elements in list `dru
All duties in the same group will have the same run period configured by `druid.coordinator.<GROUP_NAME>.period`.
Currently, there is a single thread running the duties sequentially for each group.
For example, see `KillSupervisorsCustomDuty` for a custom coordinator duty implementation and `common-custom-coordinator-duties`
integration test group which loads `KillSupervisorsCustomDuty` using the configs set in `integration-tests/docker/environment-configs/common-custom-coordinator-duties`.
The relevant configs in `integration-tests/docker/environment-configs/common-custom-coordinator-duties` are as follows:
(The configs create a custom coordinator duty group called `cleanupMetadata` which runs a custom coordinator duty called `killSupervisors` every 10 seconds.
The custom coordinator duty `killSupervisors` also has a config called `retainDuration` which is set to 0 minute)
For example, see `KillSupervisorsCustomDuty` for a custom coordinator duty implementation and the `custom-coordinator-duties`
integration test group which loads `KillSupervisorsCustomDuty` using the configs set in `integration-tests/docker/environment-configs/test-groups/custom-coordinator-duties`.
This config file adds the configs below to enable a custom coordinator duty.
```
druid.coordinator.dutyGroups=["cleanupMetadata"]
druid.coordinator.cleanupMetadata.duties=["killSupervisors"]
@ -356,6 +355,9 @@ druid.coordinator.cleanupMetadata.duty.killSupervisors.retainDuration=PT0M
druid.coordinator.cleanupMetadata.period=PT10S
```
These configurations create a custom coordinator duty group called `cleanupMetadata` which runs a custom coordinator duty called `killSupervisors` every 10 seconds.
The custom coordinator duty `killSupervisors` also has a config called `retainDuration`, which is set to 0 minutes.
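A sketch of exercising this group against the Docker-based test cluster directly, using the same override mechanism the integration-tests README describes (run from `integration-tests/docker`):

```bash
# Load the custom-coordinator-duties overrides on top of the default cluster config.
OVERRIDE_ENV=./environment-configs/test-groups/custom-coordinator-duties \
DRUID_INTEGRATION_TEST_GROUP=custom-coordinator-duties \
docker-compose -f docker-compose.yml up
```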
### Routing data through a HTTP proxy for your extension
You can add the ability for the `HttpClient` of your extension to connect through an HTTP proxy.


@ -103,7 +103,7 @@ Druid routers for security group integration test (permissive tls, no client aut
- To start Druid cluster with override configs
```bash
OVERRIDE_ENV=<PATH_TO_ENV> docker-compose -f docker-compose.override-env.yml up
OVERRIDE_ENV=<PATH_TO_ENV> docker-compose -f docker-compose.yml up
```
- To start tests against Hadoop
@ -127,31 +127,30 @@ Druid routers for security group integration test (permissive tls, no client aut
- docker-compose.base.yml
Base file that defines all containers for integration test
Base file that defines all containers for integration testing
- docker-compose.yml
Defines Druid cluster with default configuration that is used for running integration tests.
Defines a Druid cluster with default configuration that is used for running integration tests.
```bash
docker-compose -f docker-compose.yml up
// DRUID_INTEGRATION_TEST_GROUP - this variable is used in Druid docker container for "security" and "query" test group. Use next docker-compose if you want to run security/query tests.
DRUID_INTEGRATION_TEST_GROUP=security docker-compose -f docker-compose.yml up
# DRUID_INTEGRATION_TEST_GROUP - an environment variable that specifies the integration test group to run.
DRUID_INTEGRATION_TEST_GROUP=batch-index docker-compose -f docker-compose.yml up
```
- docker-compose.override-env.yml
Defines Druid cluster with default configuration plus any additional and/or overriden configurations from override-env file.
You can change the default configuration using a custom configuration file. The settings in the file will override
the default settings if they conflict. They will be appended to the default configuration otherwise.
```bash
// OVERRIDE_ENV - variable that must contains path to Druid configuration file
OVERRIDE_ENV=./environment-configs/override-examples/s3 docker-compose -f docker-compose.override-env.yml up
# OVERRIDE_ENV - an environment variable that specifies the custom configuration file path.
OVERRIDE_ENV=./environment-configs/test-groups/prepopulated-data DRUID_INTEGRATION_TEST_GROUP=query docker-compose -f docker-compose.yml up
```
- docker-compose.security.yml
Defines three additional Druid router services with permissive tls, no client auth tls, and custom check tls respectively.
This is meant to be use together with docker-compose.yml or docker-compose.override-env.yml and is only needed for the "security" group integration test.
This is meant to be used together with docker-compose.yml and is only needed for the "security" group integration test.
```bash
docker-compose -f docker-compose.yml -f docker-compose.security.yml up


@ -46,6 +46,7 @@ RUN find /var/lib/mysql -type f -exec touch {} \; && /etc/init.d/mysql start \
# Add Druid jars
ADD lib/* /usr/local/druid/lib/
COPY extensions/ /usr/local/druid/extensions/
# Download the MySQL Java connector
# target path must match the exact path referenced in environment-configs/common
@ -65,7 +66,7 @@ RUN wget -q "https://packages.confluent.io/maven/io/confluent/kafka-protobuf-pro
# Add sample data
# touch is needed because OverlayFS's copy-up operation breaks POSIX standards. See https://github.com/docker/for-linux/issues/72.
RUN find /var/lib/mysql -type f -exec touch {} \; && service mysql start \
&& java -cp "/usr/local/druid/lib/*" -Ddruid.metadata.storage.type=mysql -Ddruid.metadata.mysql.driver.driverClassName=$MYSQL_DRIVER_CLASSNAME org.apache.druid.cli.Main tools metadata-init --connectURI="jdbc:mysql://localhost:3306/druid" --user=druid --password=diurd \
&& java -cp "/usr/local/druid/lib/*" -Ddruid.extensions.directory=/usr/local/druid/extensions -Ddruid.extensions.loadList='["mysql-metadata-storage"]' -Ddruid.metadata.storage.type=mysql -Ddruid.metadata.mysql.driver.driverClassName=$MYSQL_DRIVER_CLASSNAME org.apache.druid.cli.Main tools metadata-init --connectURI="jdbc:mysql://localhost:3306/druid" --user=druid --password=diurd \
&& /etc/init.d/mysql stop
ADD test-data /test-data


@ -89,6 +89,7 @@ services:
env_file:
- ./environment-configs/common
- ./environment-configs/overlord
- ${OVERRIDE_ENV}
druid-overlord-two:
image: druid/cluster
@ -108,6 +109,7 @@ services:
env_file:
- ./environment-configs/common
- ./environment-configs/overlord
- ${OVERRIDE_ENV}
### coordinators
druid-coordinator:
@ -128,6 +130,7 @@ services:
env_file:
- ./environment-configs/common
- ./environment-configs/coordinator
- ${OVERRIDE_ENV}
druid-coordinator-two:
image: druid/cluster
@ -147,6 +150,7 @@ services:
env_file:
- ./environment-configs/common
- ./environment-configs/coordinator
- ${OVERRIDE_ENV}
### historicals
druid-historical:
@ -167,6 +171,7 @@ services:
env_file:
- ./environment-configs/common
- ./environment-configs/historical
- ${OVERRIDE_ENV}
### middle managers
druid-middlemanager:
@ -200,6 +205,7 @@ services:
env_file:
- ./environment-configs/common
- ./environment-configs/middlemanager
- ${OVERRIDE_ENV}
### indexers
druid-indexer:
@ -221,6 +227,7 @@ services:
env_file:
- ./environment-configs/common
- ./environment-configs/indexer
- ${OVERRIDE_ENV}
### brokers
druid-broker:
@ -241,6 +248,7 @@ services:
env_file:
- ./environment-configs/common
- ./environment-configs/broker
- ${OVERRIDE_ENV}
### routers
druid-router:
@ -261,6 +269,7 @@ services:
env_file:
- ./environment-configs/common
- ./environment-configs/router
- ${OVERRIDE_ENV}
druid-router-permissive-tls:
image: druid/cluster
@ -279,6 +288,7 @@ services:
env_file:
- ./environment-configs/common
- ./environment-configs/router-permissive-tls
- ${OVERRIDE_ENV}
druid-router-no-client-auth-tls:
image: druid/cluster
@ -297,6 +307,7 @@ services:
env_file:
- ./environment-configs/common
- ./environment-configs/router-no-client-auth-tls
- ${OVERRIDE_ENV}
druid-router-custom-check-tls:
image: druid/cluster
@ -315,6 +326,7 @@ services:
env_file:
- ./environment-configs/common
- ./environment-configs/router-custom-check-tls
- ${OVERRIDE_ENV}
### optional supporting infra
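Because every service now lists `${OVERRIDE_ENV}` in its `env_file` section, the variable must resolve to a readable file whenever this compose file is used; docker-compose applies env files in order, so values from the override file win over `environment-configs/common` and the per-service file. A sketch, assuming the empty placeholder config added later in this change is what groups without special settings point at (its exact filename is an assumption):

```bash
# Group with real overrides (same pattern the README shows):
OVERRIDE_ENV=./environment-configs/test-groups/prepopulated-data \
DRUID_INTEGRATION_TEST_GROUP=query \
docker-compose -f docker-compose.yml up

# Group with no extra settings: point OVERRIDE_ENV at the empty config file
# (filename assumed) so the ${OVERRIDE_ENV} entry still resolves.
OVERRIDE_ENV=./environment-configs/empty-config \
DRUID_INTEGRATION_TEST_GROUP=batch-index \
docker-compose -f docker-compose.yml up
```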


@ -1,103 +0,0 @@
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
version: "2.2"
services:
druid-zookeeper-kafka:
extends:
file: docker-compose.base.yml
service: druid-zookeeper-kafka
druid-metadata-storage:
extends:
file: docker-compose.base.yml
service: druid-metadata-storage
environment:
- DRUID_INTEGRATION_TEST_GROUP=${DRUID_INTEGRATION_TEST_GROUP}
depends_on:
- druid-zookeeper-kafka
druid-coordinator:
extends:
file: docker-compose.base.yml
service: druid-coordinator
env_file:
- ./environment-configs/common-custom-coordinator-duties
environment:
- DRUID_INTEGRATION_TEST_GROUP=${DRUID_INTEGRATION_TEST_GROUP}
depends_on:
- druid-metadata-storage
- druid-zookeeper-kafka
druid-overlord:
extends:
file: docker-compose.base.yml
service: druid-overlord
environment:
- DRUID_INTEGRATION_TEST_GROUP=${DRUID_INTEGRATION_TEST_GROUP}
depends_on:
- druid-coordinator
- druid-metadata-storage
- druid-zookeeper-kafka
druid-historical:
extends:
file: docker-compose.base.yml
service: druid-historical
environment:
- DRUID_INTEGRATION_TEST_GROUP=${DRUID_INTEGRATION_TEST_GROUP}
depends_on:
- druid-zookeeper-kafka
druid-middlemanager:
extends:
file: docker-compose.base.yml
service: druid-middlemanager
environment:
- DRUID_INTEGRATION_TEST_GROUP=${DRUID_INTEGRATION_TEST_GROUP}
depends_on:
- druid-zookeeper-kafka
- druid-overlord
druid-broker:
extends:
file: docker-compose.base.yml
service: druid-broker
environment:
- DRUID_INTEGRATION_TEST_GROUP=${DRUID_INTEGRATION_TEST_GROUP}
depends_on:
- druid-coordinator
- druid-zookeeper-kafka
- druid-middlemanager
- druid-historical
druid-router:
extends:
file: docker-compose.base.yml
service: druid-router
environment:
- DRUID_INTEGRATION_TEST_GROUP=${DRUID_INTEGRATION_TEST_GROUP}
depends_on:
- druid-zookeeper-kafka
- druid-coordinator
- druid-broker
- druid-overlord
networks:
druid-it-net:
name: druid-it-net
ipam:
config:
- subnet: 172.172.172.0/24


@ -138,6 +138,7 @@ services:
- druid_server_https_crlPath=/tls/revocations.crl
env_file:
- ./environment-configs/common
- ${OVERRIDE_ENV}
depends_on:
- druid-zookeeper-kafka
- druid-coordinator


@ -56,6 +56,7 @@ services:
- druid-openldap
- druid-metadata-storage
- druid-zookeeper-kafka
- druid-overlord
druid-overlord:
extends:
@ -68,7 +69,6 @@ services:
- DRUID_INTEGRATION_TEST_GROUP=${DRUID_INTEGRATION_TEST_GROUP}
depends_on:
- druid-openldap
- druid-coordinator
- druid-metadata-storage
- druid-zookeeper-kafka


@ -1,109 +0,0 @@
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
version: "2.2"
services:
druid-zookeeper-kafka:
extends:
file: docker-compose.base.yml
service: druid-zookeeper-kafka
druid-metadata-storage:
extends:
file: docker-compose.base.yml
service: druid-metadata-storage
depends_on:
- druid-zookeeper-kafka
druid-overlord:
extends:
file: docker-compose.base.yml
service: druid-overlord
env_file:
- ${OVERRIDE_ENV}
environment:
- DRUID_INTEGRATION_TEST_GROUP=${DRUID_INTEGRATION_TEST_GROUP}
depends_on:
- druid-metadata-storage
- druid-zookeeper-kafka
druid-coordinator:
extends:
file: docker-compose.base.yml
service: druid-coordinator
env_file:
- ${OVERRIDE_ENV}
environment:
- DRUID_INTEGRATION_TEST_GROUP=${DRUID_INTEGRATION_TEST_GROUP}
depends_on:
- druid-overlord
- druid-metadata-storage
- druid-zookeeper-kafka
druid-historical:
extends:
file: docker-compose.base.yml
service: druid-historical
env_file:
- ${OVERRIDE_ENV}
environment:
- DRUID_INTEGRATION_TEST_GROUP=${DRUID_INTEGRATION_TEST_GROUP}
depends_on:
- druid-zookeeper-kafka
druid-middlemanager:
extends:
file: docker-compose.base.yml
service: druid-middlemanager
env_file:
- ${OVERRIDE_ENV}
environment:
- DRUID_INTEGRATION_TEST_GROUP=${DRUID_INTEGRATION_TEST_GROUP}
depends_on:
- druid-zookeeper-kafka
- druid-overlord
druid-broker:
extends:
file: docker-compose.base.yml
service: druid-broker
env_file:
- ${OVERRIDE_ENV}
environment:
- DRUID_INTEGRATION_TEST_GROUP=${DRUID_INTEGRATION_TEST_GROUP}
depends_on:
- druid-zookeeper-kafka
- druid-middlemanager
- druid-historical
druid-router:
extends:
file: docker-compose.base.yml
service: druid-router
env_file:
- ${OVERRIDE_ENV}
environment:
- DRUID_INTEGRATION_TEST_GROUP=${DRUID_INTEGRATION_TEST_GROUP}
depends_on:
- druid-zookeeper-kafka
- druid-coordinator
- druid-broker
networks:
druid-it-net:
name: druid-it-net
ipam:
config:
- subnet: 172.172.172.0/24


@ -88,6 +88,7 @@ services:
env_file:
- ./environment-configs/common
- ./environment-configs/historical-for-query-error-test
- ${OVERRIDE_ENV}
environment:
- DRUID_INTEGRATION_TEST_GROUP=${DRUID_INTEGRATION_TEST_GROUP}
depends_on:


@ -97,6 +97,7 @@ services:
env_file:
- ./environment-configs/common
- ./environment-configs/historical-for-query-error-test
- ${OVERRIDE_ENV}
environment:
- DRUID_INTEGRATION_TEST_GROUP=${DRUID_INTEGRATION_TEST_GROUP}
depends_on:


@ -1,105 +0,0 @@
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
version: "2.2"
services:
druid-zookeeper-kafka:
extends:
file: docker-compose.base.yml
service: druid-zookeeper-kafka
druid-metadata-storage:
extends:
file: docker-compose.base.yml
service: druid-metadata-storage
environment:
- DRUID_INTEGRATION_TEST_GROUP=${DRUID_INTEGRATION_TEST_GROUP}
depends_on:
- druid-zookeeper-kafka
druid-coordinator:
extends:
file: docker-compose.base.yml
service: druid-coordinator
environment:
- DRUID_INTEGRATION_TEST_GROUP=${DRUID_INTEGRATION_TEST_GROUP}
depends_on:
- druid-metadata-storage
- druid-zookeeper-kafka
druid-overlord:
extends:
file: docker-compose.base.yml
service: druid-overlord
env_file:
- ./environment-configs/common-shuffle-deep-store
environment:
- DRUID_INTEGRATION_TEST_GROUP=${DRUID_INTEGRATION_TEST_GROUP}
depends_on:
- druid-coordinator
- druid-metadata-storage
- druid-zookeeper-kafka
druid-historical:
extends:
file: docker-compose.base.yml
service: druid-historical
environment:
- DRUID_INTEGRATION_TEST_GROUP=${DRUID_INTEGRATION_TEST_GROUP}
depends_on:
- druid-zookeeper-kafka
druid-middlemanager:
extends:
file: docker-compose.base.yml
service: druid-middlemanager
env_file:
- ./environment-configs/common-shuffle-deep-store
environment:
- DRUID_INTEGRATION_TEST_GROUP=${DRUID_INTEGRATION_TEST_GROUP}
depends_on:
- druid-zookeeper-kafka
- druid-overlord
druid-broker:
extends:
file: docker-compose.base.yml
service: druid-broker
environment:
- DRUID_INTEGRATION_TEST_GROUP=${DRUID_INTEGRATION_TEST_GROUP}
depends_on:
- druid-coordinator
- druid-zookeeper-kafka
- druid-middlemanager
- druid-historical
druid-router:
extends:
file: docker-compose.base.yml
service: druid-router
environment:
- DRUID_INTEGRATION_TEST_GROUP=${DRUID_INTEGRATION_TEST_GROUP}
depends_on:
- druid-zookeeper-kafka
- druid-coordinator
- druid-broker
- druid-overlord
networks:
druid-it-net:
name: druid-it-net
ipam:
config:
- subnet: 172.172.172.0/24


@ -36,6 +36,7 @@ services:
environment:
- DRUID_INTEGRATION_TEST_GROUP=${DRUID_INTEGRATION_TEST_GROUP}
depends_on:
- druid-overlord
- druid-metadata-storage
- druid-zookeeper-kafka
@ -46,7 +47,6 @@ services:
environment:
- DRUID_INTEGRATION_TEST_GROUP=${DRUID_INTEGRATION_TEST_GROUP}
depends_on:
- druid-coordinator
- druid-metadata-storage
- druid-zookeeper-kafka


@ -88,17 +88,8 @@ setupData()
if [ "$DRUID_INTEGRATION_TEST_GROUP" = "query" ] || [ "$DRUID_INTEGRATION_TEST_GROUP" = "query-retry" ] || [ "$DRUID_INTEGRATION_TEST_GROUP" = "query-error" ] || [ "$DRUID_INTEGRATION_TEST_GROUP" = "high-availability" ] || [ "$DRUID_INTEGRATION_TEST_GROUP" = "security" ] || [ "$DRUID_INTEGRATION_TEST_GROUP" = "ldap-security" ] || [ "$DRUID_INTEGRATION_TEST_GROUP" = "upgrade" ]; then
# touch is needed because OverlayFS's copy-up operation breaks POSIX standards. See https://github.com/docker/for-linux/issues/72.
find /var/lib/mysql -type f -exec touch {} \; && service mysql start \
&& cat /test-data/${DRUID_INTEGRATION_TEST_GROUP}-sample-data.sql | mysql -u root druid && /etc/init.d/mysql stop
# below s3 credentials needed to access the pre-existing s3 bucket
setKey $DRUID_SERVICE druid.s3.accessKey AKIAT2GGLKKJQCMG64V4
setKey $DRUID_SERVICE druid.s3.secretKey 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
if [ "$DRUID_INTEGRATION_TEST_GROUP" = "query-retry" ] || [ "$DRUID_INTEGRATION_TEST_GROUP" = "query-error" ] || [ "$DRUID_INTEGRATION_TEST_GROUP" = "high-availability" ]; then
setKey $DRUID_SERVICE druid.extensions.loadList [\"druid-s3-extensions\",\"druid-integration-tests\"]
else
setKey $DRUID_SERVICE druid.extensions.loadList [\"druid-s3-extensions\"]
fi
# The region of the sample data s3 blobs needed for these test groups
export AWS_REGION=us-east-1
&& cat /test-data/${DRUID_INTEGRATION_TEST_GROUP}-sample-data.sql | mysql -u root druid \
&& /etc/init.d/mysql stop
fi
if [ "$MYSQL_DRIVER_CLASSNAME" != "com.mysql.jdbc.Driver" ] ; then
@ -114,4 +105,3 @@ setupData()
&& /etc/init.d/mysql stop
fi
}


@ -26,7 +26,8 @@ COMMON_DRUID_JAVA_OPTS=-Duser.timezone=UTC -Dfile.encoding=UTF-8 -Dlog4j.configu
DRUID_DEP_LIB_DIR=/shared/hadoop_xml:/shared/docker/lib/*:/usr/local/druid/lib/mysql-connector-java.jar
# Druid configs
druid_extensions_loadList=[]
druid_extensions_loadList=["mysql-metadata-storage","druid-basic-security","simple-client-sslcontext","druid-testing-tools","druid-lookups-cached-global","druid-histogram","druid-datasketches","druid-parquet-extensions","druid-avro-extensions","druid-protobuf-extensions","druid-orc-extensions","druid-kafka-indexing-service"]
druid_startup_logging_logProperties=true
druid_extensions_directory=/shared/docker/extensions
druid_auth_authenticator_basic_authorizerName=basic
druid_auth_authenticator_basic_initialAdminPassword=priest


@ -1,81 +0,0 @@
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
LANG=C.UTF-8
LANGUAGE=C.UTF-8
LC_ALL=C.UTF-8
# JAVA OPTS
COMMON_DRUID_JAVA_OPTS=-Duser.timezone=UTC -Dfile.encoding=UTF-8 -Dlog4j.configurationFile=/shared/docker/lib/log4j2.xml -XX:+ExitOnOutOfMemoryError -XX:+HeapDumpOnOutOfMemoryError -XX:HeapDumpPath=/tmp
DRUID_DEP_LIB_DIR=/shared/hadoop_xml:/shared/docker/lib/*:/usr/local/druid/lib/mysql-connector-java.jar
# Druid configs
druid_extensions_loadList=[]
druid_extensions_directory=/shared/docker/extensions
druid_auth_authenticator_basic_authorizerName=basic
druid_auth_authenticator_basic_initialAdminPassword=priest
druid_auth_authenticator_basic_initialInternalClientPassword=warlock
druid_auth_authenticator_basic_type=basic
druid_auth_authenticatorChain=["basic"]
druid_auth_authorizer_basic_type=basic
druid_auth_authorizers=["basic"]
druid_client_https_certAlias=druid
druid_client_https_keyManagerPassword=druid123
druid_client_https_keyStorePassword=druid123
druid_client_https_keyStorePath=/tls/server.jks
druid_client_https_protocol=TLSv1.2
druid_client_https_trustStoreAlgorithm=PKIX
druid_client_https_trustStorePassword=druid123
druid_client_https_trustStorePath=/tls/truststore.jks
druid_enableTlsPort=true
druid_escalator_authorizerName=basic
druid_escalator_internalClientPassword=warlock
druid_escalator_internalClientUsername=druid_system
druid_escalator_type=basic
druid_lookup_numLookupLoadingThreads=1
druid_server_http_numThreads=20
# Allow OPTIONS method for ITBasicAuthConfigurationTest.testSystemSchemaAccess
druid_server_http_allowedHttpMethods=["OPTIONS"]
druid_server_https_certAlias=druid
druid_server_https_keyManagerPassword=druid123
druid_server_https_keyStorePassword=druid123
druid_server_https_keyStorePath=/tls/server.jks
druid_server_https_keyStoreType=jks
druid_server_https_requireClientCertificate=true
druid_server_https_trustStoreAlgorithm=PKIX
druid_server_https_trustStorePassword=druid123
druid_server_https_trustStorePath=/tls/truststore.jks
druid_server_https_validateHostnames=true
druid_zk_service_host=druid-zookeeper-kafka
druid_auth_basic_common_maxSyncRetries=20
druid_indexer_logs_directory=/shared/tasklogs
druid_sql_enable=true
druid_extensions_hadoopDependenciesDir=/shared/hadoop-dependencies
druid_request_logging_type=slf4j
# Testing the legacy config from https://github.com/apache/druid/pull/10267
# Can remove this when the flag is no longer needed
druid_indexer_task_ignoreTimestampSpecForDruidInputSource=true
#Testing kill supervisor custom coordinator duty
druid_coordinator_kill_supervisor_on=false
druid_coordinator_dutyGroups=["cleanupMetadata"]
druid_coordinator_cleanupMetadata_duties=["killSupervisors"]
druid_coordinator_cleanupMetadata_duty_killSupervisors_retainDuration=PT0M
druid_coordinator_cleanupMetadata_period=PT10S


@ -20,13 +20,14 @@
LANG=C.UTF-8
LANGUAGE=C.UTF-8
LC_ALL=C.UTF-8
AWS_REGION=us-east-1
# JAVA OPTS
COMMON_DRUID_JAVA_OPTS=-Duser.timezone=UTC -Dfile.encoding=UTF-8 -Dlog4j.configurationFile=/shared/docker/lib/log4j2.xml -XX:+ExitOnOutOfMemoryError -XX:+HeapDumpOnOutOfMemoryError -XX:HeapDumpPath=/tmp
DRUID_DEP_LIB_DIR=/shared/hadoop_xml:/shared/docker/lib/*:/usr/local/druid/lib/mysql-connector-java.jar
# Druid configs
druid_extensions_loadList=[]
druid_extensions_loadList=["mysql-metadata-storage","druid-s3-extensions","druid-basic-security","simple-client-sslcontext","druid-testing-tools","druid-lookups-cached-global","druid-histogram","druid-datasketches"]
druid_extensions_directory=/shared/docker/extensions
druid_auth_authenticator_ldap_authorizerName=ldapauth
druid_auth_authenticator_ldap_initialAdminPassword=priest
@ -77,4 +78,8 @@ druid_auth_basic_common_maxSyncRetries=20
druid_indexer_logs_directory=/shared/tasklogs
druid_sql_enable=true
druid_extensions_hadoopDependenciesDir=/shared/hadoop-dependencies
druid_request_logging_type=slf4j
druid_request_logging_type=slf4j
# Setting s3 credentials and region to use pre-populated data for testing.
druid_s3_accessKey=AKIAT2GGLKKJQCMG64V4
druid_s3_secretKey=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


@ -1,81 +0,0 @@
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
LANG=C.UTF-8
LANGUAGE=C.UTF-8
LC_ALL=C.UTF-8
# JAVA OPTS
COMMON_DRUID_JAVA_OPTS=-Duser.timezone=UTC -Dfile.encoding=UTF-8 -Dlog4j.configurationFile=/shared/docker/lib/log4j2.xml -XX:+ExitOnOutOfMemoryError -XX:+HeapDumpOnOutOfMemoryError -XX:HeapDumpPath=/tmp
DRUID_DEP_LIB_DIR=/shared/hadoop_xml:/shared/docker/lib/*:/usr/local/druid/lib/mysql-connector-java.jar
# Druid configs
druid_extensions_loadList=[]
druid_extensions_directory=/shared/docker/extensions
druid_auth_authenticator_basic_authorizerName=basic
druid_auth_authenticator_basic_initialAdminPassword=priest
druid_auth_authenticator_basic_initialInternalClientPassword=warlock
druid_auth_authenticator_basic_type=basic
druid_auth_authenticatorChain=["basic"]
druid_auth_authorizer_basic_type=basic
druid_auth_authorizers=["basic"]
druid_client_https_certAlias=druid
druid_client_https_keyManagerPassword=druid123
druid_client_https_keyStorePassword=druid123
druid_client_https_keyStorePath=/tls/server.jks
druid_client_https_protocol=TLSv1.2
druid_client_https_trustStoreAlgorithm=PKIX
druid_client_https_trustStorePassword=druid123
druid_client_https_trustStorePath=/tls/truststore.jks
druid_enableTlsPort=true
druid_escalator_authorizerName=basic
druid_escalator_internalClientPassword=warlock
druid_escalator_internalClientUsername=druid_system
druid_escalator_type=basic
druid_lookup_numLookupLoadingThreads=1
druid_server_http_numThreads=20
# Allow OPTIONS method for ITBasicAuthConfigurationTest.testSystemSchemaAccess
druid_server_http_allowedHttpMethods=["OPTIONS"]
druid_server_https_certAlias=druid
druid_server_https_keyManagerPassword=druid123
druid_server_https_keyStorePassword=druid123
druid_server_https_keyStorePath=/tls/server.jks
druid_server_https_keyStoreType=jks
druid_server_https_requireClientCertificate=true
druid_server_https_trustStoreAlgorithm=PKIX
druid_server_https_trustStorePassword=druid123
druid_server_https_trustStorePath=/tls/truststore.jks
druid_server_https_validateHostnames=true
druid_zk_service_host=druid-zookeeper-kafka
druid_auth_basic_common_maxSyncRetries=20
druid_indexer_logs_directory=/shared/tasklogs
druid_sql_enable=true
druid_extensions_hadoopDependenciesDir=/shared/hadoop-dependencies
druid_request_logging_type=slf4j
druid_coordinator_kill_supervisor_on=true
druid_coordinator_kill_supervisor_period=PT10S
druid_coordinator_kill_supervisor_durationToRetain=PT0M
druid_coordinator_period_metadataStoreManagementPeriod=PT10S
# Testing the legacy config from https://github.com/apache/druid/pull/10267
# Can remove this when the flag is no longer needed
druid_indexer_task_ignoreTimestampSpecForDruidInputSource=true
# Test with deep storage as intermediate location to store shuffle data
# Local deep storage will be used here
druid_processing_intermediaryData_storage_type=deepstore


@ -0,0 +1,18 @@
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#


@ -0,0 +1,30 @@
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
druid_extensions_loadList=["druid-kafka-indexing-service","mysql-metadata-storage","druid-s3-extensions","druid-basic-security","simple-client-sslcontext","druid-testing-tools","druid-lookups-cached-global","druid-histogram","druid-datasketches"]
druid_coordinator_period_metadataStoreManagementPeriod=PT1H
druid_sql_planner_authorizeSystemTablesDirectly=false
#Testing kill supervisor custom coordinator duty
druid_coordinator_kill_supervisor_on=false
druid_coordinator_dutyGroups=["cleanupMetadata"]
druid_coordinator_cleanupMetadata_duties=["killSupervisors"]
druid_coordinator_cleanupMetadata_duty_killSupervisors_retainDuration=PT0M
druid_coordinator_cleanupMetadata_period=PT10S


@ -0,0 +1,26 @@
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
AWS_REGION=us-east-1
druid_extensions_loadList=["mysql-metadata-storage","druid-s3-extensions","druid-basic-security","simple-client-sslcontext","druid-testing-tools","druid-lookups-cached-global","druid-histogram","druid-datasketches","druid-integration-tests"]
# Setting s3 credentials and region to use pre-populated data for testing.
druid_s3_accessKey=AKIAT2GGLKKJQCMG64V4
druid_s3_secretKey=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


@ -0,0 +1,23 @@
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
# Test with deep storage as intermediate location to store shuffle data
# Local deep storage will be used here
druid_extensions_loadList=["mysql-metadata-storage","druid-basic-security","simple-client-sslcontext","druid-testing-tools","druid-lookups-cached-global","druid-histogram","druid-datasketches"]
druid_processing_intermediaryData_storage_type=deepstore


@ -95,13 +95,13 @@
<groupId>com.amazonaws</groupId>
<artifactId>aws-java-sdk-s3</artifactId>
<version>${aws.sdk.version}</version>
<scope>runtime</scope>
<scope>provided</scope>
</dependency>
<dependency>
<groupId>org.apache.druid.extensions</groupId>
<artifactId>druid-orc-extensions</artifactId>
<version>${project.parent.version}</version>
<scope>runtime</scope>
<scope>provided</scope>
<exclusions>
<exclusion>
<groupId>javax.servlet</groupId>
@ -117,49 +117,49 @@
<groupId>org.apache.druid.extensions</groupId>
<artifactId>druid-parquet-extensions</artifactId>
<version>${project.parent.version}</version>
<scope>runtime</scope>
<scope>provided</scope>
</dependency>
<dependency>
<groupId>org.apache.druid.extensions</groupId>
<artifactId>druid-avro-extensions</artifactId>
<version>${project.parent.version}</version>
<scope>runtime</scope>
<scope>provided</scope>
</dependency>
<dependency>
<groupId>org.apache.druid.extensions</groupId>
<artifactId>druid-protobuf-extensions</artifactId>
<version>${project.parent.version}</version>
<scope>runtime</scope>
<scope>provided</scope>
</dependency>
<dependency>
<groupId>org.apache.druid.extensions</groupId>
<artifactId>druid-s3-extensions</artifactId>
<version>${project.parent.version}</version>
<scope>runtime</scope>
<scope>provided</scope>
</dependency>
<dependency>
<groupId>org.apache.druid.extensions</groupId>
<artifactId>druid-kinesis-indexing-service</artifactId>
<version>${project.parent.version}</version>
<scope>runtime</scope>
<scope>provided</scope>
</dependency>
<dependency>
<groupId>org.apache.druid.extensions</groupId>
<artifactId>druid-azure-extensions</artifactId>
<version>${project.parent.version}</version>
<scope>runtime</scope>
<scope>provided</scope>
</dependency>
<dependency>
<groupId>org.apache.druid.extensions</groupId>
<artifactId>druid-google-extensions</artifactId>
<version>${project.parent.version}</version>
<scope>runtime</scope>
<scope>provided</scope>
</dependency>
<dependency>
<groupId>org.apache.druid.extensions</groupId>
<artifactId>druid-hdfs-storage</artifactId>
<version>${project.parent.version}</version>
<scope>runtime</scope>
<scope>provided</scope>
<exclusions>
<exclusion>
<groupId>com.amazonaws</groupId>
@ -171,19 +171,19 @@
<groupId>org.apache.druid.extensions</groupId>
<artifactId>druid-datasketches</artifactId>
<version>${project.parent.version}</version>
<scope>runtime</scope>
<scope>provided</scope>
</dependency>
<dependency>
<groupId>org.apache.druid.extensions</groupId>
<artifactId>druid-histogram</artifactId>
<version>${project.parent.version}</version>
<scope>runtime</scope>
<scope>provided</scope>
</dependency>
<dependency>
<groupId>org.apache.druid</groupId>
<artifactId>druid-aws-common</artifactId>
<version>${project.parent.version}</version>
<scope>runtime</scope>
<scope>provided</scope>
</dependency>
<dependency>
<groupId>org.apache.druid</groupId>
@ -204,7 +204,7 @@
<groupId>org.apache.druid.extensions</groupId>
<artifactId>mysql-metadata-storage</artifactId>
<version>${project.parent.version}</version>
<scope>runtime</scope>
<scope>provided</scope>
</dependency>
<dependency>
<groupId>org.apache.druid.extensions</groupId>
@ -226,13 +226,13 @@
<groupId>org.apache.druid.extensions</groupId>
<artifactId>druid-lookups-cached-global</artifactId>
<version>${project.parent.version}</version>
<scope>runtime</scope>
<scope>provided</scope>
</dependency>
<dependency>
<groupId>org.apache.druid.extensions</groupId>
<artifactId>druid-testing-tools</artifactId>
<version>${project.parent.version}</version>
<scope>runtime</scope>
<scope>provided</scope>
</dependency>
<dependency>
<groupId>org.apache.druid.extensions</groupId>
@ -301,7 +301,7 @@
<dependency>
<groupId>org.apache.logging.log4j</groupId>
<artifactId>log4j-slf4j-impl</artifactId>
<scope>runtime</scope>
<scope>provided</scope>
</dependency>
<dependency>
<groupId>com.google.code.findbugs</groupId>
@ -602,6 +602,7 @@
<KAFKA_VERSION>${apache.kafka.version}</KAFKA_VERSION>
<ZK_VERSION>${zookeeper.version}</ZK_VERSION>
<HADOOP_VERSION>${hadoop.compile.version}</HADOOP_VERSION>
<DRUID_VERSION>${parent.version}</DRUID_VERSION>
</environmentVariables>
<executable>${project.basedir}/build_run_cluster.sh</executable>
</configuration>
@ -618,6 +619,7 @@
<DRUID_INTEGRATION_TEST_GROUP>${groups}</DRUID_INTEGRATION_TEST_GROUP>
<DRUID_INTEGRATION_TEST_OVERRIDE_CONFIG_PATH>${override.config.path}</DRUID_INTEGRATION_TEST_OVERRIDE_CONFIG_PATH>
<DRUID_INTEGRATION_TEST_INDEXER>${it.indexer}</DRUID_INTEGRATION_TEST_INDEXER>
<DRUID_VERSION>${parent.version}</DRUID_VERSION>
</environmentVariables>
<executable>${project.basedir}/stop_cluster.sh</executable>
</configuration>


@ -27,9 +27,15 @@ cp -r client_tls docker/client_tls
rm -rf $SHARED_DIR/docker
mkdir -p $SHARED_DIR
cp -R docker $SHARED_DIR/docker
mvn -B dependency:copy-dependencies -DoutputDirectory=$SHARED_DIR/docker/lib
# Make directories if they dont exist
pushd ../
rm -rf distribution/target/apache-druid-$DRUID_VERSION-integration-test-bin
mvn -DskipTests -T1C -Danimal.sniffer.skip=true -Dcheckstyle.skip=true -Ddruid.console.skip=true -Denforcer.skip=true -Dforbiddenapis.skip=true -Dmaven.javadoc.skip=true -Dpmd.skip=true -Dspotbugs.skip=true install -Pintegration-test
mv distribution/target/apache-druid-$DRUID_VERSION-integration-test-bin/lib $SHARED_DIR/docker/lib
mv distribution/target/apache-druid-$DRUID_VERSION-integration-test-bin/extensions $SHARED_DIR/docker/extensions
popd
# Make directories if they don't exist
mkdir -p $SHARED_DIR/hadoop_xml
mkdir -p $SHARED_DIR/hadoop-dependencies
mkdir -p $SHARED_DIR/logs
@ -40,37 +46,8 @@ mkdir -p $SHARED_DIR/docker/credentials
# install logging config
cp src/main/resources/log4j2.xml $SHARED_DIR/docker/lib/log4j2.xml
# copy the integration test jar, it provides test-only extension implementations
cp target/druid-integration-tests*.jar $SHARED_DIR/docker/lib
# move extensions into a seperate extension folder
# For druid-integration-tests
mkdir -p $SHARED_DIR/docker/extensions/druid-integration-tests
# We don't want to copy tests jar.
cp $SHARED_DIR/docker/lib/druid-integration-tests-*[^s].jar $SHARED_DIR/docker/extensions/druid-integration-tests
# For druid-s3-extensions
mkdir -p $SHARED_DIR/docker/extensions/druid-s3-extensions
mv $SHARED_DIR/docker/lib/druid-s3-extensions-* $SHARED_DIR/docker/extensions/druid-s3-extensions
# For druid-azure-extensions
mkdir -p $SHARED_DIR/docker/extensions/druid-azure-extensions
mv $SHARED_DIR/docker/lib/druid-azure-extensions-* $SHARED_DIR/docker/extensions/druid-azure-extensions
# For druid-google-extensions
mkdir -p $SHARED_DIR/docker/extensions/druid-google-extensions
mv $SHARED_DIR/docker/lib/druid-google-extensions-* $SHARED_DIR/docker/extensions/druid-google-extensions
# For druid-hdfs-storage
mkdir -p $SHARED_DIR/docker/extensions/druid-hdfs-storage
mv $SHARED_DIR/docker/lib/druid-hdfs-storage-* $SHARED_DIR/docker/extensions/druid-hdfs-storage
# For druid-kinesis-indexing-service
mkdir -p $SHARED_DIR/docker/extensions/druid-kinesis-indexing-service
mv $SHARED_DIR/docker/lib/druid-kinesis-indexing-service-* $SHARED_DIR/docker/extensions/druid-kinesis-indexing-service
# For druid-parquet-extensions
# Using cp so that this extensions is included when running Druid without loadList and as a option for the loadList
mkdir -p $SHARED_DIR/docker/extensions/druid-parquet-extensions
cp $SHARED_DIR/docker/lib/druid-parquet-extensions-* $SHARED_DIR/docker/extensions/druid-parquet-extensions
# For druid-orc-extensions
# Using cp so that this extensions is included when running Druid without loadList and as a option for the loadList
mkdir -p $SHARED_DIR/docker/extensions/druid-orc-extensions
cp $SHARED_DIR/docker/lib/druid-orc-extensions-* $SHARED_DIR/docker/extensions/druid-orc-extensions
# Extensions for testing are pulled while creating a binary.
# See the 'integration-test' profile in $ROOT/distribution/pom.xml.
# Pull Hadoop dependency if needed
if [ -n "$DRUID_INTEGRATION_TEST_START_HADOOP_DOCKER" ] && [ "$DRUID_INTEGRATION_TEST_START_HADOOP_DOCKER" == true ]


@ -20,75 +20,59 @@ set -e
# for a given test group
getComposeArgs()
{
if [ -z "$DRUID_INTEGRATION_TEST_OVERRIDE_CONFIG_PATH" ]
# Sanity check: DRUID_INTEGRATION_TEST_INDEXER must be "indexer" or "middleManager"
if [ "$DRUID_INTEGRATION_TEST_INDEXER" != "indexer" ] && [ "$DRUID_INTEGRATION_TEST_INDEXER" != "middleManager" ]
then
# Sanity check: DRUID_INTEGRATION_TEST_INDEXER must be "indexer" or "middleManager"
if [ "$DRUID_INTEGRATION_TEST_INDEXER" != "indexer" ] && [ "$DRUID_INTEGRATION_TEST_INDEXER" != "middleManager" ]
echo "DRUID_INTEGRATION_TEST_INDEXER must be 'indexer' or 'middleManager' (is '$DRUID_INTEGRATION_TEST_INDEXER')"
exit 1
fi
if [ "$DRUID_INTEGRATION_TEST_INDEXER" = "indexer" ]
then
# Sanity check: cannot combine CliIndexer tests with security, query-retry tests
if [ "$DRUID_INTEGRATION_TEST_GROUP" = "security" ] || [ "$DRUID_INTEGRATION_TEST_GROUP" = "ldap-security" ] || [ "$DRUID_INTEGRATION_TEST_GROUP" = "query-retry" ] || [ "$DRUID_INTEGRATION_TEST_GROUP" = "query-error" ] || [ "$DRUID_INTEGRATION_TEST_GROUP" = "high-availability" ]
then
echo "DRUID_INTEGRATION_TEST_INDEXER must be 'indexer' or 'middleManager' (is '$DRUID_INTEGRATION_TEST_INDEXER')"
echo "Cannot run test group '$DRUID_INTEGRATION_TEST_GROUP' with CliIndexer"
exit 1
fi
if [ "$DRUID_INTEGRATION_TEST_INDEXER" = "indexer" ]
then
# Sanity check: cannot combine CliIndexer tests with security, query-retry tests
if [ "$DRUID_INTEGRATION_TEST_GROUP" = "security" ] || [ "$DRUID_INTEGRATION_TEST_GROUP" = "ldap-security" ] || [ "$DRUID_INTEGRATION_TEST_GROUP" = "query-retry" ] || [ "$DRUID_INTEGRATION_TEST_GROUP" = "query-error" ] || [ "$DRUID_INTEGRATION_TEST_GROUP" = "high-availability" ]
then
echo "Cannot run test group '$DRUID_INTEGRATION_TEST_GROUP' with CliIndexer"
exit 1
elif [ "$DRUID_INTEGRATION_TEST_GROUP" = "kafka-data-format" ]
then
# Replace MiddleManager with Indexer + schema registry container
echo "-f ${DOCKERDIR}/docker-compose.cli-indexer.yml -f ${DOCKERDIR}/docker-compose.schema-registry-indexer.yml"
else
# Replace MiddleManager with Indexer
echo "-f ${DOCKERDIR}/docker-compose.cli-indexer.yml"
fi
elif [ "$DRUID_INTEGRATION_TEST_GROUP" = "security" ]
then
# default + additional druid router (custom-check-tls, permissive-tls, no-client-auth-tls)
echo "-f ${DOCKERDIR}/docker-compose.yml -f ${DOCKERDIR}/docker-compose.security.yml"
elif [ "$DRUID_INTEGRATION_TEST_GROUP" = "ldap-security" ]
then
# default + additional druid router (custom-check-tls, permissive-tls, no-client-auth-tls)
echo "-f ${DOCKERDIR}/docker-compose.yml -f ${DOCKERDIR}/docker-compose.ldap-security.yml"
elif [ "$DRUID_INTEGRATION_TEST_GROUP" = "query-retry" ]
then
# default + additional historical modified for query retry test
# See CliHistoricalForQueryRetryTest.
echo "-f ${DOCKERDIR}/docker-compose.query-retry-test.yml"
elif [ "$DRUID_INTEGRATION_TEST_GROUP" = "query-error" ]
then
# default + additional historical modified for query error test
# See CliHistoricalForQueryRetryTest.
echo "-f ${DOCKERDIR}/docker-compose.query-error-test.yml"
elif [ "$DRUID_INTEGRATION_TEST_GROUP" = "custom-coordinator-duties" ]
then
# default + custom for Coordinator to enable custom coordinator duties
echo "-f ${DOCKERDIR}/docker-compose.custom-coordinator-duties.yml"
elif [ "$DRUID_INTEGRATION_TEST_GROUP" = "high-availability" ]
then
# the 'high availability' test cluster with multiple coordinators and overlords
echo "-f ${DOCKERDIR}/docker-compose.high-availability.yml"
elif [ "$DRUID_INTEGRATION_TEST_GROUP" = "kafka-data-format" ]
then
# default + schema registry container
echo "-f ${DOCKERDIR}/docker-compose.yml -f ${DOCKERDIR}/docker-compose.schema-registry.yml"
elif [ "$DRUID_INTEGRATION_TEST_GROUP" = "shuffle-deep-store" ]
then
# default + MiddleManager configured to use deep storage for intermediate shuffle data
echo "-f ${DOCKERDIR}/docker-compose.shuffle-deep-store.yml"
# Replace MiddleManager with Indexer + schema registry container
echo "-f ${DOCKERDIR}/docker-compose.cli-indexer.yml -f ${DOCKERDIR}/docker-compose.schema-registry-indexer.yml"
else
# default
echo "-f ${DOCKERDIR}/docker-compose.yml"
# Replace MiddleManager with Indexer
echo "-f ${DOCKERDIR}/docker-compose.cli-indexer.yml"
fi
elif [ "$DRUID_INTEGRATION_TEST_GROUP" = "security" ]
then
# default + additional druid router (custom-check-tls, permissive-tls, no-client-auth-tls)
echo "-f ${DOCKERDIR}/docker-compose.yml -f ${DOCKERDIR}/docker-compose.security.yml"
elif [ "$DRUID_INTEGRATION_TEST_GROUP" = "ldap-security" ]
then
# default + additional druid router (custom-check-tls, permissive-tls, no-client-auth-tls)
echo "-f ${DOCKERDIR}/docker-compose.yml -f ${DOCKERDIR}/docker-compose.ldap-security.yml"
elif [ "$DRUID_INTEGRATION_TEST_GROUP" = "query-retry" ]
then
# default + additional historical modified for query retry test
# See CliHistoricalForQueryRetryTest.
echo "-f ${DOCKERDIR}/docker-compose.query-retry-test.yml"
elif [ "$DRUID_INTEGRATION_TEST_GROUP" = "query-error" ]
then
# default + additional historical modified for query error test
# See CliHistoricalForQueryRetryTest.
echo "-f ${DOCKERDIR}/docker-compose.query-error-test.yml"
elif [ "$DRUID_INTEGRATION_TEST_GROUP" = "high-availability" ]
then
# the 'high availability' test cluster with multiple coordinators and overlords
echo "-f ${DOCKERDIR}/docker-compose.high-availability.yml"
elif [ "$DRUID_INTEGRATION_TEST_GROUP" = "kafka-data-format" ]
then
# default + schema registry container
echo "-f ${DOCKERDIR}/docker-compose.yml -f ${DOCKERDIR}/docker-compose.schema-registry.yml"
elif [ "$DRUID_INTEGRATION_TEST_GROUP" = "kinesis-data-format" ]
then
# default + schema registry container
echo "-f ${DOCKERDIR}/docker-compose.yml -f ${DOCKERDIR}/docker-compose.schema-registry.yml"
else
if [ "$DRUID_INTEGRATION_TEST_GROUP" = "kinesis-data-format" ]
then
# default + with override config + schema registry container
echo "-f ${DOCKERDIR}/docker-compose.override-env.yml -f ${DOCKERDIR}/docker-compose.schema-registry.yml"
else
# with override config
echo "-f ${DOCKERDIR}/docker-compose.override-env.yml"
fi
# default
echo "-f ${DOCKERDIR}/docker-compose.yml"
fi
}
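A quick way to sanity-check which compose files a group resolves to is to source the script and call the function directly; a sketch, assuming the function lives in script/docker_compose_args.sh and DOCKERDIR points at the compose directory:
# prints the -f arguments the security group would use with a middleManager
DOCKERDIR=docker DRUID_INTEGRATION_TEST_GROUP=security DRUID_INTEGRATION_TEST_INDEXER=middleManager \
  bash -c 'source script/docker_compose_args.sh && getComposeArgs'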

View File

@ -47,9 +47,11 @@ fi
if [ -z "$DRUID_INTEGRATION_TEST_OVERRIDE_CONFIG_PATH" ]
then
# Start Druid cluster
docker-compose $(getComposeArgs) up -d
echo "Starting cluster with empty config"
OVERRIDE_ENV=environment-configs/empty-config docker-compose $(getComposeArgs) up -d
else
# run druid cluster with override config
echo "Starting cluster with a config file at $DRUID_INTEGRATION_TEST_OVERRIDE_CONFIG_PATH"
OVERRIDE_ENV=$DRUID_INTEGRATION_TEST_OVERRIDE_CONFIG_PATH docker-compose $(getComposeArgs) up -d
fi
}
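The OVERRIDE_ENV variable is what lets a single set of compose files serve both the default run and the override-config run: each Druid service is expected to pull its environment from that file. A rough sketch of how a service might pick it up in its compose definition, with the env_file layout assumed:
# excerpt from a druid service definition (illustrative)
#   env_file:
#     - ./environment-configs/common
#     - ${OVERRIDE_ENV}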

View File

@ -104,7 +104,7 @@ public class OverlordResourceTestClient
response.getContent(), JacksonUtils.TYPE_REFERENCE_MAP_STRING_STRING
);
String taskID = responseData.get("task");
LOG.info("Submitted task with TaskID[%s]", taskID);
LOG.debug("Submitted task with TaskID[%s]", taskID);
return taskID;
},
Predicates.alwaysTrue(),
@ -127,7 +127,7 @@ public class OverlordResourceTestClient
StringUtils.urlEncode(taskID)
)
);
LOG.info("Index status response" + response.getContent());
LOG.debug("Index status response" + response.getContent());
TaskStatusResponse taskStatusResponse = jsonMapper.readValue(
response.getContent(),
new TypeReference<TaskStatusResponse>()
@ -180,7 +180,7 @@ public class OverlordResourceTestClient
HttpMethod.GET,
StringUtils.format("%s%s", getIndexerURL(), identifier)
);
LOG.info("Tasks %s response %s", identifier, response.getContent());
LOG.debug("Tasks %s response %s", identifier, response.getContent());
return jsonMapper.readValue(
response.getContent(), new TypeReference<List<TaskResponseObject>>()
{
@ -199,7 +199,7 @@ public class OverlordResourceTestClient
HttpMethod.GET,
StringUtils.format("%stask/%s", getIndexerURL(), taskId)
);
LOG.info("Task %s response %s", taskId, response.getContent());
LOG.debug("Task %s response %s", taskId, response.getContent());
return jsonMapper.readValue(
response.getContent(), new TypeReference<TaskPayloadResponse>()
{
@ -380,7 +380,7 @@ public class OverlordResourceTestClient
response.getContent(), JacksonUtils.TYPE_REFERENCE_MAP_STRING_STRING
);
String id = responseData.get("id");
LOG.info("Submitted supervisor with id[%s]", id);
LOG.debug("Submitted supervisor with id[%s]", id);
return id;
}
catch (Exception e) {
@ -409,7 +409,7 @@ public class OverlordResourceTestClient
response.getContent()
);
}
LOG.info("Shutdown supervisor with id[%s]", id);
LOG.debug("Shutdown supervisor with id[%s]", id);
}
catch (ISE e) {
throw e;
@ -440,7 +440,7 @@ public class OverlordResourceTestClient
response.getContent()
);
}
LOG.info("Terminate supervisor with id[%s]", id);
LOG.debug("Terminate supervisor with id[%s]", id);
}
catch (ISE e) {
throw e;
@ -471,7 +471,7 @@ public class OverlordResourceTestClient
response.getContent()
);
}
LOG.info("Shutdown task with id[%s]", id);
LOG.debug("Shutdown task with id[%s]", id);
}
catch (ISE e) {
throw e;
@ -511,7 +511,7 @@ public class OverlordResourceTestClient
JacksonUtils.TYPE_REFERENCE_MAP_STRING_OBJECT
);
String state = (String) payload.get("state");
LOG.info("Supervisor id[%s] has state [%s]", id, state);
LOG.debug("Supervisor id[%s] has state [%s]", id, state);
return SupervisorStateManager.BasicState.valueOf(state);
}
catch (ISE e) {
@ -543,7 +543,7 @@ public class OverlordResourceTestClient
response.getContent()
);
}
LOG.info("Suspended supervisor with id[%s]", id);
LOG.debug("Suspended supervisor with id[%s]", id);
}
catch (ISE e) {
throw e;
@ -574,7 +574,7 @@ public class OverlordResourceTestClient
response.getContent()
);
}
LOG.info("stats supervisor with id[%s]", id);
LOG.debug("stats supervisor with id[%s]", id);
}
catch (ISE e) {
throw e;
@ -605,7 +605,7 @@ public class OverlordResourceTestClient
response.getContent()
);
}
LOG.info("get supervisor health with id[%s]", id);
LOG.debug("get supervisor health with id[%s]", id);
}
catch (ISE e) {
throw e;
@ -636,7 +636,7 @@ public class OverlordResourceTestClient
response.getContent()
);
}
LOG.info("Resumed supervisor with id[%s]", id);
LOG.debug("Resumed supervisor with id[%s]", id);
}
catch (ISE e) {
throw e;
@ -667,7 +667,7 @@ public class OverlordResourceTestClient
response.getContent()
);
}
LOG.info("Reset supervisor with id[%s]", id);
LOG.debug("Reset supervisor with id[%s]", id);
}
catch (ISE e) {
throw e;

View File

@ -101,7 +101,7 @@ public class ITSqlCancelTest
1000
);
if (!responseStatus.equals(HttpResponseStatus.ACCEPTED)) {
throw new RE("Failed to cancel query [%s]", queryId);
throw new RE("Failed to cancel query [%s]. Response code was [%s]", queryId, responseStatus);
}
for (Future<StatusResponseHolder> queryResponseFuture : queryResponseFutures) {
@ -141,7 +141,7 @@ public class ITSqlCancelTest
final StatusResponseHolder queryResponse = queryResponseFuture.get(30, TimeUnit.SECONDS);
if (!queryResponse.getStatus().equals(HttpResponseStatus.OK)) {
throw new ISE("Query is not canceled after cancel request");
throw new ISE("Cancel request failed with status[%s] and content[%s]", queryResponse.getStatus(), queryResponse.getContent());
}
}
}

View File

@ -26,6 +26,7 @@ then
exit 0
fi
rm -rf $(dirname "$0")/../apache-druid-$DRUID_VERSION
# stop hadoop container if it exists (can't use docker-compose down because it shares network)
HADOOP_CONTAINER="$(docker ps -aq -f name=druid-it-hadoop)"
@ -38,7 +39,7 @@ fi
# bring down using the same compose args we started with
if [ -z "$DRUID_INTEGRATION_TEST_OVERRIDE_CONFIG_PATH" ]
then
docker-compose $(getComposeArgs) down
OVERRIDE_ENV=environment-configs/empty-config docker-compose $(getComposeArgs) down
else
OVERRIDE_ENV=$DRUID_INTEGRATION_TEST_OVERRIDE_CONFIG_PATH docker-compose $(getComposeArgs) down
fi
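Because the compose file set is derived from the test group and indexer type, a manual teardown has to see the same variables that were exported at startup; a sketch, with the script name assumed:
# bring down the security-group cluster that was started with a middleManager
DRUID_INTEGRATION_TEST_GROUP=security DRUID_INTEGRATION_TEST_INDEXER=middleManager \
  ./stop_cluster.sh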