From 1df41db46d29978a5930e9b2322cdb5680d5f3d8 Mon Sep 17 00:00:00 2001
From: Zoltan Haindrich
Date: Wed, 3 Apr 2024 12:32:55 +0200
Subject: [PATCH] Migrate to use docker compose v2 (#16232)

https://github.com/actions/runner-images/issues/9557
---
 distribution/docker/README.md         |  2 +-
 docs/tutorials/docker.md              |  4 ++--
 .../opentelemetry-emitter/README.md   |  2 +-
 integration-tests-ex/cases/cluster.sh |  6 +++---
 integration-tests/README.md           | 20 ++++++++++----------
 .../script/docker_run_cluster.sh      |  6 +++---
 integration-tests/stop_cluster.sh     |  6 +++---
 7 files changed, 23 insertions(+), 23 deletions(-)

diff --git a/distribution/docker/README.md b/distribution/docker/README.md
index 4c03e230b62..7be1529c02f 100644
--- a/distribution/docker/README.md
+++ b/distribution/docker/README.md
@@ -43,7 +43,7 @@ To build images on Apple M1/M2, you need to follow the instructions in this sect
 2. Edit `environment` file to suit if necessary.
 3. Run:
    ```bash
-   docker-compose -f distribution/docker/docker-compose.yml up
+   docker compose -f distribution/docker/docker-compose.yml up
    ```
 
 ## MySQL Database Connector
diff --git a/docs/tutorials/docker.md b/docs/tutorials/docker.md
index c77abda0392..d77e89832cb 100644
--- a/docs/tutorials/docker.md
+++ b/docs/tutorials/docker.md
@@ -113,7 +113,7 @@ Note that Druid uses port 8888 for the console. This port is also used by Jupyte
 
 `cd` into the directory that contains the configuration files. This is the directory you created above, or the `distribution/docker/` in your Druid installation directory if you installed Druid locally.
 
-Run `docker-compose up` to launch the cluster with a shell attached, or `docker-compose up -d` to run the cluster in the background.
+Run `docker compose up` to launch the cluster with a shell attached, or `docker compose up -d` to run the cluster in the background.
 
 Once the cluster has started, you can navigate to the [web console](../operations/web-console.md) at [http://localhost:8888](http://localhost:8888). The [Druid router process](../design/router.md) serves the UI.
@@ -133,5 +133,5 @@ docker exec -ti <id> sh
 Where `<id>` is the container id found with `docker ps`. Druid is installed in `/opt/druid`. The [script](https://github.com/apache/druid/blob/{{DRUIDVERSION}}/distribution/docker/druid.sh) which consumes the environment variables mentioned above, and which launches Druid, is located at `/druid.sh`.
 
-Run `docker-compose down` to shut down the cluster. Your data is persisted as a set of [Docker volumes](https://docs.docker.com/storage/volumes/) and will be available when you restart your Druid cluster.
+Run `docker compose down` to shut down the cluster. Your data is persisted as a set of [Docker volumes](https://docs.docker.com/storage/volumes/) and will be available when you restart your Druid cluster.
diff --git a/extensions-contrib/opentelemetry-emitter/README.md b/extensions-contrib/opentelemetry-emitter/README.md
index ce5639aafa9..91e538f35a9 100644
--- a/extensions-contrib/opentelemetry-emitter/README.md
+++ b/extensions-contrib/opentelemetry-emitter/README.md
@@ -117,7 +117,7 @@ service:
 Run otel-collector and zipkin.
 
 ```bash
-docker-compose up
+docker compose up
 ```
 
 ### Part 2: Run Druid
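The documentation hunks above change only the command's spelling: Compose v2 ships as a plugin of the `docker` CLI rather than as the standalone `docker-compose` binary, so every flag and compose file stays the same. A minimal sketch of the equivalence (the compose file name here is illustrative, not taken from the patch):

```bash
# Compose v1: standalone, hyphenated binary.
docker-compose -f docker-compose.yml up -d

# Compose v2: `compose` is a subcommand of the docker CLI; flags and YAML are unchanged.
docker compose -f docker-compose.yml up -d

# Confirm the v2 plugin is installed before relying on it.
docker compose version
```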
diff --git a/integration-tests-ex/cases/cluster.sh b/integration-tests-ex/cases/cluster.sh
index dfe1037b43c..1ff1cdf1307 100755
--- a/integration-tests-ex/cases/cluster.sh
+++ b/integration-tests-ex/cases/cluster.sh
@@ -81,7 +81,7 @@ function category {
   # shared folder.
 
   # DRUID_INTEGRATION_TEST_GROUP is used in
-  # docker-compose files and here. Despite the name, it is the
+  # docker compose files and here. Despite the name, it is the
   # name of the cluster configuration we want to run, not the
   # test category. Multiple categories can map to the same cluster
   # definition.
@@ -142,7 +142,7 @@ function build_shared_dir {
   sudo chmod -R a+rwx $SHARED_DIR
 }
 
-# Either generate the docker-compose file, or use "static" versions.
+# Either generate the docker compose file, or use "static" versions.
 function docker_file {
   # If a template exists, generate the docker-compose.yaml file.
@@ -214,7 +214,7 @@ function run_setup {
   fi
 }
 
-# Determine if docker-compose is available. If not, assume Docker supports
+# Determine if docker compose is available. If not, assume Docker supports
 # the compose subcommand
 set +e
 if which docker-compose > /dev/null
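The last hunk above only rewords the comment on cluster.sh's fallback logic: the script probes for the standalone binary and otherwise assumes the Docker CLI provides the `compose` subcommand. A minimal sketch of that pattern, assuming only standard shell tooling (the compose file name is illustrative; this is not the patch's code):

```bash
#!/bin/bash
# Prefer the standalone docker-compose binary when it is on the PATH;
# otherwise fall back to the Docker CLI's compose subcommand, mirroring
# the detection that cluster.sh's comment describes.
if which docker-compose > /dev/null 2>&1; then
  DOCKER_COMPOSE=(docker-compose)
else
  DOCKER_COMPOSE=(docker compose)
fi

# Storing the command in an array keeps the two-word v2 form intact.
"${DOCKER_COMPOSE[@]}" -f docker-compose.yml up -d
```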
diff --git a/integration-tests/README.md b/integration-tests/README.md
index 4dfaaa3f293..eedc3b94b23 100644
--- a/integration-tests/README.md
+++ b/integration-tests/README.md
@@ -132,28 +132,28 @@ Maven to only look locally for snapshot jars.
 
   > **NOTE**: `-Ddocker.build.hadoop=true` is optional if you don't run tests against Hadoop.
 
-2. Choose a docker-compose file to start containers.
+2. Choose a docker compose file to start containers.
 
   There are a few different Docker compose yamls located in the "docker" folder that could be used to start containers for different tests.
 
   - To start basic Druid cluster (skip this if running Druid cluster with override configs):
    ```bash
-   docker-compose -f integration-tests/docker/docker-compose.yml up
+   docker compose -f integration-tests/docker/docker-compose.yml up
    ```
 
   - To start Druid cluster with override configs
    ```bash
-   OVERRIDE_ENV=<PATH_TO_ENV> docker-compose -f docker-compose.yml up
+   OVERRIDE_ENV=<PATH_TO_ENV> docker compose -f docker-compose.yml up
    ```
 
  - To start tests against Hadoop
    ```bash
-   docker-compose -f docker-compose.druid-hadoop.yml up
+   docker compose -f docker-compose.druid-hadoop.yml up
    ```
 
  - To start tests against security group
    ```bash
-   docker-compose -f docker-compose.yml -f docker-compose.security.yml up
+   docker compose -f docker-compose.yml -f docker-compose.security.yml up
    ```
 
 3. Run tests.
@@ -195,9 +195,9 @@ The values shown above are for the default docker compose cluster. For other clu
   Defines a Druid cluster with default configuration that is used for running integration tests.
 
   ```bash
-  docker-compose -f docker-compose.yml up
+  docker compose -f docker-compose.yml up
   # DRUID_INTEGRATION_TEST_GROUP - an environment variable that specifies the integration test group to run.
-  DRUID_INTEGRATION_TEST_GROUP=batch-index docker-compose -f docker-compose.yml up
+  DRUID_INTEGRATION_TEST_GROUP=batch-index docker compose -f docker-compose.yml up
   ```
 
   You can change the default configuration using a custom configuration file. The settings in the file will override
@@ -205,7 +205,7 @@ The values shown above are for the default docker compose cluster. For other clu
 
   ```bash
   # OVERRIDE_ENV - an environment variable that specifies the custom configuration file path.
-  OVERRIDE_ENV=./environment-configs/test-groups/prepopulated-data DRUID_INTEGRATION_TEST_GROUP=query docker-compose -f docker-compose.yml up
+  OVERRIDE_ENV=./environment-configs/test-groups/prepopulated-data DRUID_INTEGRATION_TEST_GROUP=query docker compose -f docker-compose.yml up
   ```
 
 - docker-compose.security.yml
@@ -214,7 +214,7 @@ The values shown above are for the default docker compose cluster. For other clu
   This is meant to be used together with docker-compose.yml and is only needed for the "security" group integration test.
 
   ```bash
-  docker-compose -f docker-compose.yml -f docker-compose.security.yml up
+  docker compose -f docker-compose.yml -f docker-compose.security.yml up
   ```
 
 - docker-compose.druid-hadoop.yml
@@ -222,7 +222,7 @@ The values shown above are for the default docker compose cluster. For other clu
   For starting Apache Hadoop 3.3.6 cluster with the same setup as the Druid tutorial.
 
   ```bash
-  docker-compose -f docker-compose.druid-hadoop.yml up
+  docker compose -f docker-compose.druid-hadoop.yml up
   ```
 
 ## Tips & tricks for debugging and developing integration tests
diff --git a/integration-tests/script/docker_run_cluster.sh b/integration-tests/script/docker_run_cluster.sh
index 03eb735d2f1..775e56d6de5 100755
--- a/integration-tests/script/docker_run_cluster.sh
+++ b/integration-tests/script/docker_run_cluster.sh
@@ -41,17 +41,17 @@ fi
 if [ -n "$DRUID_INTEGRATION_TEST_START_HADOOP_DOCKER" ] && [ "$DRUID_INTEGRATION_TEST_START_HADOOP_DOCKER" == true ]
 then
   # Start Hadoop docker container
-  docker-compose -f ${DOCKERDIR}/docker-compose.druid-hadoop.yml up -d
+  docker compose -f ${DOCKERDIR}/docker-compose.druid-hadoop.yml up -d
 fi
 
 if [ -z "$DRUID_INTEGRATION_TEST_OVERRIDE_CONFIG_PATH" ]
 then
   # Start Druid cluster
   echo "Starting cluster with empty config"
-  OVERRIDE_ENV=environment-configs/empty-config docker-compose $(getComposeArgs) up -d
+  OVERRIDE_ENV=environment-configs/empty-config docker compose $(getComposeArgs) up -d
 else
   # run druid cluster with override config
   echo "Starting cluster with a config file at $DRUID_INTEGRATION_TEST_OVERRIDE_CONFIG_PATH"
-  OVERRIDE_ENV=$DRUID_INTEGRATION_TEST_OVERRIDE_CONFIG_PATH docker-compose $(getComposeArgs) up -d
+  OVERRIDE_ENV=$DRUID_INTEGRATION_TEST_OVERRIDE_CONFIG_PATH docker compose $(getComposeArgs) up -d
 fi
 }
diff --git a/integration-tests/stop_cluster.sh b/integration-tests/stop_cluster.sh
index b88fb91a8fb..5dcabbb8321 100755
--- a/integration-tests/stop_cluster.sh
+++ b/integration-tests/stop_cluster.sh
@@ -28,7 +28,7 @@ fi
 
 rm -rf $(dirname "$0")/../apache-druid-$DRUID_VERSION
 
-# stop hadoop container if it exists (can't use docker-compose down because it shares network)
+# stop hadoop container if it exists (can't use docker compose down because it shares network)
 HADOOP_CONTAINER="$(docker ps -aq -f name=druid-it-hadoop)"
 if [ ! -z "$HADOOP_CONTAINER" ]
 then
@@ -39,9 +39,9 @@ fi
 # bring down using the same compose args we started with
 if [ -z "$DRUID_INTEGRATION_TEST_OVERRIDE_CONFIG_PATH" ]
 then
-  OVERRIDE_ENV=environment-configs/empty-config docker-compose $(getComposeArgs) down
+  OVERRIDE_ENV=environment-configs/empty-config docker compose $(getComposeArgs) down
 else
-  OVERRIDE_ENV=$DRUID_INTEGRATION_TEST_OVERRIDE_CONFIG_PATH docker-compose $(getComposeArgs) down
+  OVERRIDE_ENV=$DRUID_INTEGRATION_TEST_OVERRIDE_CONFIG_PATH docker compose $(getComposeArgs) down
 fi
 
 if [ ! -z "$(docker network ls -q -f name=druid-it-net)" ]
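stop_cluster.sh above deliberately passes the same OVERRIDE_ENV value and compose arguments to `down` that docker_run_cluster.sh passed to `up`, so Compose resolves the same project and tears down the containers it created. A hedged usage sketch of that symmetry, with `-f docker-compose.yml` standing in for the scripts' `$(getComposeArgs)`:

```bash
# Start the default cluster with the empty override file the scripts use...
OVERRIDE_ENV=environment-configs/empty-config docker compose -f docker-compose.yml up -d

# ...and bring it down with identical arguments, as the scripts' comment
# advises, so `down` matches the project started by `up`.
OVERRIDE_ENV=environment-configs/empty-config docker compose -f docker-compose.yml down
```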