mirror of https://github.com/apache/druid.git
Migrate to use docker compose v2 (#16232)
https://github.com/actions/runner-images/issues/9557
parent 4bea865697
commit 1df41db46d
@@ -43,7 +43,7 @@ To build images on Apple M1/M2, you need to follow the instructions in this sect
 2. Edit the `environment` file to suit, if necessary.
 3. Run:
    ```bash
-   docker-compose -f distribution/docker/docker-compose.yml up
+   docker compose -f distribution/docker/docker-compose.yml up
    ```
 
 ## MySQL Database Connector
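Compose v2 ships as a plugin subcommand of the `docker` CLI rather than as the separate `docker-compose` binary, which is why every hunk in this commit only changes the command spelling. A quick way to check which variant an environment provides (a minimal sketch; version output formatting varies):

```bash
# v2 plugin: prints something like "Docker Compose version v2.x.y"
docker compose version

# legacy standalone binary, if still installed
docker-compose version
```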
@@ -113,7 +113,7 @@ Note that Druid uses port 8888 for the console. This port is also used by Jupyte
 
 `cd` into the directory that contains the configuration files. This is the directory you created above, or the `distribution/docker/` directory in your Druid installation directory if you installed Druid locally.
 
-Run `docker-compose up` to launch the cluster with a shell attached, or `docker-compose up -d` to run the cluster in the background.
+Run `docker compose up` to launch the cluster with a shell attached, or `docker compose up -d` to run the cluster in the background.
 
 Once the cluster has started, you can navigate to the [web console](../operations/web-console.md) at [http://localhost:8888](http://localhost:8888). The [Druid router process](../design/router.md) serves the UI.
 
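Taken together, the tutorial steps above amount to the following sequence (a sketch assuming a local install; the directory is wherever your `docker-compose.yml` lives):

```bash
cd distribution/docker/   # directory containing the configuration files
docker compose up -d      # launch the cluster in the background
docker compose ps         # verify the containers are up
```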
@@ -133,5 +133,5 @@ docker exec -ti <id> sh
 
 Where `<id>` is the container id found with `docker ps`. Druid is installed in `/opt/druid`. The [script](https://github.com/apache/druid/blob/{{DRUIDVERSION}}/distribution/docker/druid.sh) which consumes the environment variables mentioned above, and which launches Druid, is located at `/druid.sh`.
 
-Run `docker-compose down` to shut down the cluster. Your data is persisted as a set of [Docker volumes](https://docs.docker.com/storage/volumes/) and will be available when you restart your Druid cluster.
+Run `docker compose down` to shut down the cluster. Your data is persisted as a set of [Docker volumes](https://docs.docker.com/storage/volumes/) and will be available when you restart your Druid cluster.
 
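A worked version of the attach-and-shutdown flow described above (a sketch; `<id>` is whatever container id `docker ps` reports in your cluster):

```bash
docker ps --format '{{.ID}}\t{{.Names}}'   # list running containers and their ids
docker exec -ti <id> sh                    # open a shell; Druid lives in /opt/druid
docker compose down                        # shut the cluster down when finished
```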
@@ -117,7 +117,7 @@ service:
 Run otel-collector and zipkin.
 
 ```bash
-docker-compose up
+docker compose up
 ```
 
 ### Part 2: Run Druid
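If the collector and zipkin services are defined in the compose file of the current directory, the detached form plus log tailing would look like this (a sketch; `otel-collector` as the compose service name is an assumption based on the prose above):

```bash
docker compose up -d                    # start otel-collector and zipkin in the background
docker compose logs -f otel-collector   # tail the collector's logs (service name assumed)
```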
@@ -81,7 +81,7 @@ function category {
 # shared folder.
 
 # DRUID_INTEGRATION_TEST_GROUP is used in
-# docker-compose files and here. Despite the name, it is the
+# docker compose files and here. Despite the name, it is the
 # name of the cluster configuration we want to run, not the
 # test category. Multiple categories can map to the same cluster
 # definition.
@@ -142,7 +142,7 @@ function build_shared_dir {
     sudo chmod -R a+rwx $SHARED_DIR
 }
 
-# Either generate the docker-compose file, or use "static" versions.
+# Either generate the docker compose file, or use "static" versions.
 function docker_file {
 
     # If a template exists, generate the docker-compose.yaml file.
@@ -214,7 +214,7 @@ function run_setup {
     fi
 }
 
-# Determine if docker-compose is available. If not, assume Docker supports
+# Determine if docker compose is available. If not, assume Docker supports
 # the compose subcommand
 set +e
 if which docker-compose > /dev/null
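The hunk above cuts off mid-`if`; the availability check it begins would typically complete along these lines (a sketch of the common idiom, not the script's actual continuation):

```bash
# Prefer the legacy binary if present; otherwise assume the v2 plugin.
set +e
if which docker-compose > /dev/null
then
  DOCKER_COMPOSE="docker-compose"   # standalone v1 binary found on PATH
else
  DOCKER_COMPOSE="docker compose"   # fall back to the compose subcommand
fi
set -e
```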
@@ -132,28 +132,28 @@ Maven to only look locally for snapshot jars.
 
 > **NOTE**: `-Ddocker.build.hadoop=true` is optional if you don't run tests against Hadoop.
 
-2. Choose a docker-compose file to start containers.
+2. Choose a docker compose file to start containers.
 
    There are a few different Docker compose yamls located in the "docker" folder that can be used to start containers for different tests.
 
   - To start a basic Druid cluster (skip this if running the Druid cluster with override configs):
     ```bash
-    docker-compose -f integration-tests/docker/docker-compose.yml up
+    docker compose -f integration-tests/docker/docker-compose.yml up
     ```
 
   - To start the Druid cluster with override configs:
     ```bash
-    OVERRIDE_ENV=<PATH_TO_ENV> docker-compose -f docker-compose.yml up
+    OVERRIDE_ENV=<PATH_TO_ENV> docker compose -f docker-compose.yml up
     ```
 
   - To start tests against Hadoop:
     ```bash
-    docker-compose -f docker-compose.druid-hadoop.yml up
+    docker compose -f docker-compose.druid-hadoop.yml up
     ```
 
   - To start tests against the security group:
     ```bash
-    docker-compose -f docker-compose.yml -f docker-compose.security.yml up
+    docker compose -f docker-compose.yml -f docker-compose.security.yml up
    ```
 
 3. Run tests.
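The hunk ends at step 3. For reference, these integration tests are Maven failsafe tests, so the run step is typically an invocation along these lines (hypothetical; the profile name and test class are assumptions, consult the full README for exact flags):

```bash
# -Dit.test selects a single failsafe test class; profile name assumed
mvn verify -P integration-tests -Dit.test=ITIndexerTest
```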
@@ -195,9 +195,9 @@ The values shown above are for the default docker compose cluster. For other clu
 Defines a Druid cluster with default configuration that is used for running integration tests.
 
 ```bash
-docker-compose -f docker-compose.yml up
+docker compose -f docker-compose.yml up
 # DRUID_INTEGRATION_TEST_GROUP - an environment variable that specifies the integration test group to run.
-DRUID_INTEGRATION_TEST_GROUP=batch-index docker-compose -f docker-compose.yml up
+DRUID_INTEGRATION_TEST_GROUP=batch-index docker compose -f docker-compose.yml up
 ```
 
 You can change the default configuration using a custom configuration file. The settings in the file will override
@@ -205,7 +205,7 @@ The values shown above are for the default docker compose cluster. For other clu
 
 ```bash
 # OVERRIDE_ENV - an environment variable that specifies the custom configuration file path.
-OVERRIDE_ENV=./environment-configs/test-groups/prepopulated-data DRUID_INTEGRATION_TEST_GROUP=query docker-compose -f docker-compose.yml up
+OVERRIDE_ENV=./environment-configs/test-groups/prepopulated-data DRUID_INTEGRATION_TEST_GROUP=query docker compose -f docker-compose.yml up
 ```
 
 - docker-compose.security.yml
@@ -214,7 +214,7 @@ The values shown above are for the default docker compose cluster. For other clu
 This is meant to be used together with docker-compose.yml and is only needed for the "security" group integration test.
 
 ```bash
-docker-compose -f docker-compose.yml -f docker-compose.security.yml up
+docker compose -f docker-compose.yml -f docker-compose.security.yml up
 ```
 
 - docker-compose.druid-hadoop.yml
@@ -222,7 +222,7 @@ The values shown above are for the default docker compose cluster. For other clu
 For starting an Apache Hadoop 3.3.6 cluster with the same setup as the Druid tutorial.
 
 ```bash
-docker-compose -f docker-compose.druid-hadoop.yml up
+docker compose -f docker-compose.druid-hadoop.yml up
 ```
 
 ## Tips & tricks for debugging and developing integration tests
@@ -41,17 +41,17 @@ fi
 if [ -n "$DRUID_INTEGRATION_TEST_START_HADOOP_DOCKER" ] && [ "$DRUID_INTEGRATION_TEST_START_HADOOP_DOCKER" == true ]
 then
   # Start Hadoop docker container
-  docker-compose -f ${DOCKERDIR}/docker-compose.druid-hadoop.yml up -d
+  docker compose -f ${DOCKERDIR}/docker-compose.druid-hadoop.yml up -d
 fi
 
 if [ -z "$DRUID_INTEGRATION_TEST_OVERRIDE_CONFIG_PATH" ]
 then
   # Start Druid cluster
   echo "Starting cluster with empty config"
-  OVERRIDE_ENV=environment-configs/empty-config docker-compose $(getComposeArgs) up -d
+  OVERRIDE_ENV=environment-configs/empty-config docker compose $(getComposeArgs) up -d
 else
   # run druid cluster with override config
   echo "Starting cluster with a config file at $DRUID_INTEGRATION_TEST_OVERRIDE_CONFIG_PATH"
-  OVERRIDE_ENV=$DRUID_INTEGRATION_TEST_OVERRIDE_CONFIG_PATH docker-compose $(getComposeArgs) up -d
+  OVERRIDE_ENV=$DRUID_INTEGRATION_TEST_OVERRIDE_CONFIG_PATH docker compose $(getComposeArgs) up -d
 fi
 }
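`getComposeArgs` is defined elsewhere in this script; conceptually it emits the `-f` file arguments for the selected cluster configuration, roughly like this (a hypothetical sketch, not the real definition):

```bash
# Hypothetical: the real helper chooses compose files based on
# DRUID_INTEGRATION_TEST_GROUP; this only illustrates the shape of its output.
getComposeArgs() {
  echo "-f ${DOCKERDIR}/docker-compose.yml"
}
```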
@@ -28,7 +28,7 @@ fi
 
 rm -rf $(dirname "$0")/../apache-druid-$DRUID_VERSION
 
-# stop hadoop container if it exists (can't use docker-compose down because it shares network)
+# stop hadoop container if it exists (can't use docker compose down because it shares network)
 HADOOP_CONTAINER="$(docker ps -aq -f name=druid-it-hadoop)"
 if [ ! -z "$HADOOP_CONTAINER" ]
 then
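The `then` branch is truncated by the hunk; since `docker compose down` can't be used here (the container shares a network), the cleanup presumably removes the container directly, roughly like this (a sketch under that assumption):

```bash
if [ ! -z "$HADOOP_CONTAINER" ]
then
  docker stop "$HADOOP_CONTAINER"   # stop the hadoop container directly
  docker rm "$HADOOP_CONTAINER"     # and remove it, bypassing compose
fi
```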
@@ -39,9 +39,9 @@ fi
 # bring down using the same compose args we started with
 if [ -z "$DRUID_INTEGRATION_TEST_OVERRIDE_CONFIG_PATH" ]
 then
-  OVERRIDE_ENV=environment-configs/empty-config docker-compose $(getComposeArgs) down
+  OVERRIDE_ENV=environment-configs/empty-config docker compose $(getComposeArgs) down
 else
-  OVERRIDE_ENV=$DRUID_INTEGRATION_TEST_OVERRIDE_CONFIG_PATH docker-compose $(getComposeArgs) down
+  OVERRIDE_ENV=$DRUID_INTEGRATION_TEST_OVERRIDE_CONFIG_PATH docker compose $(getComposeArgs) down
 fi
 
 if [ ! -z "$(docker network ls -q -f name=druid-it-net)" ]
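The final `if` is cut off by the hunk; a check on `docker network ls` like this one usually precedes removal of the test network (a sketch under that assumption):

```bash
if [ ! -z "$(docker network ls -q -f name=druid-it-net)" ]
then
  docker network rm druid-it-net   # tear down the integration-test network
fi
```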