mirror of https://github.com/apache/druid.git
Extend the IT framework to allow tests in extensions (#13877)
The "new" IT framework provides a convenient way to package and run integration tests (ITs), but only for core modules. We have a use case to run an IT for a contrib extension: the proposed gRPC query extension. This PR provides the IT framework functionality to allow non-core ITs.
This commit is contained in: parent 10bce22e68, commit 3c0983c8e9
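With this change, `it.sh` can target a module other than `druid-it-cases`. A sketch of the intended flow, using a hypothetical extension module and category (names are illustrative, not taken from this commit):

```bash
# Build Druid and the distribution, then the shared test image.
./it.sh build
./it.sh image

# Run the ITs for a hypothetical category defined in an extension module,
# passing the module as a relative path rather than Maven coordinates.
./it.sh test MyCategory extensions-contrib/my-extension-it
```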
@@ -27,15 +27,17 @@ set -e
 # Enable for debugging
 #set -x
 
-export MODULE_DIR=$(cd $(dirname $0) && pwd)
+export BASE_MODULE_DIR=$(cd $(dirname $0) && pwd)
+
+# The location of the tests, which may be different than
+# the location of this file.
+export MODULE_DIR=${IT_MODULE_DIR:-$BASE_MODULE_DIR}
 
 function usage {
   cat <<EOF
 Usage: $0 cmd [category]
   -h, help
       Display this message
   prepare category
       Generate the docker-compose.yaml file for the category for debugging.
   up category
       Start the cluster
   down category
@@ -45,7 +47,7 @@ Usage: $0 cmd [category]
   compose-cmd category
       Pass the command to Docker compose. Cluster should already be up.
   gen category
-      Generate docker-compose.yaml files (only.) Done automatically as
+      Generate docker-compose.yaml file (only.) Done automatically as
       part of up. Use only for debugging.
 EOF
 }
@@ -60,7 +62,7 @@ CMD=$1
 shift
 
 function check_env_file {
-  export ENV_FILE=$MODULE_DIR/../image/target/env.sh
+  export ENV_FILE=$BASE_MODULE_DIR/../image/target/env.sh
   if [ ! -f $ENV_FILE ]; then
     echo "Please build the Docker test image before testing" 1>&2
     exit 1
@@ -127,33 +129,33 @@ function show_status {
 function build_shared_dir {
   mkdir -p $SHARED_DIR
   # Must start with an empty DB to keep MySQL happy
-  rm -rf $SHARED_DIR/db
+  sudo rm -rf $SHARED_DIR/db
   mkdir -p $SHARED_DIR/logs
   mkdir -p $SHARED_DIR/tasklogs
   mkdir -p $SHARED_DIR/db
   mkdir -p $SHARED_DIR/kafka
   mkdir -p $SHARED_DIR/resources
-  cp $MODULE_DIR/assets/log4j2.xml $SHARED_DIR/resources
+  cp $BASE_MODULE_DIR/assets/log4j2.xml $SHARED_DIR/resources
   # Permissions in some build setups are screwed up. See above. The user
   # which runs Docker does not have permission to write into the /shared
   # directory. Force ownership to allow writing.
-  chmod -R a+rwx $SHARED_DIR
+  sudo chmod -R a+rwx $SHARED_DIR
 }
 
 # Either generate the docker-compose file, or use "static" versions.
 function docker_file {
 
-  # If a template exists, generate the docker-compose.yaml file. Copy over the Common
-  # folder.
-  TEMPLATE_DIR=$MODULE_DIR/templates
-  TEMPLATE_SCRIPT=${DRUID_INTEGRATION_TEST_GROUP}.py
-  if [ -f "$TEMPLATE_DIR/$TEMPLATE_SCRIPT" ]; then
+  # If a template exists, generate the docker-compose.yaml file.
+  # Copy over the Common folder.
+  TEMPLATE_SCRIPT=docker-compose.py
+  if [ -f "$CLUSTER_DIR/$TEMPLATE_SCRIPT" ]; then
+    export PYTHONPATH=$BASE_MODULE_DIR/cluster
     export COMPOSE_DIR=$TARGET_DIR/cluster/$DRUID_INTEGRATION_TEST_GROUP
     mkdir -p $COMPOSE_DIR
-    pushd $TEMPLATE_DIR > /dev/null
+    pushd $CLUSTER_DIR > /dev/null
     python3 $TEMPLATE_SCRIPT
     popd > /dev/null
-    cp -r $MODULE_DIR/cluster/Common $TARGET_DIR/cluster
+    cp -r $BASE_MODULE_DIR/cluster/Common $TARGET_DIR/cluster
   else
     # Else, use the existing non-template file in place.
     if [ ! -d $CLUSTER_DIR ]; then
@@ -205,6 +207,13 @@ function verify_docker_file {
   fi
 }
 
+function run_setup {
+  SETUP_SCRIPT="$CLUSTER_DIR/setup.sh"
+  if [ -f "$SETUP_SCRIPT" ]; then
+    source "$SETUP_SCRIPT"
+  fi
+}
+
 # Determine if docker-compose is available. If not, assume Docker supports
 # the compose subcommand
 set +e
@@ -219,17 +228,6 @@ set -e
 # Print environment for debugging
 #env
 
-# Determine if docker-compose is available. If not, assume Docker supports
-# the compose subcommand
-set +e
-if which docker-compose > /dev/null
-then
-  DOCKER_COMPOSE='docker-compose'
-else
-  DOCKER_COMPOSE='docker compose'
-fi
-set -e
-
 case $CMD in
   "-h" )
     usage
@@ -238,17 +236,11 @@ case $CMD in
     usage
     $DOCKER_COMPOSE help
     ;;
-  "prepare" )
-    check_env_file
-    category $*
-    build_shared_dir
-    docker_file
-    ;;
   "gen" )
     category $*
     build_shared_dir
     docker_file
-    echo "Generated file is in $COMPOSE_DIR"
+    echo "Generated file is $COMPOSE_DIR/docker-compose.yaml"
    ;;
  "up" )
    check_env_file
@@ -256,6 +248,7 @@ case $CMD in
    echo "Starting cluster $DRUID_INTEGRATION_TEST_GROUP"
    build_shared_dir
    docker_file
+    run_setup
    cd $COMPOSE_DIR
    $DOCKER_COMPOSE $DOCKER_ARGS up -d
    # Enable the following for debugging
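Taken together, `BASE_MODULE_DIR`, the `IT_MODULE_DIR` override, and the per-cluster `docker-compose.py` convention let a non-core module reuse this script. A minimal sketch of driving it from an extension (paths and category are hypothetical):

```bash
# Hypothetical extension IT module with its own cluster definition:
#   extensions-contrib/my-extension-it/cluster/MyCategory/docker-compose.py
export IT_MODULE_DIR=/path/to/druid/extensions-contrib/my-extension-it

# cluster.sh still lives in the base module; MODULE_DIR now resolves to the
# extension module, so its templates and setup.sh are found there.
/path/to/druid/integration-tests-ex/cases/cluster.sh up MyCategory
```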
@@ -0,0 +1,19 @@
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements. See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License. You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#--------------------------------------------------------------------
+
+require_env_var AZURE_ACCOUNT
+require_env_var AZURE_KEY
+require_env_var AZURE_CONTAINER
@@ -71,6 +71,7 @@ services:
     # platform: linux/x86_64
     image: mysql:$MYSQL_IMAGE_VERSION
     container_name: metadata
+    restart: always
     command:
       - --character-set-server=utf8mb4
     networks:
@@ -79,7 +80,7 @@ services:
     ports:
       - 3306:3306
     volumes:
-      - ${SHARED_DIR}/db:/var/lib/mysql
+      - ${SHARED_DIR}/db/init.sql:/docker-entrypoint-initdb.d/init.sql
     environment:
       MYSQL_ROOT_PASSWORD: driud
       MYSQL_DATABASE: druid
@@ -0,0 +1,23 @@
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements. See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License. You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#--------------------------------------------------------------------
+
+require_env_var GOOGLE_BUCKET
+require_env_var GOOGLE_PREFIX
+require_env_var GOOGLE_APPLICATION_CREDENTIALS
+if [ ! -f "$GOOGLE_APPLICATION_CREDENTIALS" ]; then
+  echo "Required file GOOGLE_APPLICATION_CREDENTIALS=$GOOGLE_APPLICATION_CREDENTIALS is missing" 1>&2
+  exit 1
+fi
@@ -0,0 +1,21 @@
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements. See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License. You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#--------------------------------------------------------------------
+
+require_env_var DRUID_CLOUD_BUCKET
+require_env_var DRUID_CLOUD_PATH
+require_env_var AWS_REGION
+require_env_var AWS_ACCESS_KEY_ID
+require_env_var AWS_SECRET_ACCESS_KEY
@@ -23,7 +23,7 @@ internally as a Python data structure made up of maps, arrays and scalars.
 PyYaml does the grunt work of converting the data structure to the YAML file.
 '''
 
-import yaml, os, os.path
+import yaml, os
 from pathlib import Path
 
 # Constants used frequently in the template.
@@ -49,15 +49,16 @@ def generate(template_path, template):
     '''
 
     # Compute the cluster (test category) name from the template path which
-    # we assume to be module/<something>/<template>/<something>.py
+    # we assume to be <module>/templates/<something>.py
     template_path = Path(template_path)
-    cluster = template_path.stem
+    cluster = template_path.parent.name
 
-    # Move up to the module (that is, the cases folder) relative to the template file.
-    module_dir = Path(__file__).parent.parent
+    # Move up to the module relative to the template file.
+    module_dir = template_path.parent.parent.parent
 
+    # The target location for the output file is <module>/target/cluster/<cluster>/docker-compose.yaml
     target_dir = module_dir.joinpath("target")
     os.makedirs(target_dir, exist_ok=True)
     target_file = target_dir.joinpath('cluster', cluster, 'docker-compose.yaml')
 
     # Defer back to the template class to create the output into the docker-compose.yaml file.
@@ -205,7 +206,7 @@ class BaseTemplate:
     def add_property(self, service, prop, value):
         '''
         Sets a property for a service. The property is of the same form as the
-        .properties file: druid.some.property.
+        runtime.properties file: druid.some.property.
         This method converts the property to the env var form so you don't have to.
         '''
         var = prop.replace('.', '_')
@@ -230,7 +231,7 @@ class BaseTemplate:
         Add a port mapping to the service
         '''
         ports = service.setdefault('ports', [])
-        ports.append(local + ':' + container)
+        ports.append(str(local) + ':' + str(container))
 
     def define_external_service(self, name) -> dict:
         '''
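The path arithmetic above is easiest to see against a concrete (hypothetical) template location; a small shell illustration of the same computation:

```bash
# Hypothetical template inside an extension module:
TEMPLATE=extensions-contrib/my-extension-it/cluster/MyCategory/docker-compose.py

CLUSTER=$(basename $(dirname $TEMPLATE))               # MyCategory
MODULE_DIR=$(dirname $(dirname $(dirname $TEMPLATE)))  # extensions-contrib/my-extension-it

# The generated file lands at <module>/target/cluster/<cluster>/docker-compose.yaml:
echo "$MODULE_DIR/target/cluster/$CLUSTER/docker-compose.yaml"
```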
@@ -37,10 +37,38 @@ See also:
 
 ## File Structure
 
-Docker Compose files live in the `druid-it-cases` module (`test-cases` folder)
+Docker Compose files live in the `druid-it-cases` module (`cases` folder)
 in the `cluster` directory. There is a separate subdirectory for each cluster type
 (subset of test categories), plus a `Common` folder for shared files.
 
+### Cluster Directory
+
+Each test category uses an associated cluster. In some cases, multiple tests use
+the same cluster definition. Each cluster is defined by a directory in
+`$MODULE/cluster/$CLUSTER_NAME`. The directory contains a variety of files, most
+of which are optional:
+
+* `docker-compose.yaml` - Docker Compose file, if created explicitly.
+* `docker-compose.py` - Docker Compose "template", if generated. The Python template
+  format is preferred. (One of the `docker-compose.*` files is required.)
+* `verify.sh` - Verify the environment for the cluster. Cloud tests require that a
+  number of environment variables be set to pass keys and other setup to tests.
+  (Optional)
+* `setup.sh` - Additional cluster setup, such as populating the "shared" directory
+  with test-specific items. (Optional)
+
+The `verify.sh` and `setup.sh` scripts are sourced into one of the "master"
+scripts and can thus make use of environment variables already set:
+
+* `BASE_MODULE_DIR` points to `integration-tests-ex/cases` where the "base" set
+  of scripts and cluster definitions reside.
+* `MODULE_DIR` points to the Maven module folder that contains the test.
+* `CATEGORY` gives the name of the test category.
+* `DRUID_INTEGRATION_TEST_GROUP` is the cluster name. Often the same as `CATEGORY`,
+  but not always.
+
+The `set -e` option is in effect so that any errors fail the test.
+
 ## Shared Directory
 
 Each test has a "shared" directory that is mounted into each container to hold things
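A minimal sketch of a `verify.sh` for a hypothetical cloud-backed category, following the pattern of the Azure/GCS/S3 scripts added in this commit (`require_env_var` and `CATEGORY` come from the sourcing script; the variable names here are invented):

```bash
# Hypothetical cluster/MyCloudCategory/verify.sh, sourced before the cluster starts.
require_env_var MY_CLOUD_BUCKET
require_env_var MY_CLOUD_ACCESS_KEY

# Fail early when a required credentials file is absent, as the
# GcsDeepStorage verify.sh does.
if [ ! -f "$MY_CLOUD_CREDENTIALS_FILE" ]; then
  echo "Required file MY_CLOUD_CREDENTIALS_FILE=$MY_CLOUD_CREDENTIALS_FILE is missing" 1>&2
  exit 1
fi
```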
@@ -211,6 +211,37 @@ when it starts. If you start, then restart the MySQL container, you *must*
 remove the `db` directory before restart or MySQL will fail due to existing
 files.
 
+### Per-test Extensions
+
+The image build includes a standard set of extensions. Contrib or custom extensions
+may wish to add additional extensions. This is most easily done not by altering the
+image, but by adding the extensions at cluster startup. If the shared directory has
+an `extensions` subdirectory, then that directory is added to the extension search
+path on container startup. To add an extension `my-extension`, your shared directory
+should look like this:
+
+```text
+shared
++- ...
++- extensions
+   +- my-extension
+      +- my-extension-<version>.jar
+      +- ...
+```
+
+The `extensions` directory should be created within the per-cluster `setup.sh` script,
+which is run when starting your test cluster.
+
+Be sure to also include the extension in the load list in your `docker-compose.py` template.
+To load the extension on all nodes:
+
+```python
+def extend_druid_service(self, service):
+    self.add_env(service, 'druid_test_loadList', 'my-extension')
+```
+
+Note that the above requires Druid and IT features added in early March, 2023.
+
 ### Third-Party Logs
 
 The three third-party containers are configured to log to the `/shared`
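A minimal sketch of a per-cluster `setup.sh` that stages such an extension (the extension name and jar location are hypothetical; `SHARED_DIR` and `MODULE_DIR` are assumed to be set by the sourcing script):

```bash
# Hypothetical cluster/MyCategory/setup.sh, sourced by cluster.sh on "up".
# Stage this module's extension jars into the shared directory so the
# container adds them to the extension search path at startup.
EXTN_NAME=my-extension
EXTN_DIR=$SHARED_DIR/extensions/$EXTN_NAME

mkdir -p $EXTN_DIR
cp $MODULE_DIR/target/$EXTN_NAME-*.jar $EXTN_DIR
```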
@@ -22,15 +22,34 @@
 # Fail fast on any error
 set -e
 
-# Enable for tracing
+# Enable for debugging
 #set -x
 
-# Dump the environment for debugging.
+# Dump the environment for debugging. Also done later to the log.
 #env
 
 # Launch Druid within the container.
 cd /
 
+# Where things are located
+SHARED_DIR=/shared
+LOG_DIR=$SHARED_DIR/logs
+DRUID_HOME=/usr/local/druid
+
+# Allow test-specific extensions in the /shared/extensions directory.
+# If that directory exists (it won't for most tests), add it to the
+# extensions path, using a feature in Druid 26 or later.
+# For maximum flexibility, don't overwrite the extensions path if
+# it is set.
+TEST_EXTN_DIR=$SHARED_DIR/extensions
+if [ -d $TEST_EXTN_DIR ]; then
+  if [ -z "$druid_extensions_path" ]; then
+    export druid_extensions_path="[\"${TEST_EXTN_DIR}\"]"
+  else
+    echo "Extension directory $TEST_EXTN_DIR found, and druid_extensions_path={$druid_extensions_path} -- not setting path automatically"
+  fi
+fi
+
 # TODO: enable only for security-related tests?
 #/tls/generate-server-certs-and-keystores.sh
 
@@ -38,12 +57,12 @@ cd /
 # The MySQL driver is selected by the Docker Compose file.
 # Set druid.metadata.mysql.driver.driverClassName to the preferred
 # driver.
-
-# Test-specific way to define extensions. Compose defines two test-specific
-# variables. We combine these to create the final form converted to a property.
 if [ -n "$MYSQL_DRIVER_CLASSNAME" ]; then
   export druid_metadata_mysql_driver_driverClassName="$MYSQL_DRIVER_CLASSNAME"
 fi
 
+# Test-specific way to define extensions. Compose defines two test-specific
+# variables. We combine these to create the final form converted to a property.
 if [ -n "$druid_extensions_loadList" ]; then
   echo "Using the provided druid_extensions_loadList=$druid_extensions_loadList"
 else
@@ -76,10 +95,6 @@ DRUID_SERVICE_CONF_DIR="$(. /druid.sh; getConfPath ${DRUID_SERVICE})"
 # Export the common config file path to use in supervisord conf file
 DRUID_COMMON_CONF_DIR="$(. /druid.sh; getConfPath _common)"
 
-SHARED_DIR=/shared
-LOG_DIR=$SHARED_DIR/logs
-DRUID_HOME=/usr/local/druid
-
 # For multiple nodes of the same type to create a unique name
 INSTANCE_NAME=$DRUID_SERVICE
 if [ -n "$DRUID_INSTANCE" ]; then
@@ -117,6 +132,8 @@ if [ -d $EXTRA_RESOURCES ]; then
   CP=$CP:$EXTRA_RESOURCES
 fi
 
+# For easier debugging, dump the environment and runtime.properties
+# to the log.
 LOG_FILE=$LOG_DIR/${INSTANCE_NAME}.log
 echo "" >> $LOG_FILE
 echo "--- env ---" >> $LOG_FILE
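The `druid_*` environment variables above rely on Druid's standard mapping from underscore form to property form. A hedged illustration (the property value is invented):

```bash
# Set in a Compose file, or forwarded into the container by it.sh's override.env:
export druid_extensions_loadList='["mysql-metadata-storage","my-extension"]'

# Inside the container this is surfaced as the runtime property:
#   druid.extensions.loadList=["mysql-metadata-storage","my-extension"]
```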
it.sh (229 lines changed)
@@ -30,7 +30,7 @@ export DRUID_DEV=$(cd $(dirname $0) && pwd)
 function usage
 {
   cat <<EOF
-Usage: $0 cmd [category]
+Usage: $0 cmd [category] [module]
   ci
       build Druid and the distribution for CI pipelines
   build
@@ -41,21 +41,32 @@ Usage: $0 cmd [category]
       Build druid-it-tools
   image
       Build the test image
-  up <category>
-      Start the cluster for category
-  down <category>
-      Stop the cluster for category
-  test <category>
-      Start the cluster, run the test for category, and stop the cluster
-  tail <category>
-      Show the last 20 lines of each container log
-  gen
+  up <category> [<module>]
+      Start the cluster for category.
+  down <category> [<module>]
+      Stop the cluster for category.
+  run <category> [<module>]
+      Run the tests for the given module on an already-running cluster.
+      Does not stop the cluster. Primarily for debugging.
+  test <category> [<module>]
+      Start the cluster, run the test for category, and stop the cluster.
+  tail <category> [<module>]
+      Show the last 20 lines of each container log.
+  gen <category> [<module>]
       Generate docker-compose.yaml files (done automatically on up)
   travis <category>
-      run one IT in Travis (build dist, image, run test, tail logs)
-  github <category>
-      Run one IT in Github Workflows (run test, tail logs)
-  prune
-      prune Docker volumes
+      run one IT in Travis (build dist, image, run test, tail logs).
+  github <category> [<module>]
+      Run one IT in Github Workflows (run test, tail logs).
+  prune-containers
+      Stop all running Docker containers. Do this if "down" won't work
+      because the "docker-compose.yaml" file is no longer available.
+  prune-volumes
+      prune Docker volumes.
+
+Arguments:
+  category: A defined IT JUnit category, and IT-<category> profile
+  module: relative path to the module with tests. Defaults to
+      integration-tests-ex/cases
 
 Environment:
   OVERRIDE_ENV: optional, name of env file to pass to Docker
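The new `run` command supports an edit-test loop against a cluster that stays up. A sketch with a hypothetical category and module (the module argument takes the path form described under `Arguments` above):

```bash
./it.sh up MyCategory extensions-contrib/my-extension-it

# Iterate: re-run the tests without cycling the cluster.
./it.sh run MyCategory extensions-contrib/my-extension-it

./it.sh down MyCategory extensions-contrib/my-extension-it
```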
@@ -70,13 +81,13 @@ EOF
 
 function tail_logs
 {
-  category=$1
-  cd integration-tests-ex/cases/target/$category/logs
+  pushd $MODULE_DIR/target/$CATEGORY/logs > /dev/null
   ls *.log | while read log;
   do
-    echo "----- $category/$log -----"
-    tail -20 $log
+    echo "----- $CATEGORY/$log -----"
+    tail -100 $log
   done
+  popd > /dev/null
 }
 
 # Many tests require us to pass information into containers using environment variables.
@@ -100,10 +111,13 @@ function tail_logs
 #
 # All of the above are combined into a temporary environment file which is then passed
 # into Docker compose.
+#
+# The file is built when the cluster comes up. It is reused in the test and down
+# commands so we have a consistent environment.
 function build_override {
 
-  mkdir -p target
-  OVERRIDE_FILE="$(pwd)/target/override.env"
+  mkdir -p "$MODULE_DIR/target"
+  OVERRIDE_FILE="$MODULE_DIR/target/override.env"
   rm -f "$OVERRIDE_FILE"
   touch "$OVERRIDE_FILE"
@@ -122,36 +136,39 @@ function build_override {
     cat "$LOCAL_ENV" >> "$OVERRIDE_FILE"
   fi
 
   # Add all environment variables of the form druid_*
   set +e # Grep gives exit status 1 if no lines match. Let's not fail.
   env | grep "^druid_" >> "$OVERRIDE_FILE"
   set -e
 
   # TODO: Add individual env vars that we want to pass from the local
   # environment into the container.
 
   # Reuse the OVERRIDE_ENV variable to pass the full list to Docker compose
   export OVERRIDE_ENV="$OVERRIDE_FILE"
 }
 
-function prepare_category {
-  if [ $# -eq 0 ]; then
-    usage 1>&2
-    exit 1
-  fi
-  export CATEGORY=$1
-}
+function reuse_override {
+  OVERRIDE_FILE="$MODULE_DIR/target/override.env"
+  if [ ! -f "$OVERRIDE_FILE" ]; then
+    echo "Override file $OVERRIDE_FILE not found: Was an 'up' run?" 1>&2
+    exit 1
+  fi
+  export OVERRIDE_ENV="$OVERRIDE_FILE"
+}
+
+function require_category {
+  if [ -z "$CATEGORY" ]; then
+    usage 1>&2
+    exit 1
+  fi
+}
 
-function prepare_docker {
-  cd $DRUID_DEV/integration-tests-ex/cases
-  build_override
-}
-
 function require_env_var {
   if [ -z "$1" ]; then
     echo "$1 must be set for test category $CATEGORY" 1>&2
     exit 1
   fi
 }
 
 # Verify any test-specific environment variables that must be set in this local
@@ -161,52 +178,64 @@ function require_env_var {
 # ensures we get useful error messages when we forget to set something, rather than
 # some cryptic use-specific error.
 function verify_env_vars {
-  case $CATEGORY in
-    "AzureDeepStorage")
-      require_env_var AZURE_ACCOUNT
-      require_env_var AZURE_KEY
-      require_env_var AZURE_CONTAINER
-      ;;
-    "GcsDeepStorage")
-      require_env_var GOOGLE_BUCKET
-      require_env_var GOOGLE_PREFIX
-      require_env_var GOOGLE_APPLICATION_CREDENTIALS
-      if [ ! -f "$GOOGLE_APPLICATION_CREDENTIALS" ]; then
-        echo "Required file GOOGLE_APPLICATION_CREDENTIALS=$GOOGLE_APPLICATION_CREDENTIALS is missing" 1>&2
-        exit 1
-      fi
-      ;;
-    "S3DeepStorage")
-      require_env_var DRUID_CLOUD_BUCKET
-      require_env_var DRUID_CLOUD_PATH
-      require_env_var AWS_REGION
-      require_env_var AWS_ACCESS_KEY_ID
-      require_env_var AWS_SECRET_ACCESS_KEY
-      ;;
-  esac
+  VERIFY_SCRIPT="$MODULE_DIR/cluster/$DRUID_INTEGRATION_TEST_GROUP/verify.sh"
+  if [ -f "$VERIFY_SCRIPT" ]; then
+    . "$VERIFY_SCRIPT"
+  fi
 }
 
-if [ $# = 0 ]; then
+if [ $# -eq 0 ]; then
   usage
   exit 1
 fi
 
 CMD=$1
 shift
-MAVEN_IGNORE="-P skip-static-checks,skip-tests -Dmaven.javadoc.skip=true"
+if [ $# -gt 0 ]; then
+  CATEGORY=$1
+  shift
+fi
+
+# Handle an IT in either the usual druid-it-cases project, or elsewhere,
+# typically in an extension. The Maven module, if needed, must be the third
+# parameter in path, not coordinate, form.
+if [ $# -eq 0 ]; then
+  # Use the usual project
+  MAVEN_PROJECT=":druid-it-cases"
+  # Don't provide a project path to cluster.sh
+  unset IT_MODULE_DIR
+  # Generate the override.sh file in the druid-it-cases module
+  MODULE_DIR=$DRUID_DEV/integration-tests-ex/cases
+else
+  # The test module is given via the command line argument as a relative path
+  MAVEN_PROJECT="$1"
+  # Compute the full path to the target module for use by cluster.sh
+  export IT_MODULE_DIR="$DRUID_DEV/$1"
+  # Write the override.sh file to the target module
+  MODULE_DIR=$IT_MODULE_DIR
+  shift
+fi
+
+IT_CASES_DIR="$DRUID_DEV/integration-tests-ex/cases"
+
+# Added -Dcyclonedx.skip=true to avoid ISO-8859-1 [ERROR]s
+# May be fixed in the future
+MAVEN_IGNORE="-P skip-static-checks,skip-tests -Dmaven.javadoc.skip=true -Dcyclonedx.skip=true"
+TEST_OPTIONS="verify -P skip-static-checks,docker-tests \
+    -Dmaven.javadoc.skip=true -Dcyclonedx.skip=true -DskipUTs=true"
 
 case $CMD in
   "help" )
     usage
     ;;
   "ci" )
-    mvn -q clean package dependency:go-offline -P dist $MAVEN_IGNORE
+    mvn -q clean install dependency:go-offline -P dist $MAVEN_IGNORE
     ;;
   "build" )
-    mvn clean package -P dist $MAVEN_IGNORE -T1.0C $*
+    mvn clean install -P dist $MAVEN_IGNORE -T1.0C $*
    ;;
  "dist" )
-    mvn package -P dist $MAVEN_IGNORE -pl :distribution
+    mvn install -P dist $MAVEN_IGNORE -pl :distribution
    ;;
  "tools" )
    mvn install -pl :druid-it-tools
@@ -218,44 +247,70 @@ case $CMD in
   "gen")
     # Generate the docker-compose.yaml files. Mostly for debugging
     # since the up command does generation implicitly.
-    prepare_category $1
-    prepare_docker
-    ./cluster.sh gen $CATEGORY
+    require_category
+    $IT_CASES_DIR/cluster.sh gen $CATEGORY
     ;;
   "up" )
-    prepare_category $1
-    prepare_docker
-    ./cluster.sh up $CATEGORY
+    require_category
+    build_override
+    verify_env_vars
+    $IT_CASES_DIR/cluster.sh up $CATEGORY
     ;;
   "down" )
-    prepare_category $1
-    prepare_docker
-    ./cluster.sh down $CATEGORY
+    require_category
+    reuse_override
+    $IT_CASES_DIR/cluster.sh down $CATEGORY
     ;;
+  "run" )
+    require_category
+    reuse_override
+    mvn $TEST_OPTIONS -P IT-$CATEGORY -pl $MAVEN_PROJECT
+    ;;
   "test" )
-    prepare_category $1
-    prepare_docker
-    mvn verify -P skip-static-checks,docker-tests,IT-$CATEGORY \
-        -Dmaven.javadoc.skip=true -DskipUTs=true \
-        -pl :druid-it-cases
+    require_category
+    build_override
+    verify_env_vars
+    $IT_CASES_DIR/cluster.sh up $CATEGORY
+
+    # Run the test. On failure, still shut down the cluster.
+    # Return Maven's return code as the script's return code.
+    set +e
+    mvn $TEST_OPTIONS -P IT-$CATEGORY -pl $MAVEN_PROJECT
+    RESULT=$?
+    set -e
+    $IT_CASES_DIR/cluster.sh down $CATEGORY
+    exit $RESULT
     ;;
   "tail" )
-    prepare_category $1
-    tail_logs $CATEGORY
+    require_category
+    tail_logs
     ;;
   "github" )
-    prepare_category $1
     set +e
     $0 test $CATEGORY
-    $0 tail $CATEGORY
     RESULT=$?
+
+    # Include logs, but only for failures.
+    if [ $RESULT -ne 0 ]; then
+      $0 tail $CATEGORY
+    fi
     exit $RESULT
     ;;
-  "prune" )
+  # Name is deliberately long to avoid accidental use.
+  "prune-containers" )
+    if [ $(docker ps | wc -l) -ne 1 ]; then
+      echo "Cleaning running containers"
+      docker ps
+      docker ps -aq | xargs -r docker rm -f
+    fi
+    ;;
+  "prune-volumes" )
     # Caution: this removes all volumes, which is generally what you
     # want when testing.
-    docker system prune --volumes
+    docker system prune -af --volumes
     ;;
   * )
     usage
-    exit -1
+    exit 1
     ;;
 esac
@@ -297,7 +297,7 @@ public abstract class QueryResultPusher
   /**
    * Gets the results of running the query. {@link #start} must be called before this method is called.
    *
-   * @return the results of running the query as preparted by the {@link #start()} method
+   * @return the results of running the query as prepared by the {@link #start()} method
    */
   QueryResponse<Object> getQueryResponse();
@@ -64,7 +64,6 @@ import java.util.List;
 import java.util.Set;
 import java.util.stream.Collectors;
 
-
 @Command(
     name = "pull-deps",
     description = "Pull down dependencies to the local repository specified by druid.extensions.localRepository, extensions directory specified by druid.extensions.extensionsDir and hadoop dependencies directory specified by druid.extensions.hadoopDependenciesDir"
@@ -88,39 +87,38 @@ public class PullDependencies implements Runnable
       .put("com.fasterxml.jackson.core", "jackson-annotations")
       .build();
 
-  // It is possible that extensions will pull down a lot of jars that are either
-  // duplicates OR conflict with druid jars. In that case, there are two problems that arise
-  //
-  // 1. Large quantity of jars are passed around to things like hadoop when they are not needed (and should not be included)
-  // 2. Classpath priority becomes "mostly correct" and attempted to enforced correctly, but not fully tested
-  //
-  // These jar groups should be included by druid and *not* pulled down in extensions
-  // Note to future developers: This list is hand-crafted and will probably be out of date in the future
-  // A good way to know where to look for errant dependencies is to compare the lib/ directory in the distribution
-  // tarball with the jars included in the extension directories.
-  //
-  // This list is best-effort, and might still pull down more than desired.
-  //
-  // A simple example is that if an extension's dependency uses some-library-123.jar,
-  // druid uses some-library-456.jar, and hadoop uses some-library-666.jar, then we probably want to use some-library-456.jar,
-  // so don't pull down some-library-123.jar, and ask hadoop to load some-library-456.jar.
-  //
-  // In the case where some-library is NOT on this list, both some-library-456.jar and some-library-123.jar will be
-  // on the class path and propagated around the system. Most places TRY to make sure some-library-456.jar has
-  // precedence, but it is easy for this assumption to be violated and for the precedence of some-library-456.jar,
-  // some-library-123.jar and some-library-456.jar to not be properly defined.
-  //
-  // As of this writing there are no special unit tests for classloader issues and library version conflicts.
-  //
-  // Different tasks which are classloader sensitive attempt to maintain a sane order for loading libraries in the
-  // classloader, but it is always possible that something didn't load in the right order. Also we don't want to be
-  // throwing around a ton of jars we don't need to.
-  //
-  // Here is a list of dependencies extensions should probably exclude.
-  //
-  // Conflicts can be discovered using the following command on the distribution tarball:
-  // `find lib -iname "*.jar" | cut -d / -f 2 | sed -e 's/-[0-9]\.[0-9]/@/' | cut -f 1 -d @ | sort | uniq | xargs -I {} find extensions -name "*{}*.jar" | sort`
+  /*
+  It is possible that extensions will pull down a lot of jars that are either
+  duplicates OR conflict with druid jars. In that case, there are two problems that arise
+
+  1. Large quantity of jars are passed around to things like hadoop when they are not needed (and should not be included)
+  2. Classpath priority becomes "mostly correct" and attempted to enforced correctly, but not fully tested
+
+  These jar groups should be included by druid and *not* pulled down in extensions
+  Note to future developers: This list is hand-crafted and will probably be out of date in the future
+  A good way to know where to look for errant dependencies is to compare the lib/ directory in the distribution
+  tarball with the jars included in the extension directories.
+
+  This list is best-effort, and might still pull down more than desired.
+
+  A simple example is that if an extension's dependency uses some-library-123.jar,
+  druid uses some-library-456.jar, and hadoop uses some-library-666.jar, then we probably want to use some-library-456.jar,
+  so don't pull down some-library-123.jar, and ask hadoop to load some-library-456.jar.
+
+  In the case where some-library is NOT on this list, both some-library-456.jar and some-library-123.jar will be
+  on the class path and propagated around the system. Most places TRY to make sure some-library-456.jar has
+  precedence, but it is easy for this assumption to be violated and for the precedence of some-library-456.jar,
+  some-library-123.jar and some-library-456.jar to not be properly defined.
+
+  As of this writing there are no special unit tests for classloader issues and library version conflicts.
+
+  Different tasks which are classloader sensitive attempt to maintain a sane order for loading libraries in the
+  classloader, but it is always possible that something didn't load in the right order. Also we don't want to be
+  throwing around a ton of jars we don't need to.
+
+  Here is a list of dependencies extensions should probably exclude.
+
+  Conflicts can be discovered using the following command on the distribution tarball:
+  `find lib -iname "*.jar" | cut -d / -f 2 | sed -e 's/-[0-9]\.[0-9]/@/' | cut -f 1 -d @ | sort | uniq | xargs -I {} find extensions -name "*{}*.jar" | sort`
+  */
 
       "org.apache.druid",
       "com.metamx.druid",
@@ -395,7 +395,7 @@
         </configuration>
       </plugin>
 
-      <!-- Adds the path of the generated parser to the classpath -->
+      <!-- Adds the path of the generated parser to the build path -->
       <plugin>
         <groupId>org.codehaus.mojo</groupId>
         <artifactId>build-helper-maven-plugin</artifactId>