Support both Indexer and MiddleManager in ITs (#13660)

Support both indexer and MM in ITs

* Support for the DRUID_INTEGRATION_TEST_INDEXER variable
* Conditional client cluster configuration
* Cleanup of OVERRIDE_ENV file handling
* Enforce setting of test-specific env vars
* Cleanup of unused bits
Paul Rogers 2023-01-14 14:34:06 -08:00 committed by GitHub
parent 566fc990e4
commit ed623d626f
17 changed files with 421 additions and 115 deletions

View File

@ -51,28 +51,28 @@ fi
CMD=$1
shift

# All commands need env vars
ENV_FILE=$MODULE_DIR/../image/target/env.sh
if [ ! -f $ENV_FILE ]; then
  echo "Please build the Docker test image before testing" 1>&2
  exit 1
fi
source $ENV_FILE

function category {
  if [ $# -eq 0 ]; then
    usage 1>&2
    exit 1
  fi
  export CATEGORY=$1

  # All commands need env vars
  ENV_FILE=$MODULE_DIR/../image/target/env.sh
  if [ ! -f $ENV_FILE ]; then
    echo "Please build the Docker test image before testing" 1>&2
    exit 1
  fi
  source $ENV_FILE

  # The untranslated category is used for the local name of the
  # shared folder.
  # DRUID_INTEGRATION_TEST_GROUP is used in
  # docker-compose files and here. Despite the name, it is the
  # name of the cluster configuration we want to run, not the
  # test category. Multiple categories an map to the same cluster
  # test category. Multiple categories can map to the same cluster
  # definition.

  # Map from category name to shared cluster definition name.
@ -105,34 +105,6 @@ function category {
  export ENV_FILE="$TARGET_DIR/${CATEGORY}.env"
}
function build_override {
  mkdir -p target
  rm -f "$ENV_FILE"
  touch "$ENV_FILE"

  # User-local settings?
  LOCAL_ENV="$HOME/druid-it/${CATEGORY}.env"
  if [ -f "$LOCAL_ENV" ]; then
    cat "$LOCAL_ENV" >> "$ENV_FILE"
  fi

  # Provided override file
  if [ -n "$OVERRIDE_ENV" ]; then
    if [ ! -f "$OVERRIDE_ENV" ]; then
      echo "Environment override file (OVERRIDE_ENV) not found: $OVERRIDE_ENV" 1>&2
      exit 1
    fi
    cat "$OVERRIDE_ENV" >> "$ENV_FILE"
  fi

  # Add all environment variables of the form druid_*
  env | grep "^druid_" >> "$ENV_FILE"

  # Reuse the OVERRIDE_ENV variable to pass the full list to Docker compose
  export OVERRIDE_ENV="$ENV_FILE"
}
# Dump lots of information to debug Docker failures when run inside
# of a build environment where we can't inspect Docker directly.
function show_status {
@ -165,6 +137,31 @@ function build_shared_dir {
  chmod -R a+rwx $SHARED_DIR
}

# Each test must have a default docker-compose.yaml file, which corresponds to using
# the MiddleManager (or no indexer). A test can optionally include a second file called
# docker-compose-indexer.yaml, which uses the Indexer in place of the Middle Manager.
function docker_file {
  compose_args=""
  if [ -n "$DRUID_INTEGRATION_TEST_INDEXER" ]; then
    # Sanity check: DRUID_INTEGRATION_TEST_INDEXER must be "indexer" or "middleManager"
    # if it is set at all.
    if [ "$DRUID_INTEGRATION_TEST_INDEXER" != "indexer" ] && [ "$DRUID_INTEGRATION_TEST_INDEXER" != "middleManager" ]
    then
      echo "DRUID_INTEGRATION_TEST_INDEXER must be 'indexer' or 'middleManager' (is '$DRUID_INTEGRATION_TEST_INDEXER')" 1>&2
      exit 1
    fi
    if [ "$DRUID_INTEGRATION_TEST_INDEXER" == "indexer" ]; then
      compose_file=docker-compose-indexer.yaml
      if [ ! -f "$CLUSTER_DIR/$compose_file" ]; then
        echo "DRUID_INTEGRATION_TEST_INDEXER=$DRUID_INTEGRATION_TEST_INDEXER, but $CLUSTER_DIR/$compose_file is missing" 1>&2
        exit 1
      fi
      compose_args="-f $compose_file"
    fi
  fi
  echo $compose_args
}
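
A sketch of how this plays out (hypothetical commands; `BatchIndex` stands in for any category that provides a `docker-compose-indexer.yaml`):

```bash
# Default: docker_file emits nothing, so docker-compose.yaml is used
./cluster.sh up BatchIndex

# With the env var set, docker_file emits "-f docker-compose-indexer.yaml",
# swapping the Indexer in for the Middle Manager
DRUID_INTEGRATION_TEST_INDEXER=indexer ./cluster.sh up BatchIndex
```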
# Print environment for debugging
#env
@ -179,10 +176,9 @@ case $CMD in
"up" )
category $*
echo "Starting cluster $DRUID_INTEGRATION_TEST_GROUP"
build_override
build_shared_dir
cd $CLUSTER_DIR
docker-compose up -d
docker-compose `docker_file` up -d
# Enable the following for debugging
#show_status
;;
@ -196,12 +192,12 @@ case $CMD in
# Enable the following for debugging
#show_status
cd $CLUSTER_DIR
echo OVERRIDE_ENV="$ENV_FILE" docker-compose $CMD
OVERRIDE_ENV="$ENV_FILE" docker-compose $CMD
echo docker-compose `docker_file` $CMD
docker-compose `docker_file` $CMD
;;
"*" )
category $*
cd $CLUSTER_DIR
OVERRIDE_ENV="$ENV_FILE" docker-compose $CMD
docker-compose `docker_file` $CMD
;;
esac

View File

@ -12,6 +12,15 @@
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# -------------------------------------------------------------------------
# Cluster for the Azure deep storage test.
#
# Required env vars:
#
# AZURE_ACCOUNT
# AZURE_KEY
# AZURE_CONTAINER
networks:
  druid-it-net:
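
A minimal sketch of a local run of this category (placeholder values, not real credentials):

```bash
# Placeholder values: substitute your own Azure storage account details
export AZURE_ACCOUNT=myaccount
export AZURE_KEY=c2VjcmV0LWtleQ==
export AZURE_CONTAINER=druid-deep-storage

./it.sh test AzureDeepStorage
```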

View File

@ -0,0 +1,98 @@
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
networks:
  druid-it-net:
    name: druid-it-net
    ipam:
      config:
        - subnet: 172.172.172.0/24

services:
  zookeeper:
    extends:
      file: ../Common/dependencies.yaml
      service: zookeeper

  metadata:
    extends:
      file: ../Common/dependencies.yaml
      service: metadata

  coordinator:
    extends:
      file: ../Common/druid.yaml
      service: coordinator
    container_name: coordinator
    environment:
      - DRUID_INTEGRATION_TEST_GROUP=${DRUID_INTEGRATION_TEST_GROUP}
      # The frequency with which the coordinator polls the database
      # for changes. The DB population code has to wait at least this
      # long for the coordinator to notice changes.
      - druid_manager_segments_pollDuration=PT5S
      - druid_coordinator_period=PT10S
    depends_on:
      - zookeeper
      - metadata

  overlord:
    extends:
      file: ../Common/druid.yaml
      service: overlord
    container_name: overlord
    environment:
      - DRUID_INTEGRATION_TEST_GROUP=${DRUID_INTEGRATION_TEST_GROUP}
    depends_on:
      - zookeeper
      - metadata

  broker:
    extends:
      file: ../Common/druid.yaml
      service: broker
    environment:
      - DRUID_INTEGRATION_TEST_GROUP=${DRUID_INTEGRATION_TEST_GROUP}
    depends_on:
      - zookeeper

  router:
    extends:
      file: ../Common/druid.yaml
      service: router
    environment:
      - DRUID_INTEGRATION_TEST_GROUP=${DRUID_INTEGRATION_TEST_GROUP}
    depends_on:
      - zookeeper

  historical:
    extends:
      file: ../Common/druid.yaml
      service: historical
    environment:
      - DRUID_INTEGRATION_TEST_GROUP=${DRUID_INTEGRATION_TEST_GROUP}
    depends_on:
      - zookeeper

  indexer:
    extends:
      file: ../Common/druid.yaml
      service: indexer
    environment:
      - DRUID_INTEGRATION_TEST_GROUP=${DRUID_INTEGRATION_TEST_GROUP}
    volumes:
      # Test data
      - ../../resources:/resources
    depends_on:
      - zookeeper
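
A file like this can be sanity-checked without starting the cluster. A hypothetical check, run from the category's cluster directory so that the `../Common` includes resolve:

```bash
# Render and validate the merged service definitions
cd cluster/MyCategory   # hypothetical category directory
DRUID_INTEGRATION_TEST_GROUP=MyCategory \
  docker-compose -f docker-compose-indexer.yaml config
```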

View File

@ -85,10 +85,10 @@ services:
    depends_on:
      - zookeeper

  indexer:
  middlemanager:
    extends:
      file: ../Common/druid.yaml
      service: indexer
      service: middlemanager
    environment:
      - DRUID_INTEGRATION_TEST_GROUP=${DRUID_INTEGRATION_TEST_GROUP}
    volumes:

View File

@ -12,6 +12,16 @@
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# -------------------------------------------------------------------------
# Cluster for the Google Cloud Storage (GCS) deep storage test.
#
# Required env vars:
#
# GOOGLE_BUCKET
# GOOGLE_PREFIX
# GOOGLE_APPLICATION_CREDENTIALS - must point to a file that holds the Google
# credentials. Mounted into each Druid container.
networks:
  druid-it-net:
@ -49,7 +59,7 @@ services:
      - druid_manager_segments_pollDuration=PT5S
      - druid_coordinator_period=PT10S
    volumes:
      # Copy credentials file
      # Mount credentials file
      - ${GOOGLE_APPLICATION_CREDENTIALS}:/resources/credentials.json
    depends_on:
      - zookeeper
@ -68,7 +78,7 @@ services:
      - druid_google_prefix=${GOOGLE_PREFIX}
      - GOOGLE_APPLICATION_CREDENTIALS=/resources/credentials.json
    volumes:
      # Copy credentials file
      # Mount credentials file
      - ${GOOGLE_APPLICATION_CREDENTIALS}:/resources/credentials.json
    depends_on:
      - zookeeper
@ -86,7 +96,7 @@ services:
      - druid_google_prefix=${GOOGLE_PREFIX}
      - GOOGLE_APPLICATION_CREDENTIALS=/resources/credentials.json
    volumes:
      # Copy credentials file
      # Mount credentials file
      - ${GOOGLE_APPLICATION_CREDENTIALS}:/resources/credentials.json
    depends_on:
      - zookeeper
@ -103,7 +113,7 @@ services:
      - druid_google_prefix=${GOOGLE_PREFIX}
      - GOOGLE_APPLICATION_CREDENTIALS=/resources/credentials.json
    volumes:
      # Copy credentials file
      # Mount credentials file
      - ${GOOGLE_APPLICATION_CREDENTIALS}:/resources/credentials.json
    depends_on:
      - zookeeper
@ -120,7 +130,7 @@ services:
      - druid_google_prefix=${GOOGLE_PREFIX}
      - GOOGLE_APPLICATION_CREDENTIALS=/resources/credentials.json
    volumes:
      # Copy credentials file
      # Mount credentials file
      - ${GOOGLE_APPLICATION_CREDENTIALS}:/resources/credentials.json
    depends_on:
      - zookeeper
@ -137,7 +147,7 @@ services:
      - druid_google_prefix=${GOOGLE_PREFIX}
      - GOOGLE_APPLICATION_CREDENTIALS=/resources/credentials.json
    volumes:
      # Copy credentials file
      # Mount credentials file
      - ${GOOGLE_APPLICATION_CREDENTIALS}:/resources/credentials.json
      # Test data
      - ../data:/resources
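
Since the credentials file is mounted rather than copied, it must exist on the host for the entire run. A hypothetical local setup:

```bash
# Placeholder values for the GCS deep storage test
export GOOGLE_BUCKET=my-druid-it-bucket
export GOOGLE_PREFIX=druid-it

# Must name an existing service-account key file; docker-compose mounts it
# into each container as /resources/credentials.json
export GOOGLE_APPLICATION_CREDENTIALS=$HOME/keys/gcs-service-account.json

./it.sh test GcsDeepStorage
```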

View File

@ -12,6 +12,15 @@
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# -------------------------------------------------------------------------
# Cluster for the S3 deep storage test.
#
# Required env vars:
#
# AWS_REGION
# AWS_ACCESS_KEY_ID
# AWS_SECRET_ACCESS_KEY
networks:
  druid-it-net:

View File

@ -24,6 +24,7 @@ import com.fasterxml.jackson.annotation.JsonInclude.Include;
import com.fasterxml.jackson.annotation.JsonProperty;
import com.fasterxml.jackson.databind.ObjectMapper;
import com.fasterxml.jackson.dataformat.yaml.YAMLFactory;
import com.google.common.collect.ImmutableSet;
import org.apache.druid.java.util.common.ISE;
import org.apache.druid.testsEx.config.ServiceConfig.DruidConfig;
import org.apache.druid.testsEx.config.ServiceConfig.ZKConfig;
@ -35,6 +36,7 @@ import java.util.ArrayList;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.Set;
/**
* Java representation of the test configuration YAML.
@ -164,7 +166,24 @@ public class ClusterConfig
  public ResolvedConfig resolve(String clusterName)
  {
    return new ResolvedConfig(clusterName, resolveIncludes());
    Set<String> configTags = createConfigTags();
    return new ResolvedConfig(clusterName, resolveIncludes(), configTags);
  }

  /**
   * Create the set of configuration tags for this run. At present, the only options
   * are "middleManager" or "indexer" corresponding to the value of the
   * {@code DRUID_INTEGRATION_TEST_INDEXER} env var which says whether this cluster has
   * an indexer or middle manager.
   */
  private Set<String> createConfigTags()
  {
    String indexer = "middleManager";
    String indexerValue = System.getenv("DRUID_INTEGRATION_TEST_INDEXER");
    if (indexerValue != null) {
      indexer = indexerValue;
    }
    return ImmutableSet.of(indexer);
  }
public ClusterConfig resolveIncludes()

View File

@ -400,7 +400,6 @@ public class Initializer
  private final ResolvedConfig clusterConfig;
  private final Injector injector;
  private final Lifecycle lifecycle;
  private MetastoreClient metastoreClient;
  private DruidClusterClient clusterClient;

  private Initializer(Builder builder)

View File

@ -28,10 +28,9 @@ public class KafkaConfig extends ServiceConfig
{
  @JsonCreator
  public KafkaConfig(
      @JsonProperty("service") String service,
      @JsonProperty("instances") List<ServiceInstance> instances
  )
  {
    super(service, instances);
    super(instances);
  }
}

View File

@ -58,7 +58,6 @@ public class MetastoreConfig extends ServiceConfig
  @JsonCreator
  public MetastoreConfig(
      @JsonProperty("service") String service,
      @JsonProperty("driver") String driver,
      @JsonProperty("connectURI") String connectURI,
      @JsonProperty("user") String user,
@ -67,7 +66,7 @@ public class MetastoreConfig
      @JsonProperty("instances") List<ServiceInstance> instances
  )
  {
    super(service, instances);
    super(instances);
    this.driver = driver;
    this.connectURI = connectURI;
    this.user = user;

View File

@ -39,6 +39,7 @@ import java.nio.charset.StandardCharsets;
import java.util.HashMap;
import java.util.Map;
import java.util.Map.Entry;
import java.util.Set;
public class ResolvedConfig
{
@ -67,7 +68,7 @@ public class ResolvedConfig
  private final ResolvedMetastore metastore;
  private final Map<String, ResolvedDruidService> druidServices = new HashMap<>();

  public ResolvedConfig(String category, ClusterConfig config)
  public ResolvedConfig(String category, ClusterConfig config, Set<String> configTags)
  {
    this.category = category;
    type = config.type() == null ? ClusterType.docker : config.type();
@ -115,8 +116,14 @@
    if (config.druid() != null) {
      for (Entry<String, DruidConfig> entry : config.druid().entrySet()) {
        DruidConfig service = entry.getValue();
        if (service.ifTag() != null) {
          if (!configTags.contains(service.ifTag())) {
            continue;
          }
        }
        druidServices.put(entry.getKey(),
            new ResolvedDruidService(this, entry.getValue(), entry.getKey()));
            new ResolvedDruidService(this, service, entry.getKey()));
      }
    }
  }

View File

@ -36,7 +36,7 @@ public class ResolvedService
  public ResolvedService(ResolvedConfig root, ServiceConfig config, String name)
  {
    this.service = config.service() == null ? name : config.service();
    this.service = name;
    for (ServiceInstance instanceConfig : config.instances()) {
      this.instances.add(new ResolvedInstance(root, instanceConfig, this));
    }

View File

@ -24,29 +24,21 @@ import com.fasterxml.jackson.annotation.JsonInclude;
import com.fasterxml.jackson.annotation.JsonInclude.Include;
import com.fasterxml.jackson.annotation.JsonProperty;

import javax.annotation.Nullable;
import java.util.List;

public class ServiceConfig
{
  protected final String service;
  protected List<ServiceInstance> instances;

  public ServiceConfig(
      String service,
      List<ServiceInstance> instances
  )
  {
    this.service = service;
    this.instances = instances;
  }

  @JsonProperty("service")
  @JsonInclude(Include.NON_NULL)
  public String service()
  {
    return service;
  }

  @JsonProperty("instances")
  @JsonInclude(Include.NON_NULL)
  public List<ServiceInstance> instances()
@ -74,12 +66,11 @@ public class ServiceConfig
    @JsonCreator
    public ZKConfig(
        @JsonProperty("service") String service,
        @JsonProperty("startTimeoutSecs") int startTimeoutSecs,
        @JsonProperty("instances") List<ServiceInstance> instances
    )
    {
      super(service, instances);
      super(instances);
      this.startTimeoutSecs = startTimeoutSecs;
    }
@ -96,21 +87,41 @@ public class ServiceConfig
   * in the {@code druid} map: <code><pre>
   * druid:
   *   broker:  # <-- key (service name)
   *     if: config-tag
   *     instances:
   *       ...
   * </pre></code>
   *
   * Where {@code config-tag} is a string that indicates a config
   * option. At present there are two:
   * <ul>
   * <li>{@code middleManager}: cluster uses the Middle Manager.</li>
   * <li>{@code indexer}: cluster uses the Indexer.</li>
   * </ul>
   *
   * A service is included in the resolved config only if the corresponding
   * config tag is set.
   */
  public static class DruidConfig extends ServiceConfig
  {
    private final String ifTag;

    @JsonCreator
    public DruidConfig(
        // Note: service is not actually used.
        @JsonProperty("service") String service,
        @JsonProperty("if") String ifTag,
        @JsonProperty("instances") List<ServiceInstance> instances
    )
    {
      super(service, instances);
      super(instances);
      this.ifTag = ifTag;
    }

    @Nullable
    @JsonProperty("if")
    @JsonInclude(Include.NON_NULL)
    public String ifTag()
    {
      return ifTag;
    }
  }
}

View File

@ -31,6 +31,11 @@ druid:
    instances:
      - port: 8083
  middlemanager:
    if: middleManager
    instances:
      - port: 8091
  indexer:
    if: indexer
    instances:
      - port: 8091
  broker:

View File

@ -239,3 +239,29 @@ test. Your test will run in parallel with all other IT categories, which is why
we offered the advice above: the test has to have a good reason to fire up yet
another build task.
### Choosing the Middle Manager or Indexer
Tests run on the Middle Manager by default. Tests can optionally run on the
Indexer. To run on the Indexer:

* In the environment, `export DRUID_INTEGRATION_TEST_INDEXER=indexer`. (Use `middleManager`
  otherwise. If the variable is not set, `middleManager` is the default.)
* The `cluster/<category>/docker-compose.yaml` file should be for the Middle Manager. Create
  a separate file called `cluster/<category>/docker-compose-indexer.yaml` to define the
  Indexer-based cluster.
* The test `src/test/resources/cluster/<category>/docker.yaml` file should contain a conditional
  entry to define either the Middle Manager or the Indexer. Example:
```yaml
middlemanager:
  if: middleManager
  instances:
    - port: 8091
indexer:
  if: indexer
  instances:
    - port: 8091
```
Now the test will run on the Indexer if the environment variable above is set to `indexer`,
and on the Middle Manager otherwise.
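
For example, a complete local session on the Indexer might look like the following sketch (assuming a `BatchIndex` category that provides `docker-compose-indexer.yaml`):

```bash
# Select the Indexer for all subsequent cluster and test commands
export DRUID_INTEGRATION_TEST_INDEXER=indexer

./it.sh up BatchIndex     # starts the Indexer-based cluster
./it.sh test BatchIndex   # runs the category's ITs against it
./it.sh down BatchIndex   # tears the cluster down
```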

View File

@ -414,15 +414,28 @@ changes itself somehow.
Generic object to describe Docker Compose services.
#### `service`

```yaml
service: <service name>
```

Name of the service as known to Docker Compose. Defaults to be
the same as the service name used in this configuration file.

#### `if`

Conditionally defines a service. The system defines a set of configuration tags.
At present there are only two:

* `middleManager`: the cluster runs a Middle Manager
* `indexer`: the cluster runs an Indexer

The `if` tag enables a service only if the corresponding tag is set.
Thus, for a cluster that can use either a Middle Manager or an Indexer:

```yaml
middlemanager:
  if: middleManager
  instances:
    - port: 8091
indexer:
  if: indexer
  instances:
    - port: 8091
```
#### `instances`
```yaml

it.sh
View File

@ -17,9 +17,14 @@
#--------------------------------------------------------------------
# Utility script for running the new integration tests, since the Maven
# commands are unwieldy.
# commands are unwieldy. Allows straightforward usage of ITs on the desktop
# and in various build scripts. Handles configuration of various kinds.
set -e
# Enable for debugging
#set -x
export DRUID_DEV=$(cd $(dirname $0) && pwd)
function usage
@ -46,6 +51,15 @@ Usage: $0 cmd [category]
      run one IT in Travis (build dist, image, run test, tail logs)
  prune
      prune Docker volumes

Environment:
  OVERRIDE_ENV: optional, name of an env file to pass to Docker
  DRUID_INTEGRATION_TEST_INDEXER: set to "middleManager" (the default if not set)
      or "indexer". If "indexer", the category must provide a docker-compose-indexer.yaml file.
  druid_*: passed to the container.
  Other, test-specific variables.

See the docs for additional details.
EOF
}
@ -60,6 +74,113 @@ function tail_logs
done
}
# Many tests require us to pass information into containers using environment variables.
# The Docker environment is distinct from the environment running this script. We bridge
# the two by passing into Docker compose a file that contains all env vars we want to
# "export" from our local environment into the container environment.
# There are four ways to provide these options:
#
# 1. Directly in the environment. (Simplest and best.) We support a fixed set of variables:
#    <need the list>
# 2. For ad-hoc use, as var=value pairs in a file with the same name as the
#    test category, in the home folder under ~/druid-it. Example:
#    BatchIndex.env. Use this to hold credentials and other info which you must
#    pass into tests when running locally.
# 3. A file given by the OVERRIDE_ENV environment variable. That is, OVERRIDE_ENV holds
#    the path to a file of var=value pairs. Historically, this file was created by a
#    build environment such as Travis. However, it is actually simpler just to use
#    option 1: set the values in the environment and let Linux pass them through to
#    this script.
# 4. Environment variables of the form "druid_", used to create the Druid config file.
#
# All of the above are combined into a temporary environment file which is then passed
# into Docker compose.
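
As a hypothetical illustration of option 2, a user-local file named for the category can hold the credentials that category needs:

```bash
# ~/druid-it/S3DeepStorage.env -- hypothetical contents; these var=value
# pairs are appended to the override file whenever S3DeepStorage runs
AWS_REGION=us-east-1
AWS_ACCESS_KEY_ID=AKIAEXAMPLEKEY
AWS_SECRET_ACCESS_KEY=exampleSecretValue
```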
function build_override {

  mkdir -p target
  OVERRIDE_FILE="override.env"
  rm -f "$OVERRIDE_FILE"
  touch "$OVERRIDE_FILE"

  # Provided override file
  if [ -n "$OVERRIDE_ENV" ]; then
    if [ ! -f "$OVERRIDE_ENV" ]; then
      echo "Environment override file OVERRIDE_ENV not found: $OVERRIDE_ENV" 1>&2
      exit 1
    fi
    cat "$OVERRIDE_ENV" >> "$OVERRIDE_FILE"
  fi

  # User-local settings?
  LOCAL_ENV="$HOME/druid-it/${CATEGORY}.env"
  if [ -f "$LOCAL_ENV" ]; then
    cat "$LOCAL_ENV" >> "$OVERRIDE_FILE"
  fi

  # Add all environment variables of the form druid_*
  set +e # Grep gives exit status 1 if no lines match. Let's not fail.
  env | grep "^druid_" >> "$OVERRIDE_FILE"
  set -e

  # TODO: Add individual env vars that we want to pass from the local
  # environment into the container.

  # Reuse the OVERRIDE_ENV variable to pass the full list to Docker compose
  target_dir=`pwd`
  export OVERRIDE_ENV="$target_dir/$OVERRIDE_FILE"
}
function prepare_category {
  if [ $# -eq 0 ]; then
    usage 1>&2
    exit 1
  fi
  export CATEGORY=$1
}

function prepare_docker {
  cd $DRUID_DEV/integration-tests-ex/cases
  build_override
  verify_env_vars
}
function require_env_var {
  # Check, via bash indirect expansion, that the variable named by $1 has a value.
  if [ -z "${!1}" ]; then
    echo "$1 must be set for test category $CATEGORY" 1>&2
    exit 1
  fi
}
# Verify any test-specific environment variables that must be set in this local
# environment (and generally passed into the Docker container via docker-compose.yaml).
#
# Add entries here as you add env var references in docker-compose.yaml. Doing so
# ensures we get useful error messages when we forget to set something, rather than
# some cryptic, use-case-specific error.
function verify_env_vars {
  case $CATEGORY in
    "AzureDeepStorage")
      require_env_var AZURE_ACCOUNT
      require_env_var AZURE_KEY
      require_env_var AZURE_CONTAINER
      ;;
    "GcsDeepStorage")
      require_env_var GOOGLE_BUCKET
      require_env_var GOOGLE_PREFIX
      require_env_var GOOGLE_APPLICATION_CREDENTIALS
      if [ ! -f "$GOOGLE_APPLICATION_CREDENTIALS" ]; then
        echo "Required file GOOGLE_APPLICATION_CREDENTIALS=$GOOGLE_APPLICATION_CREDENTIALS is missing" 1>&2
        exit 1
      fi
      ;;
    "S3DeepStorage")
      require_env_var AWS_REGION
      require_env_var AWS_ACCESS_KEY_ID
      require_env_var AWS_SECRET_ACCESS_KEY
      ;;
  esac
}
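
With these checks in place, a forgotten variable fails fast with a clear message rather than a cryptic failure mid-test. A hypothetical session:

```bash
$ ./it.sh up AzureDeepStorage      # with AZURE_KEY unset
AZURE_KEY must be set for test category AzureDeepStorage
```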
CMD=$1
shift
MAVEN_IGNORE="-P skip-static-checks,skip-tests -Dmaven.javadoc.skip=true"
@ -82,48 +203,33 @@ case $CMD in
    mvn install -P test-image $MAVEN_IGNORE
    ;;
  "up" )
    if [ -z "$1" ]; then
      usage
      exit 1
    fi
    cd $DRUID_DEV/integration-tests-ex/cases
    ./cluster.sh up $1
    prepare_category $1
    prepare_docker
    ./cluster.sh up $CATEGORY
    ;;
  "down" )
    if [ -z "$1" ]; then
      usage
      exit 1
    fi
    cd $DRUID_DEV/integration-tests-ex/cases
    ./cluster.sh down $1
    prepare_category $1
    prepare_docker
    ./cluster.sh down $CATEGORY
    ;;
  "test" )
    if [ -z "$1" ]; then
      usage
      exit 1
    fi
    cd $DRUID_DEV/integration-tests-ex/cases
    mvn verify -P skip-static-checks,docker-tests,IT-$1 \
    prepare_category $1
    prepare_docker
    mvn verify -P skip-static-checks,docker-tests,IT-$CATEGORY \
        -Dmaven.javadoc.skip=true -DskipUTs=true \
        -pl :druid-it-cases
    ;;
  "tail" )
    if [ -z "$1" ]; then
      usage
      exit 1
    fi
    tail_logs $1
    prepare_category $1
    tail_logs $CATEGORY
    ;;
  "travis" )
    prepare_category $1
    $0 dist
    $0 image
    $0 test $CATEGORY
    $0 tail $CATEGORY
    ;;
  "travis" )
    if [ -z "$1" ]; then
      usage
      exit 1
    fi
    $0 dist
    $0 image
    $0 test $1
    $0 tail $1
    ;;
  "prune" )
    # Caution: this removes all volumes, which is generally what you
    # want when testing.