mirror of https://github.com/apache/druid.git
Integration tests for JDK 11 (#9249)
* Integration tests for JDK 11 * fix vm option * fix superviosrd * fix pom * add integration tests for java 11 * add logs * update docs * Update dockerfile to ack AdoptOpenJdk for Java 11 install commands
This commit is contained in:
parent
b1f38131af
commit
31528bcdaf
101
.travis.yml
101
.travis.yml
|
@ -268,13 +268,18 @@ jobs:
|
||||||
|
|
||||||
" && false; }
|
" && false; }
|
||||||
|
|
||||||
|
# Integration tests Java Compile version is set by the machine environment jdk (set by the jdk key)
|
||||||
|
# Integration tests Java Runtime version is set by the JVM_RUNTIME env property (set env key to -Djvm.runtime=<JVM_RUNTIME_VERSION>)
|
||||||
|
# (Currently integration tests only support running with jvm runtime 8 and 11)
|
||||||
|
# START - Integration tests for Compile with Java 8 and Run with Java 8
|
||||||
- &integration_batch_index
|
- &integration_batch_index
|
||||||
name: "batch index integration test"
|
name: "(Compile=openjdk8, Run=openjdk8) batch index integration test"
|
||||||
|
jdk: openjdk8
|
||||||
services: &integration_test_services
|
services: &integration_test_services
|
||||||
- docker
|
- docker
|
||||||
env: TESTNG_GROUPS='-Dgroups=batch-index'
|
env: TESTNG_GROUPS='-Dgroups=batch-index' JVM_RUNTIME='-Djvm.runtime=8'
|
||||||
script: &run_integration_test
|
script: &run_integration_test
|
||||||
- ${MVN} verify -pl integration-tests -P integration-tests ${TESTNG_GROUPS} ${MAVEN_SKIP}
|
- ${MVN} verify -pl integration-tests -P integration-tests ${TESTNG_GROUPS} ${JVM_RUNTIME} ${MAVEN_SKIP}
|
||||||
after_failure: &integration_test_diags
|
after_failure: &integration_test_diags
|
||||||
- for v in ~/shared/logs/*.log ; do
|
- for v in ~/shared/logs/*.log ; do
|
||||||
echo $v logtail ======================== ; tail -100 $v ;
|
echo $v logtail ======================== ; tail -100 $v ;
|
||||||
|
@ -285,39 +290,109 @@ jobs:
|
||||||
done
|
done
|
||||||
|
|
||||||
- &integration_perfect_rollup_parallel_batch_index
|
- &integration_perfect_rollup_parallel_batch_index
|
||||||
name: "perfect rollup parallel batch index integration test"
|
name: "(Compile=openjdk8, Run=openjdk8) perfect rollup parallel batch index integration test"
|
||||||
|
jdk: openjdk8
|
||||||
services: *integration_test_services
|
services: *integration_test_services
|
||||||
env: TESTNG_GROUPS='-Dgroups=perfect-rollup-parallel-batch-index'
|
env: TESTNG_GROUPS='-Dgroups=perfect-rollup-parallel-batch-index' JVM_RUNTIME='-Djvm.runtime=8'
|
||||||
script: *run_integration_test
|
script: *run_integration_test
|
||||||
after_failure: *integration_test_diags
|
after_failure: *integration_test_diags
|
||||||
|
|
||||||
- &integration_kafka_index
|
- &integration_kafka_index
|
||||||
name: "kafka index integration test"
|
name: "(Compile=openjdk8, Run=openjdk8) kafka index integration test"
|
||||||
|
jdk: openjdk8
|
||||||
services: *integration_test_services
|
services: *integration_test_services
|
||||||
env: TESTNG_GROUPS='-Dgroups=kafka-index'
|
env: TESTNG_GROUPS='-Dgroups=kafka-index' JVM_RUNTIME='-Djvm.runtime=8'
|
||||||
script: *run_integration_test
|
script: *run_integration_test
|
||||||
after_failure: *integration_test_diags
|
after_failure: *integration_test_diags
|
||||||
|
|
||||||
- &integration_query
|
- &integration_query
|
||||||
name: "query integration test"
|
name: "(Compile=openjdk8, Run=openjdk8) query integration test"
|
||||||
|
jdk: openjdk8
|
||||||
services: *integration_test_services
|
services: *integration_test_services
|
||||||
env: TESTNG_GROUPS='-Dgroups=query'
|
env: TESTNG_GROUPS='-Dgroups=query' JVM_RUNTIME='-Djvm.runtime=8'
|
||||||
script: *run_integration_test
|
script: *run_integration_test
|
||||||
after_failure: *integration_test_diags
|
after_failure: *integration_test_diags
|
||||||
|
|
||||||
- &integration_realtime_index
|
- &integration_realtime_index
|
||||||
name: "realtime index integration test"
|
name: "(Compile=openjdk8, Run=openjdk8) realtime index integration test"
|
||||||
|
jdk: openjdk8
|
||||||
services: *integration_test_services
|
services: *integration_test_services
|
||||||
env: TESTNG_GROUPS='-Dgroups=realtime-index'
|
env: TESTNG_GROUPS='-Dgroups=realtime-index' JVM_RUNTIME='-Djvm.runtime=8'
|
||||||
script: *run_integration_test
|
script: *run_integration_test
|
||||||
after_failure: *integration_test_diags
|
after_failure: *integration_test_diags
|
||||||
|
|
||||||
- &integration_tests
|
- &integration_tests
|
||||||
name: "other integration test"
|
name: "(Compile=openjdk8, Run=openjdk8) other integration test"
|
||||||
|
jdk: openjdk8
|
||||||
services: *integration_test_services
|
services: *integration_test_services
|
||||||
env: TESTNG_GROUPS='-DexcludedGroups=batch-index,perfect-rollup-parallel-batch-index,kafka-index,query,realtime-index'
|
env: TESTNG_GROUPS='-DexcludedGroups=batch-index,perfect-rollup-parallel-batch-index,kafka-index,query,realtime-index' JVM_RUNTIME='-Djvm.runtime=8'
|
||||||
script: *run_integration_test
|
script: *run_integration_test
|
||||||
after_failure: *integration_test_diags
|
after_failure: *integration_test_diags
|
||||||
|
# END - Integration tests for Compile with Java 8 and Run with Java 8
|
||||||
|
|
||||||
|
# START - Integration tests for Compile with Java 11 and Run with Java 11
|
||||||
|
- <<: *integration_batch_index
|
||||||
|
name: "(Compile=openjdk11, Run=openjdk11) batch index integration test"
|
||||||
|
jdk: openjdk11
|
||||||
|
env: TESTNG_GROUPS='-Dgroups=batch-index' JVM_RUNTIME='-Djvm.runtime=11'
|
||||||
|
|
||||||
|
- <<: *integration_perfect_rollup_parallel_batch_index
|
||||||
|
name: "(Compile=openjdk11, Run=openjdk11) perfect rollup parallel batch index integration test"
|
||||||
|
jdk: openjdk11
|
||||||
|
env: TESTNG_GROUPS='-Dgroups=perfect-rollup-parallel-batch-index' JVM_RUNTIME='-Djvm.runtime=11'
|
||||||
|
|
||||||
|
- <<: *integration_kafka_index
|
||||||
|
name: "(Compile=openjdk11, Run=openjdk11) kafka index integration test"
|
||||||
|
jdk: openjdk11
|
||||||
|
env: TESTNG_GROUPS='-Dgroups=kafka-index' JVM_RUNTIME='-Djvm.runtime=11'
|
||||||
|
|
||||||
|
- <<: *integration_query
|
||||||
|
name: "(Compile=openjdk11, Run=openjdk11) query integration test"
|
||||||
|
jdk: openjdk11
|
||||||
|
env: TESTNG_GROUPS='-Dgroups=query' JVM_RUNTIME='-Djvm.runtime=11'
|
||||||
|
|
||||||
|
- <<: *integration_realtime_index
|
||||||
|
name: "(Compile=openjdk11, Run=openjdk11) realtime index integration test"
|
||||||
|
jdk: openjdk11
|
||||||
|
env: TESTNG_GROUPS='-Dgroups=realtime-index' JVM_RUNTIME='-Djvm.runtime=11'
|
||||||
|
|
||||||
|
- <<: *integration_tests
|
||||||
|
name: "(Compile=openjdk11, Run=openjdk11) other integration test"
|
||||||
|
jdk: openjdk11
|
||||||
|
env: TESTNG_GROUPS='-DexcludedGroups=batch-index,perfect-rollup-parallel-batch-index,kafka-index,query,realtime-index' JVM_RUNTIME='-Djvm.runtime=11'
|
||||||
|
# END - Integration tests for Compile with Java 11 and Run with Java 11
|
||||||
|
|
||||||
|
# START - Integration tests for Compile with Java 11 and Run with Java 8
|
||||||
|
- <<: *integration_batch_index
|
||||||
|
name: "(Compile=openjdk11, Run=openjdk8) batch index integration test"
|
||||||
|
jdk: openjdk11
|
||||||
|
env: TESTNG_GROUPS='-Dgroups=batch-index' JVM_RUNTIME='-Djvm.runtime=8'
|
||||||
|
|
||||||
|
- <<: *integration_perfect_rollup_parallel_batch_index
|
||||||
|
name: "(Compile=openjdk11, Run=openjdk8) perfect rollup parallel batch index integration test"
|
||||||
|
jdk: openjdk11
|
||||||
|
env: TESTNG_GROUPS='-Dgroups=perfect-rollup-parallel-batch-index' JVM_RUNTIME='-Djvm.runtime=8'
|
||||||
|
|
||||||
|
- <<: *integration_kafka_index
|
||||||
|
name: "(Compile=openjdk11, Run=openjdk8) kafka index integration test"
|
||||||
|
jdk: openjdk11
|
||||||
|
env: TESTNG_GROUPS='-Dgroups=kafka-index' JVM_RUNTIME='-Djvm.runtime=8'
|
||||||
|
|
||||||
|
- <<: *integration_query
|
||||||
|
name: "(Compile=openjdk11, Run=openjdk8) query integration test"
|
||||||
|
jdk: openjdk11
|
||||||
|
env: TESTNG_GROUPS='-Dgroups=query' JVM_RUNTIME='-Djvm.runtime=8'
|
||||||
|
|
||||||
|
- <<: *integration_realtime_index
|
||||||
|
name: "(Compile=openjdk11, Run=openjdk8) realtime index integration test"
|
||||||
|
jdk: openjdk11
|
||||||
|
env: TESTNG_GROUPS='-Dgroups=realtime-index' JVM_RUNTIME='-Djvm.runtime=8'
|
||||||
|
|
||||||
|
- <<: *integration_tests
|
||||||
|
name: "(Compile=openjdk11, Run=openjdk8) other integration test"
|
||||||
|
jdk: openjdk11
|
||||||
|
env: TESTNG_GROUPS='-DexcludedGroups=batch-index,perfect-rollup-parallel-batch-index,kafka-index,query,realtime-index' JVM_RUNTIME='-Djvm.runtime=8'
|
||||||
|
# END - Integration tests for Compile with Java 11 and Run with Java 8
|
||||||
|
|
||||||
- name: "security vulnerabilities"
|
- name: "security vulnerabilities"
|
||||||
stage: cron
|
stage: cron
|
||||||
|
|
|
@ -0,0 +1,108 @@
|
||||||
|
#!/usr/bin/env bash
|
||||||
|
# Licensed to the Apache Software Foundation (ASF) under one or more
|
||||||
|
# contributor license agreements. See the NOTICE file distributed with
|
||||||
|
# this work for additional information regarding copyright ownership.
|
||||||
|
# The ASF licenses this file to You under the Apache License, Version 2.0
|
||||||
|
# (the "License"); you may not use this file except in compliance with
|
||||||
|
# the License. You may obtain a copy of the License at
|
||||||
|
#
|
||||||
|
# http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
#
|
||||||
|
# Unless required by applicable law or agreed to in writing, software
|
||||||
|
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
|
# See the License for the specific language governing permissions and
|
||||||
|
# limitations under the License.
|
||||||
|
|
||||||
|
# Cleanup
|
||||||
|
cleanup()
|
||||||
|
{
|
||||||
|
for node in druid-historical druid-coordinator druid-overlord druid-router druid-router-permissive-tls druid-router-no-client-auth-tls druid-router-custom-check-tls druid-broker druid-middlemanager druid-zookeeper-kafka druid-metadata-storage;
|
||||||
|
do
|
||||||
|
docker stop $node
|
||||||
|
docker rm $node
|
||||||
|
done
|
||||||
|
|
||||||
|
docker network rm druid-it-net
|
||||||
|
}
|
||||||
|
|
||||||
|
# Druid environment and jars setup
|
||||||
|
setup()
|
||||||
|
{
|
||||||
|
# environment variables
|
||||||
|
DIR=$(cd $(dirname $0) && pwd)
|
||||||
|
DOCKERDIR=$DIR/docker
|
||||||
|
SHARED_DIR=${HOME}/shared
|
||||||
|
SUPERVISORDIR=/usr/lib/druid/conf
|
||||||
|
RESOURCEDIR=$DIR/src/test/resources
|
||||||
|
|
||||||
|
# so docker IP addr will be known during docker build
|
||||||
|
echo ${DOCKER_IP:=127.0.0.1} > $DOCKERDIR/docker_ip
|
||||||
|
|
||||||
|
# setup client keystore
|
||||||
|
./docker/tls/generate-client-certs-and-keystores.sh
|
||||||
|
rm -rf docker/client_tls
|
||||||
|
cp -r client_tls docker/client_tls
|
||||||
|
|
||||||
|
# Make directories if they dont exist
|
||||||
|
mkdir -p $SHARED_DIR/logs
|
||||||
|
mkdir -p $SHARED_DIR/tasklogs
|
||||||
|
|
||||||
|
# install druid jars
|
||||||
|
rm -rf $SHARED_DIR/docker
|
||||||
|
cp -R docker $SHARED_DIR/docker
|
||||||
|
mvn -B dependency:copy-dependencies -DoutputDirectory=$SHARED_DIR/docker/lib
|
||||||
|
|
||||||
|
# install logging config
|
||||||
|
cp src/main/resources/log4j2.xml $SHARED_DIR/docker/lib/log4j2.xml
|
||||||
|
|
||||||
|
# copy the integration test jar, it provides test-only extension implementations
|
||||||
|
cp target/druid-integration-tests*.jar $SHARED_DIR/docker/lib
|
||||||
|
|
||||||
|
# one of the integration tests needs the wikiticker sample data
|
||||||
|
mkdir -p $SHARED_DIR/wikiticker-it
|
||||||
|
cp ../examples/quickstart/tutorial/wikiticker-2015-09-12-sampled.json.gz $SHARED_DIR/wikiticker-it/wikiticker-2015-09-12-sampled.json.gz
|
||||||
|
cp docker/wiki-simple-lookup.json $SHARED_DIR/wikiticker-it/wiki-simple-lookup.json
|
||||||
|
}
|
||||||
|
|
||||||
|
create_docker_network()
|
||||||
|
{
|
||||||
|
docker network create --subnet=172.172.172.0/24 druid-it-net
|
||||||
|
}
|
||||||
|
|
||||||
|
# Start docker containers for all Druid processes and dependencies
|
||||||
|
start_docker_containers()
|
||||||
|
{
|
||||||
|
# Start zookeeper and kafka
|
||||||
|
docker run -d --privileged --net druid-it-net --ip 172.172.172.2 -e LANG=C.UTF-8 -e LANGUAGE=C.UTF-8 -e LC_ALL=C.UTF-8 --name druid-zookeeper-kafka -p 2181:2181 -p 9092:9092 -p 9093:9093 -v $SHARED_DIR:/shared -v $DOCKERDIR/zookeeper.conf:$SUPERVISORDIR/zookeeper.conf -v $DOCKERDIR/kafka.conf:$SUPERVISORDIR/kafka.conf druid/cluster
|
||||||
|
|
||||||
|
# Start MYSQL
|
||||||
|
docker run -d --privileged --net druid-it-net --ip 172.172.172.3 -e LANG=C.UTF-8 -e LANGUAGE=C.UTF-8 -e LC_ALL=C.UTF-8 --name druid-metadata-storage -v $SHARED_DIR:/shared -v $DOCKERDIR/metadata-storage.conf:$SUPERVISORDIR/metadata-storage.conf druid/cluster
|
||||||
|
|
||||||
|
# Start Overlord
|
||||||
|
docker run -d --privileged --net druid-it-net --ip 172.172.172.4 -e LANG=C.UTF-8 -e LANGUAGE=C.UTF-8 -e LC_ALL=C.UTF-8 --name druid-overlord -p 8090:8090 -p 8290:8290 -v $SHARED_DIR:/shared -v $DOCKERDIR/overlord.conf:$SUPERVISORDIR/overlord.conf --link druid-metadata-storage:druid-metadata-storage --link druid-zookeeper-kafka:druid-zookeeper-kafka druid/cluster
|
||||||
|
|
||||||
|
# Start Coordinator
|
||||||
|
docker run -d --privileged --net druid-it-net --ip 172.172.172.5 -e LANG=C.UTF-8 -e LANGUAGE=C.UTF-8 -e LC_ALL=C.UTF-8 --name druid-coordinator -p 8081:8081 -p 8281:8281 -v $SHARED_DIR:/shared -v $DOCKERDIR/coordinator.conf:$SUPERVISORDIR/coordinator.conf --link druid-overlord:druid-overlord --link druid-metadata-storage:druid-metadata-storage --link druid-zookeeper-kafka:druid-zookeeper-kafka druid/cluster
|
||||||
|
|
||||||
|
# Start Historical
|
||||||
|
docker run -d --privileged --net druid-it-net --ip 172.172.172.6 -e LANG=C.UTF-8 -e LANGUAGE=C.UTF-8 -e LC_ALL=C.UTF-8 --name druid-historical -p 8083:8083 -p 8283:8283 -v $SHARED_DIR:/shared -v $DOCKERDIR/historical.conf:$SUPERVISORDIR/historical.conf --link druid-zookeeper-kafka:druid-zookeeper-kafka druid/cluster
|
||||||
|
|
||||||
|
# Start Middlemanger
|
||||||
|
docker run -d --privileged --net druid-it-net --ip 172.172.172.7 -e LANG=C.UTF-8 -e LANGUAGE=C.UTF-8 -e LC_ALL=C.UTF-8 --name druid-middlemanager -p 8091:8091 -p 8291:8291 -p 8100:8100 -p 8101:8101 -p 8102:8102 -p 8103:8103 -p 8104:8104 -p 8105:8105 -p 8300:8300 -p 8301:8301 -p 8302:8302 -p 8303:8303 -p 8304:8304 -p 8305:8305 -v $RESOURCEDIR:/resources -v $SHARED_DIR:/shared -v $DOCKERDIR/middlemanager.conf:$SUPERVISORDIR/middlemanager.conf --link druid-zookeeper-kafka:druid-zookeeper-kafka --link druid-overlord:druid-overlord druid/cluster
|
||||||
|
|
||||||
|
# Start Broker
|
||||||
|
docker run -d --privileged --net druid-it-net --ip 172.172.172.8 -e LANG=C.UTF-8 -e LANGUAGE=C.UTF-8 -e LC_ALL=C.UTF-8 --name druid-broker -p 8082:8082 -p 8282:8282 -v $SHARED_DIR:/shared -v $DOCKERDIR/broker.conf:$SUPERVISORDIR/broker.conf --link druid-zookeeper-kafka:druid-zookeeper-kafka --link druid-middlemanager:druid-middlemanager --link druid-historical:druid-historical druid/cluster
|
||||||
|
|
||||||
|
# Start Router
|
||||||
|
docker run -d --privileged --net druid-it-net --ip 172.172.172.9 -e LANG=C.UTF-8 -e LANGUAGE=C.UTF-8 -e LC_ALL=C.UTF-8 --name druid-router -p 8888:8888 -p 9088:9088 -v $SHARED_DIR:/shared -v $DOCKERDIR/router.conf:$SUPERVISORDIR/router.conf --link druid-zookeeper-kafka:druid-zookeeper-kafka --link druid-coordinator:druid-coordinator --link druid-broker:druid-broker druid/cluster
|
||||||
|
|
||||||
|
# Start Router with permissive TLS settings (client auth enabled, no hostname verification, no revocation check)
|
||||||
|
docker run -d --privileged --net druid-it-net --ip 172.172.172.10 -e LANG=C.UTF-8 -e LANGUAGE=C.UTF-8 -e LC_ALL=C.UTF-8 --name druid-router-permissive-tls -p 8889:8889 -p 9089:9089 -v $SHARED_DIR:/shared -v $DOCKERDIR/router-permissive-tls.conf:$SUPERVISORDIR/router-permissive-tls.conf --link druid-zookeeper-kafka:druid-zookeeper-kafka --link druid-coordinator:druid-coordinator --link druid-broker:druid-broker druid/cluster
|
||||||
|
|
||||||
|
# Start Router with TLS but no client auth
|
||||||
|
docker run -d --privileged --net druid-it-net --ip 172.172.172.11 -e LANG=C.UTF-8 -e LANGUAGE=C.UTF-8 -e LC_ALL=C.UTF-8 --name druid-router-no-client-auth-tls -p 8890:8890 -p 9090:9090 -v $SHARED_DIR:/shared -v $DOCKERDIR/router-no-client-auth-tls.conf:$SUPERVISORDIR/router-no-client-auth-tls.conf --link druid-zookeeper-kafka:druid-zookeeper-kafka --link druid-coordinator:druid-coordinator --link druid-broker:druid-broker druid/cluster
|
||||||
|
|
||||||
|
# Start Router with custom TLS cert checkers
|
||||||
|
docker run -d --privileged --net druid-it-net --ip 172.172.172.12 -e LANG=C.UTF-8 -e LANGUAGE=C.UTF-8 -e LC_ALL=C.UTF-8 --hostname druid-router-custom-check-tls --name druid-router-custom-check-tls -p 8891:8891 -p 9091:9091 -v $SHARED_DIR:/shared -v $DOCKERDIR/router-custom-check-tls.conf:$SUPERVISORDIR/router-custom-check-tls.conf --link druid-zookeeper-kafka:druid-zookeeper-kafka --link druid-coordinator:druid-coordinator --link druid-broker:druid-broker druid/cluster
|
||||||
|
}
|
|
@ -0,0 +1,31 @@
|
||||||
|
<!--
|
||||||
|
~ Licensed to the Apache Software Foundation (ASF) under one
|
||||||
|
~ or more contributor license agreements. See the NOTICE file
|
||||||
|
~ distributed with this work for additional information
|
||||||
|
~ regarding copyright ownership. The ASF licenses this file
|
||||||
|
~ to you under the Apache License, Version 2.0 (the
|
||||||
|
~ "License"); you may not use this file except in compliance
|
||||||
|
~ with the License. You may obtain a copy of the License at
|
||||||
|
~
|
||||||
|
~ http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
~
|
||||||
|
~ Unless required by applicable law or agreed to in writing,
|
||||||
|
~ software distributed under the License is distributed on an
|
||||||
|
~ "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
|
||||||
|
~ KIND, either express or implied. See the License for the
|
||||||
|
~ specific language governing permissions and limitations
|
||||||
|
~ under the License.
|
||||||
|
-->
|
||||||
|
|
||||||
|
## Base Docker Image for Integration Tests
|
||||||
|
|
||||||
|
### Building Docker Image for Integration Tests
|
||||||
|
|
||||||
|
Run the following commands from <DRUIDS_REPO_DIR>/integration-tests/docker-base
|
||||||
|
|
||||||
|
To build docker image for Java JDK8:
|
||||||
|
- docker build -t druidbase:<NEW_TAG> -f jdk8/Dockerfile .
|
||||||
|
|
||||||
|
To build docker image for Java JDK11:
|
||||||
|
- docker build -t druidbase:<NEW_TAG> -f jdk11/Dockerfile .
|
||||||
|
|
|
@ -0,0 +1,62 @@
|
||||||
|
# Based on the following projects/files:
|
||||||
|
# - SequenceIQ hadoop-docker project hosted at https://github.com/sequenceiq/hadoop-docker
|
||||||
|
# - AdoptOpenJDK openjdk-docker project hosted at https://github.com/AdoptOpenJDK/openjdk-docker
|
||||||
|
# and modified at the Apache Software Foundation (ASF).
|
||||||
|
#
|
||||||
|
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
|
# you may not use this file except in compliance with the License.
|
||||||
|
# You may obtain a copy of the License at
|
||||||
|
#
|
||||||
|
# http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
#
|
||||||
|
# Unless required by applicable law or agreed to in writing, software
|
||||||
|
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
|
# See the License for the specific language governing permissions and
|
||||||
|
# limitations under the License.
|
||||||
|
|
||||||
|
FROM ubuntu:16.04
|
||||||
|
|
||||||
|
# Install Java JDK 11 (OpenJDK 11.0.5)
|
||||||
|
# Sourced from AdoptOpenJDK openjdk-docker project (https://github.com/AdoptOpenJDK/openjdk-docker)
|
||||||
|
RUN apt-get update \
|
||||||
|
&& apt-get install -y --no-install-recommends curl ca-certificates fontconfig locales \
|
||||||
|
&& echo "en_US.UTF-8 UTF-8" >> /etc/locale.gen \
|
||||||
|
&& locale-gen en_US.UTF-8 \
|
||||||
|
&& rm -rf /var/lib/apt/lists/*
|
||||||
|
|
||||||
|
RUN set -eux; \
|
||||||
|
ARCH="$(dpkg --print-architecture)"; \
|
||||||
|
case "${ARCH}" in \
|
||||||
|
armhf) \
|
||||||
|
ESUM='c6b1fda3f8807028cbfcc34a4ded2e8a5a6b6239d2bcc1f06673ea6b1530df94'; \
|
||||||
|
BINARY_URL='https://github.com/AdoptOpenJDK/openjdk11-binaries/releases/download/jdk-11.0.5%2B10/OpenJDK11U-jdk_arm_linux_hotspot_11.0.5_10.tar.gz'; \
|
||||||
|
;; \
|
||||||
|
ppc64el|ppc64le) \
|
||||||
|
ESUM='d763481ddc29ac0bdefb24216b3a0bf9afbb058552682567a075f9c0f7da5814'; \
|
||||||
|
BINARY_URL='https://github.com/AdoptOpenJDK/openjdk11-binaries/releases/download/jdk-11.0.5%2B10/OpenJDK11U-jdk_ppc64le_linux_hotspot_11.0.5_10.tar.gz'; \
|
||||||
|
;; \
|
||||||
|
amd64|x86_64) \
|
||||||
|
ESUM='6dd0c9c8a740e6c19149e98034fba8e368fd9aa16ab417aa636854d40db1a161'; \
|
||||||
|
BINARY_URL='https://github.com/AdoptOpenJDK/openjdk11-binaries/releases/download/jdk-11.0.5%2B10/OpenJDK11U-jdk_x64_linux_hotspot_11.0.5_10.tar.gz'; \
|
||||||
|
;; \
|
||||||
|
*) \
|
||||||
|
echo "Unsupported arch: ${ARCH}"; \
|
||||||
|
exit 1; \
|
||||||
|
;; \
|
||||||
|
esac; \
|
||||||
|
curl -LfsSo /tmp/openjdk.tar.gz ${BINARY_URL}; \
|
||||||
|
echo "${ESUM} */tmp/openjdk.tar.gz" | sha256sum -c -; \
|
||||||
|
mkdir -p /opt/java/openjdk; \
|
||||||
|
cd /opt/java/openjdk; \
|
||||||
|
tar -xf /tmp/openjdk.tar.gz --strip-components=1; \
|
||||||
|
rm -rf /tmp/openjdk.tar.gz;
|
||||||
|
|
||||||
|
ENV JAVA_HOME=/opt/java/openjdk \
|
||||||
|
PATH="/opt/java/openjdk/bin:$PATH"
|
||||||
|
|
||||||
|
# Bundle everything into one script so cleanup can reduce image size.
|
||||||
|
# Otherwise docker's layered images mean that things are not actually deleted.
|
||||||
|
|
||||||
|
COPY setup.sh /root/setup.sh
|
||||||
|
RUN chmod 0755 /root/setup.sh && /root/setup.sh
|
|
@ -16,6 +16,10 @@
|
||||||
|
|
||||||
FROM ubuntu:16.04
|
FROM ubuntu:16.04
|
||||||
|
|
||||||
|
# Install Java JDK 8
|
||||||
|
RUN apt-get update \
|
||||||
|
&& apt-get install -y openjdk-8-jdk
|
||||||
|
|
||||||
# Bundle everything into one script so cleanup can reduce image size.
|
# Bundle everything into one script so cleanup can reduce image size.
|
||||||
# Otherwise docker's layered images mean that things are not actually deleted.
|
# Otherwise docker's layered images mean that things are not actually deleted.
|
||||||
|
|
|
@ -24,9 +24,6 @@ apt-get update
|
||||||
# wget
|
# wget
|
||||||
apt-get install -y wget
|
apt-get install -y wget
|
||||||
|
|
||||||
# Java
|
|
||||||
apt-get install -y openjdk-8-jdk
|
|
||||||
|
|
||||||
# MySQL (Metadata store)
|
# MySQL (Metadata store)
|
||||||
apt-get install -y mysql-server
|
apt-get install -y mysql-server
|
||||||
|
|
||||||
|
|
|
@ -13,8 +13,17 @@
|
||||||
# See the License for the specific language governing permissions and
|
# See the License for the specific language governing permissions and
|
||||||
# limitations under the License.
|
# limitations under the License.
|
||||||
|
|
||||||
|
|
||||||
|
# This is default value for base image in case DOCKER_IMAGE is not given when building
|
||||||
|
ARG DOCKER_IMAGE=imply/druiditbase:openjdk-1.8.0_191-1
|
||||||
# Base image is built from integration-tests/docker-base in the Druid repo
|
# Base image is built from integration-tests/docker-base in the Druid repo
|
||||||
FROM imply/druiditbase:0.2
|
FROM $DOCKER_IMAGE
|
||||||
|
|
||||||
|
# Verify Java version
|
||||||
|
ARG DOCKER_IMAGE
|
||||||
|
ENV DOCKER_IMAGE_USED=$DOCKER_IMAGE
|
||||||
|
RUN echo "Built using base docker image DOCKER_IMAGE_USED=$DOCKER_IMAGE_USED"
|
||||||
|
RUN java -version
|
||||||
|
|
||||||
RUN echo "[mysqld]\ncharacter-set-server=utf8\ncollation-server=utf8_bin\n" >> /etc/mysql/my.cnf
|
RUN echo "[mysqld]\ncharacter-set-server=utf8\ncollation-server=utf8_bin\n" >> /etc/mysql/my.cnf
|
||||||
|
|
||||||
|
|
|
@ -1,5 +1,4 @@
|
||||||
[program:kafka]
|
[program:kafka]
|
||||||
command=/usr/local/kafka/bin/kafka-server-start.sh /usr/local/kafka/config/server.properties
|
command=/usr/local/kafka/bin/kafka-server-start.sh /usr/local/kafka/config/server.properties
|
||||||
user=daemon
|
|
||||||
priority=0
|
priority=0
|
||||||
stdout_logfile=/shared/logs/kafka.log
|
stdout_logfile=/shared/logs/kafka.log
|
||||||
|
|
|
@ -12,7 +12,7 @@ command=java
|
||||||
-Ddruid.worker.capacity=3
|
-Ddruid.worker.capacity=3
|
||||||
-Ddruid.indexer.logs.directory=/shared/tasklogs
|
-Ddruid.indexer.logs.directory=/shared/tasklogs
|
||||||
-Ddruid.storage.storageDirectory=/shared/storage
|
-Ddruid.storage.storageDirectory=/shared/storage
|
||||||
-Ddruid.indexer.runner.javaOpts="-server -Xmx256m -Xms256m -XX:NewSize=128m -XX:MaxNewSize=128m -XX:+UseG1GC -XX:+PrintGCDetails -XX:+PrintGCTimeStamps -Duser.timezone=UTC -Dfile.encoding=UTF-8 -Dlog4j.configurationFile=/shared/docker/lib/log4j2.xml"
|
-Ddruid.indexer.runner.javaOpts="-server -Xmx256m -Xms256m -XX:NewSize=128m -XX:MaxNewSize=128m -XX:+UseG1GC -XX:+PrintGCDetails -Duser.timezone=UTC -Dfile.encoding=UTF-8 -Dlog4j.configurationFile=/shared/docker/lib/log4j2.xml"
|
||||||
-Ddruid.indexer.fork.property.druid.processing.buffer.sizeBytes=25000000
|
-Ddruid.indexer.fork.property.druid.processing.buffer.sizeBytes=25000000
|
||||||
-Ddruid.indexer.fork.property.druid.processing.numThreads=1
|
-Ddruid.indexer.fork.property.druid.processing.numThreads=1
|
||||||
-Ddruid.indexer.fork.server.http.numThreads=20
|
-Ddruid.indexer.fork.server.http.numThreads=20
|
||||||
|
|
|
@ -4,7 +4,6 @@ command=java
|
||||||
-Xmx128m
|
-Xmx128m
|
||||||
-XX:+UseConcMarkSweepGC
|
-XX:+UseConcMarkSweepGC
|
||||||
-XX:+PrintGCDetails
|
-XX:+PrintGCDetails
|
||||||
-XX:+PrintGCTimeStamps
|
|
||||||
-Duser.timezone=UTC
|
-Duser.timezone=UTC
|
||||||
-Dfile.encoding=UTF-8
|
-Dfile.encoding=UTF-8
|
||||||
-Ddruid.host=%(ENV_HOST_IP)s
|
-Ddruid.host=%(ENV_HOST_IP)s
|
||||||
|
|
|
@ -4,7 +4,6 @@ command=java
|
||||||
-Xmx128m
|
-Xmx128m
|
||||||
-XX:+UseConcMarkSweepGC
|
-XX:+UseConcMarkSweepGC
|
||||||
-XX:+PrintGCDetails
|
-XX:+PrintGCDetails
|
||||||
-XX:+PrintGCTimeStamps
|
|
||||||
-Duser.timezone=UTC
|
-Duser.timezone=UTC
|
||||||
-Dfile.encoding=UTF-8
|
-Dfile.encoding=UTF-8
|
||||||
-Ddruid.host=%(ENV_HOST_IP)s
|
-Ddruid.host=%(ENV_HOST_IP)s
|
||||||
|
|
|
@ -4,7 +4,6 @@ command=java
|
||||||
-Xmx128m
|
-Xmx128m
|
||||||
-XX:+UseConcMarkSweepGC
|
-XX:+UseConcMarkSweepGC
|
||||||
-XX:+PrintGCDetails
|
-XX:+PrintGCDetails
|
||||||
-XX:+PrintGCTimeStamps
|
|
||||||
-Duser.timezone=UTC
|
-Duser.timezone=UTC
|
||||||
-Dfile.encoding=UTF-8
|
-Dfile.encoding=UTF-8
|
||||||
-Ddruid.host=%(ENV_HOST_IP)s
|
-Ddruid.host=%(ENV_HOST_IP)s
|
||||||
|
|
|
@ -1,5 +1,6 @@
|
||||||
[supervisord]
|
[supervisord]
|
||||||
nodaemon=true
|
nodaemon=true
|
||||||
|
logfile = /shared/logs/supervisord.log
|
||||||
|
|
||||||
[include]
|
[include]
|
||||||
files = /usr/lib/druid/conf/*.conf
|
files = /usr/lib/druid/conf/*.conf
|
||||||
|
|
|
@ -265,6 +265,9 @@
|
||||||
<profiles>
|
<profiles>
|
||||||
<profile>
|
<profile>
|
||||||
<id>integration-tests</id>
|
<id>integration-tests</id>
|
||||||
|
<properties>
|
||||||
|
<jvm.runtime>8</jvm.runtime>
|
||||||
|
</properties>
|
||||||
<build>
|
<build>
|
||||||
<plugins>
|
<plugins>
|
||||||
<plugin>
|
<plugin>
|
||||||
|
@ -278,7 +281,7 @@
|
||||||
</goals>
|
</goals>
|
||||||
<phase>pre-integration-test</phase>
|
<phase>pre-integration-test</phase>
|
||||||
<configuration>
|
<configuration>
|
||||||
<executable>${project.basedir}/run_cluster.sh</executable>
|
<executable>${project.basedir}/run_cluster_using_java_runtime_${jvm.runtime}.sh</executable>
|
||||||
</configuration>
|
</configuration>
|
||||||
</execution>
|
</execution>
|
||||||
<execution>
|
<execution>
|
||||||
|
|
|
@ -1,97 +0,0 @@
|
||||||
#!/usr/bin/env bash
|
|
||||||
# Licensed to the Apache Software Foundation (ASF) under one or more
|
|
||||||
# contributor license agreements. See the NOTICE file distributed with
|
|
||||||
# this work for additional information regarding copyright ownership.
|
|
||||||
# The ASF licenses this file to You under the Apache License, Version 2.0
|
|
||||||
# (the "License"); you may not use this file except in compliance with
|
|
||||||
# the License. You may obtain a copy of the License at
|
|
||||||
#
|
|
||||||
# http://www.apache.org/licenses/LICENSE-2.0
|
|
||||||
#
|
|
||||||
# Unless required by applicable law or agreed to in writing, software
|
|
||||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
|
||||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
||||||
# See the License for the specific language governing permissions and
|
|
||||||
# limitations under the License.
|
|
||||||
|
|
||||||
# cleanup
|
|
||||||
for node in druid-historical druid-coordinator druid-overlord druid-router druid-router-permissive-tls druid-router-no-client-auth-tls druid-router-custom-check-tls druid-broker druid-middlemanager druid-zookeeper-kafka druid-metadata-storage;
|
|
||||||
do
|
|
||||||
docker stop $node
|
|
||||||
docker rm $node
|
|
||||||
done
|
|
||||||
|
|
||||||
docker network rm druid-it-net
|
|
||||||
|
|
||||||
# environment variables
|
|
||||||
DIR=$(cd $(dirname $0) && pwd)
|
|
||||||
DOCKERDIR=$DIR/docker
|
|
||||||
SHARED_DIR=${HOME}/shared
|
|
||||||
SUPERVISORDIR=/usr/lib/druid/conf
|
|
||||||
RESOURCEDIR=$DIR/src/test/resources
|
|
||||||
|
|
||||||
# so docker IP addr will be known during docker build
|
|
||||||
echo ${DOCKER_IP:=127.0.0.1} > $DOCKERDIR/docker_ip
|
|
||||||
|
|
||||||
# setup client keystore
|
|
||||||
./docker/tls/generate-client-certs-and-keystores.sh
|
|
||||||
# --- Stage cluster assets and start the Druid integration-test containers ---
# Relies on $SHARED_DIR, $DOCKERDIR, $RESOURCEDIR and $SUPERVISORDIR being set
# by the surrounding script (their values are not visible here — confirm in
# the caller before changing any mount path below).

# Refresh the client TLS material used by the TLS router tests.
rm -rf docker/client_tls
cp -r client_tls docker/client_tls

# Make directories if they don't exist.
mkdir -p "$SHARED_DIR/logs"
mkdir -p "$SHARED_DIR/tasklogs"

# Install druid jars.
rm -rf "$SHARED_DIR/docker"
cp -R docker "$SHARED_DIR/docker"
mvn -B dependency:copy-dependencies -DoutputDirectory="$SHARED_DIR/docker/lib"

# Install logging config.
cp src/main/resources/log4j2.xml "$SHARED_DIR/docker/lib/log4j2.xml"

# Copy the integration test jar; it provides test-only extension implementations.
# (Glob stays unquoted on purpose so it expands.)
cp target/druid-integration-tests*.jar "$SHARED_DIR/docker/lib"

# One of the integration tests needs the wikiticker sample data.
mkdir -p "$SHARED_DIR/wikiticker-it"
cp ../examples/quickstart/tutorial/wikiticker-2015-09-12-sampled.json.gz "$SHARED_DIR/wikiticker-it/wikiticker-2015-09-12-sampled.json.gz"
cp docker/wiki-simple-lookup.json "$SHARED_DIR/wikiticker-it/wiki-simple-lookup.json"

# Dedicated bridge network so every container gets a fixed, well-known IP.
docker network create --subnet=172.172.172.0/24 druid-it-net

# Build Druid Cluster Image
docker build -t druid/cluster "$SHARED_DIR/docker"

# Options shared by every container: detached, privileged, test network,
# UTF-8 locale. Kept in a bash array so expansion is word-splitting safe.
common_opts=(-d --privileged --net druid-it-net
             -e LANG=C.UTF-8 -e LANGUAGE=C.UTF-8 -e LC_ALL=C.UTF-8)

# Start zookeeper and kafka
docker run "${common_opts[@]}" --ip 172.172.172.2 --name druid-zookeeper-kafka -p 2181:2181 -p 9092:9092 -p 9093:9093 -v "$SHARED_DIR:/shared" -v "$DOCKERDIR/zookeeper.conf:$SUPERVISORDIR/zookeeper.conf" -v "$DOCKERDIR/kafka.conf:$SUPERVISORDIR/kafka.conf" druid/cluster

# Start MYSQL
docker run "${common_opts[@]}" --ip 172.172.172.3 --name druid-metadata-storage -v "$SHARED_DIR:/shared" -v "$DOCKERDIR/metadata-storage.conf:$SUPERVISORDIR/metadata-storage.conf" druid/cluster

# Start Overlord
docker run "${common_opts[@]}" --ip 172.172.172.4 --name druid-overlord -p 8090:8090 -p 8290:8290 -v "$SHARED_DIR:/shared" -v "$DOCKERDIR/overlord.conf:$SUPERVISORDIR/overlord.conf" --link druid-metadata-storage:druid-metadata-storage --link druid-zookeeper-kafka:druid-zookeeper-kafka druid/cluster

# Start Coordinator
docker run "${common_opts[@]}" --ip 172.172.172.5 --name druid-coordinator -p 8081:8081 -p 8281:8281 -v "$SHARED_DIR:/shared" -v "$DOCKERDIR/coordinator.conf:$SUPERVISORDIR/coordinator.conf" --link druid-overlord:druid-overlord --link druid-metadata-storage:druid-metadata-storage --link druid-zookeeper-kafka:druid-zookeeper-kafka druid/cluster

# Start Historical
docker run "${common_opts[@]}" --ip 172.172.172.6 --name druid-historical -p 8083:8083 -p 8283:8283 -v "$SHARED_DIR:/shared" -v "$DOCKERDIR/historical.conf:$SUPERVISORDIR/historical.conf" --link druid-zookeeper-kafka:druid-zookeeper-kafka druid/cluster

# Start Middlemanager
docker run "${common_opts[@]}" --ip 172.172.172.7 --name druid-middlemanager -p 8091:8091 -p 8291:8291 -p 8100:8100 -p 8101:8101 -p 8102:8102 -p 8103:8103 -p 8104:8104 -p 8105:8105 -p 8300:8300 -p 8301:8301 -p 8302:8302 -p 8303:8303 -p 8304:8304 -p 8305:8305 -v "$RESOURCEDIR:/resources" -v "$SHARED_DIR:/shared" -v "$DOCKERDIR/middlemanager.conf:$SUPERVISORDIR/middlemanager.conf" --link druid-zookeeper-kafka:druid-zookeeper-kafka --link druid-overlord:druid-overlord druid/cluster

# Start Broker
docker run "${common_opts[@]}" --ip 172.172.172.8 --name druid-broker -p 8082:8082 -p 8282:8282 -v "$SHARED_DIR:/shared" -v "$DOCKERDIR/broker.conf:$SUPERVISORDIR/broker.conf" --link druid-zookeeper-kafka:druid-zookeeper-kafka --link druid-middlemanager:druid-middlemanager --link druid-historical:druid-historical druid/cluster

# Start Router
docker run "${common_opts[@]}" --ip 172.172.172.9 --name druid-router -p 8888:8888 -p 9088:9088 -v "$SHARED_DIR:/shared" -v "$DOCKERDIR/router.conf:$SUPERVISORDIR/router.conf" --link druid-zookeeper-kafka:druid-zookeeper-kafka --link druid-coordinator:druid-coordinator --link druid-broker:druid-broker druid/cluster

# Start Router with permissive TLS settings (client auth enabled, no hostname verification, no revocation check)
docker run "${common_opts[@]}" --ip 172.172.172.10 --name druid-router-permissive-tls -p 8889:8889 -p 9089:9089 -v "$SHARED_DIR:/shared" -v "$DOCKERDIR/router-permissive-tls.conf:$SUPERVISORDIR/router-permissive-tls.conf" --link druid-zookeeper-kafka:druid-zookeeper-kafka --link druid-coordinator:druid-coordinator --link druid-broker:druid-broker druid/cluster

# Start Router with TLS but no client auth
docker run "${common_opts[@]}" --ip 172.172.172.11 --name druid-router-no-client-auth-tls -p 8890:8890 -p 9090:9090 -v "$SHARED_DIR:/shared" -v "$DOCKERDIR/router-no-client-auth-tls.conf:$SUPERVISORDIR/router-no-client-auth-tls.conf" --link druid-zookeeper-kafka:druid-zookeeper-kafka --link druid-coordinator:druid-coordinator --link druid-broker:druid-broker druid/cluster

# Start Router with custom TLS cert checkers (needs a stable --hostname for the
# custom certificate check to match).
docker run "${common_opts[@]}" --ip 172.172.172.12 --hostname druid-router-custom-check-tls --name druid-router-custom-check-tls -p 8891:8891 -p 9091:9091 -v "$SHARED_DIR:/shared" -v "$DOCKERDIR/router-custom-check-tls.conf:$SUPERVISORDIR/router-custom-check-tls.conf" --link druid-zookeeper-kafka:druid-zookeeper-kafka --link druid-coordinator:druid-coordinator --link druid-broker:druid-broker druid/cluster
|
|
|
@ -0,0 +1,31 @@
|
||||||
|
#!/usr/bin/env bash

# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements.  See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License.  You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

# Bring up the integration-test Druid cluster on a Java 11 base image.
# cleanup/setup/create_docker_network/start_docker_containers and $SHARED_DIR
# are provided by common_run_cluster.sh.

# Source the common script. Fall back to $PWD when the script is invoked in a
# way that leaves ${BASH_SOURCE%/*} pointing at a non-directory (e.g. "bash x.sh").
DIR="${BASH_SOURCE%/*}"
if [[ ! -d "$DIR" ]]; then DIR="$PWD"; fi
. "$DIR/common_run_cluster.sh"

cleanup

setup

create_docker_network

# Build Druid Cluster Image (Image running Java 11).
# $SHARED_DIR is quoted so a path containing spaces cannot word-split.
docker build -t druid/cluster --build-arg DOCKER_IMAGE=imply/druiditbase:openjdk-11.0.5-1 "$SHARED_DIR/docker"

start_docker_containers
|
|
@ -0,0 +1,31 @@
|
||||||
|
#!/usr/bin/env bash

# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements.  See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License.  You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

# Bring up the integration-test Druid cluster on a Java 8 base image.
# cleanup/setup/create_docker_network/start_docker_containers and $SHARED_DIR
# are provided by common_run_cluster.sh.

# Source the common script. Fall back to $PWD when the script is invoked in a
# way that leaves ${BASH_SOURCE%/*} pointing at a non-directory (e.g. "bash x.sh").
DIR="${BASH_SOURCE%/*}"
if [[ ! -d "$DIR" ]]; then DIR="$PWD"; fi
. "$DIR/common_run_cluster.sh"

cleanup

setup

create_docker_network

# Build Druid Cluster Image (Image running Java 8).
# $SHARED_DIR is quoted so a path containing spaces cannot word-split.
docker build -t druid/cluster --build-arg DOCKER_IMAGE=imply/druiditbase:openjdk-1.8.0_191-1 "$SHARED_DIR/docker"

start_docker_containers
|
Loading…
Reference in New Issue