druid/integration-tests/docker/druid.sh

#!/bin/bash
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
set -e
getConfPath()
{
  cluster_conf_base=/tmp/conf/druid/cluster
  case "$1" in
    _common) echo $cluster_conf_base/_common ;;
    historical) echo $cluster_conf_base/data/historical ;;
    historical-for-query-error-test) echo $cluster_conf_base/data/historical ;;
    middleManager) echo $cluster_conf_base/data/middleManager ;;
    indexer) echo $cluster_conf_base/data/indexer ;;
    coordinator) echo $cluster_conf_base/master/coordinator ;;
    broker) echo $cluster_conf_base/query/broker ;;
    router) echo $cluster_conf_base/query/router ;;
    overlord) echo $cluster_conf_base/master/overlord ;;
    *) echo $cluster_conf_base/misc/$1 ;;
  esac
}
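# For example, "getConfPath broker" prints /tmp/conf/druid/cluster/query/broker;
# unknown service names fall through to the misc/ directory.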
# Delete the old key (if it exists) and append the new key=value pair
setKey()
{
  service="$1"
  key="$2"
  value="$3"
  service_conf=$(getConfPath $service)/runtime.properties
  # Delete any existing occurrence of the key from both the common and the service config
  sed -ri "/$key=/d" $COMMON_CONF_DIR/common.runtime.properties
  [ -f $service_conf ] && sed -ri "/$key=/d" $service_conf
  [ -f $service_conf ] && echo "$key=$value" >>$service_conf
  [ -f $service_conf ] || echo "$key=$value" >>$COMMON_CONF_DIR/common.runtime.properties
  echo "Setting $key=$value in $service_conf"
}
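# Example (illustrative values): "setKey broker druid.server.http.numThreads 40" removes any
# existing druid.server.http.numThreads entry and appends the new value to the broker's
# runtime.properties, or to common.runtime.properties if the service file does not exist.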
setupConfig()
{
echo "$(date -Is) configuring service $DRUID_SERVICE"
# We put all the config in /tmp/conf to allow for a
# read-only root filesystem
mkdir -p /tmp/conf/druid
COMMON_CONF_DIR=$(getConfPath _common)
SERVICE_CONF_DIR=$(getConfPath ${DRUID_SERVICE})
mkdir -p $COMMON_CONF_DIR
mkdir -p $SERVICE_CONF_DIR
touch $COMMON_CONF_DIR/common.runtime.properties
touch $SERVICE_CONF_DIR/runtime.properties
setKey $DRUID_SERVICE druid.host $(resolveip -s $HOSTNAME)
setKey $DRUID_SERVICE druid.worker.ip $(resolveip -s $HOSTNAME)
  # Write every environment variable whose name starts with druid_ to the Druid service
  # config file, replacing _ with . in the key.
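  # For example, druid_worker_capacity=4 becomes the property druid.worker.capacity=4
  # (illustrative variable; any druid_-prefixed environment variable is handled the same way).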
  env | grep ^druid_ | while read evar;
  do
    # Can't use IFS='=' to parse, since the value might itself contain '=' (e.g. a password)
    val=$(echo "$evar" | sed -e 's?[^=]*=??')
    var=$(echo "$evar" | sed -e 's?^\([^=]*\)=.*?\1?g' -e 's?_?.?g')
    setKey $DRUID_SERVICE "$var" "$val"
  done
}
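# After setupConfig runs, the merged configuration lives under /tmp/conf/druid/cluster:
# common settings in _common/common.runtime.properties and service-specific overrides in
# the runtime.properties under the directory getConfPath returns for $DRUID_SERVICE.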
setupData()
{
# The "query" and "security" test groups require data to be setup before running the tests.
# In particular, they requires segments to be download from a pre-existing s3 bucket.
# This is done by using the loadSpec put into metadatastore and s3 credientials set below.
if [ "$DRUID_INTEGRATION_TEST_GROUP" = "query" ] || [ "$DRUID_INTEGRATION_TEST_GROUP" = "query-retry" ] || [ "$DRUID_INTEGRATION_TEST_GROUP" = "query-error" ] || [ "$DRUID_INTEGRATION_TEST_GROUP" = "high-availability" ] || [ "$DRUID_INTEGRATION_TEST_GROUP" = "security" ] || [ "$DRUID_INTEGRATION_TEST_GROUP" = "ldap-security" ]; then
# touch is needed because OverlayFS's copy-up operation breaks POSIX standards. See https://github.com/docker/for-linux/issues/72.
find /var/lib/mysql -type f -exec touch {} \; && service mysql start \
&& cat /test-data/${DRUID_INTEGRATION_TEST_GROUP}-sample-data.sql | mysql -u root druid && /etc/init.d/mysql stop
# below s3 credentials needed to access the pre-existing s3 bucket
    setKey $DRUID_SERVICE druid.s3.accessKey AKIAT2GGLKKJQCMG64V4
    setKey $DRUID_SERVICE druid.s3.secretKey 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
    if [ "$DRUID_INTEGRATION_TEST_GROUP" = "query-retry" ] || [ "$DRUID_INTEGRATION_TEST_GROUP" = "query-error" ] || [ "$DRUID_INTEGRATION_TEST_GROUP" = "high-availability" ]; then
      setKey $DRUID_SERVICE druid.extensions.loadList [\"druid-s3-extensions\",\"druid-integration-tests\"]
    else
      setKey $DRUID_SERVICE druid.extensions.loadList [\"druid-s3-extensions\"]
    fi
    # The region of the S3 bucket holding the sample data for these test groups
    export AWS_REGION=us-east-1
  fi

  # The SqlInputSource tests in the "input-source" test group require data to be set up in MySQL before running the tests.
  if [ "$DRUID_INTEGRATION_TEST_GROUP" = "input-source" ] ; then
    # touch is needed because OverlayFS's copy-up operation breaks POSIX standards. See https://github.com/docker/for-linux/issues/72.
    find /var/lib/mysql -type f -exec touch {} \; && service mysql start \
      && echo "GRANT ALL ON sqlinputsource.* TO 'druid'@'%'; CREATE database sqlinputsource DEFAULT CHARACTER SET utf8mb4;" | mysql -u root druid \
      && cat /test-data/sql-input-source-sample-data.sql | mysql -u root druid \
      && /etc/init.d/mysql stop
  fi
}
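
# The functions above are meant to be invoked by the container's startup script
# (an assumption based on this integration-test Docker setup), roughly:
#   . druid.sh        # illustrative path; the actual entrypoint may differ
#   setupConfig
#   setupData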