Mirror of https://github.com/apache/druid.git
Claim full support for Java 17. (#14384)
* Claim full support for Java 17. No production code has changed, except the startup scripts. Changes:
  1) Allow Java 17 without DRUID_SKIP_JAVA_CHECK.
  2) Include the full list of opens and exports on both Java 11 and 17.
  3) Document that Java 17 is both supported and preferred.
  4) Switch some tests from Java 11 to 17 to get better coverage on the preferred version.
* Doc update.
* Update errorprone.
* Update docker_build_containers.sh.
* Update errorprone in licenses.yaml.
* Add some more run-javas.
* Additional run-javas.
* Update errorprone.
* Suppress new errorprone error.
* Add exports and opens in ForkingTaskRunner for Java 11+. Test, doc changes.
* Additional errorprone updates.
* Update for errorprone.
* Restore old formatting in LdapCredentialsValidator.
* Copy bin/ too.
* Fix Java 15, 17 build line in docker_build_containers.sh.
* Update busybox image.
* One more java command.
* Fix interpolation.
* IT commandline refinements.
* Switch to busybox 1.34.1-glibc.
* POM adjustments, build and test one IT on 17.
* Additional debugging.
* Fix silly thing.
* Adjust command line.
* Add exports and opens one more place.
* Additional harmonization of strong encapsulation parameters.
parent: 5f94a2a9c2
commit: 63ee69b4e8
.github/workflows/reusable-standard-its.yml
@@ -92,6 +92,9 @@ jobs:
 if: ${{ failure() && steps.run-it.conclusion == 'failure' }}
 run: |
 for v in broker router ${{ inputs.use_indexer }} historical coordinator overlord; do
-echo "------------------------druid-"$v"-------------------------";
-sudo docker exec druid-"$v" tail -1000 /shared/logs/"$v".log;
+echo "=======================druid-"$v"========================";
+echo "-----------------------docker logs-----------------------";
+sudo docker logs druid-"$v" 2>&1 | tail -1000 ||:;
+echo "-----------------------service logs----------------------";
+sudo docker exec druid-"$v" tail -1000 /shared/logs/"$v".log 2>&1 ||:;
 done
.github/workflows/standard-its.yml
@@ -93,11 +93,11 @@ jobs:
 strategy:
 fail-fast: false
 matrix:
-jdk: [8, 11]
+jdk: [8, 17]
 uses: ./.github/workflows/reusable-standard-its.yml
 if: ${{ needs.changes.outputs.core == 'true' || needs.changes.outputs.common-extensions == 'true' }}
 with:
-build_jdk: 8
+build_jdk: ${{ matrix.jdk }}
 runtime_jdk: ${{ matrix.jdk }}
 testing_groups: -Dgroups=query
 use_indexer: middleManager
@@ -177,7 +177,7 @@ jobs:
 run: |
 for v in broker middlemanager router coordinator historical ; do
 echo "------------------------druid-tiny-cluster-"$v"s-0-------------------------";
-sudo /usr/local/bin/kubectl logs --tail 1000 druid-tiny-cluster-"$v"s-0;
+sudo /usr/local/bin/kubectl logs --tail 1000 druid-tiny-cluster-"$v"s-0 ||:;
 done
 
 integration-other-tests:
.github/workflows/static-checks.yml
@@ -69,9 +69,9 @@ jobs:
 if: ${{ matrix.java == 'jdk8' }}
 run: ./check_test_suite_test.py
 
-- name: (openjdk11) strict compilation
-if: ${{ matrix.java == 'jdk11' }}
-# errorprone requires JDK 11
+- name: (openjdk17) strict compilation
+if: ${{ matrix.java == 'jdk17' }}
+# errorprone requires JDK 11+
 # Strict compilation requires more than 2 GB
 run: ${MVN} clean -DstrictCompile compile test-compile --fail-at-end ${MAVEN_SKIP} ${MAVEN_SKIP_TESTS}
 
@@ -160,9 +160,9 @@ jobs:
 - name: checkout branch
 uses: actions/checkout@v3
 
-- name: setup JDK11
+- name: setup JDK17
 run: |
-echo "JAVA_HOME=$JAVA_HOME_11_X64" >> $GITHUB_ENV
+echo "JAVA_HOME=$JAVA_HOME_17_X64" >> $GITHUB_ENV
 
 - name: setup node
 uses: actions/setup-node@v3
@@ -152,4 +152,4 @@ fi
 # take the ${TASK_JSON} environment variable and base64 decode, unzip and throw it in ${TASK_DIR}/task.json
 mkdir -p ${TASK_DIR}; echo ${TASK_JSON} | base64 -d | gzip -d > ${TASK_DIR}/task.json;
 
-exec java ${JAVA_OPTS} -cp $COMMON_CONF_DIR:$SERVICE_CONF_DIR:lib/*: org.apache.druid.cli.Main internal peon $@
+exec bin/run-java ${JAVA_OPTS} -cp $COMMON_CONF_DIR:$SERVICE_CONF_DIR:lib/*: org.apache.druid.cli.Main internal peon $@
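For context on the decode step above, here is a hedged sketch of the inverse operation — how a task spec could be packed into the `TASK_JSON` environment variable that this script consumes. The file name is a placeholder, and `base64 -w0` assumes GNU coreutils:

```bash
# Compress and base64-encode a task spec so the peon entrypoint can unpack it.
# task.json is an assumed local file; -w0 keeps the encoded value on one line (GNU base64).
export TASK_JSON="$(gzip -c task.json | base64 -w0)"
```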
@@ -86,7 +86,7 @@
 <includes>
 <include>*</include>
 </includes>
-<fileMode>744</fileMode>
+<fileMode>755</fileMode>
 <outputDirectory>bin</outputDirectory>
 </fileSet>
 </fileSets>
@@ -108,7 +108,7 @@ For more information about the connection options, see [Client Reference](https:
 
 Make sure you meet the following requirements before trying these examples:
 
-- A supported Java version, such as Java 8
+- A supported [Java version](../operations/java.md)
 
 - [Avatica JDBC driver](https://calcite.apache.org/avatica/downloads/). You can add the JAR to your `CLASSPATH` directly or manage it externally, such as through Maven and a `pom.xml` file.
 
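As a reminder of what these JDBC examples connect to, the Avatica driver is typically pointed at Druid's SQL endpoint with a connect string along these lines (the host and port are placeholders for your Router or Broker and are not part of this change):

```
jdbc:avatica:remote:url=http://localhost:8888/druid/v2/sql/avatica/
```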
@@ -134,19 +134,6 @@ Java runtime itself.
 This file is not rotated, but it is generally small due to the low volume of messages.
 If necessary, you can truncate it using the Linux command `truncate --size 0 log/historical.stdout.log`.
 
-## Avoid reflective access warnings in logs
-
-On Java 11, you may see warnings like the following in the logs:
-
-```
-WARNING: An illegal reflective access operation has occurred
-WARNING: Use --illegal-access=warn to enable warnings of further illegal reflective access operations
-WARNING: All illegal access operations will be denied in a future release
-```
-
-To avoid these, add the `--add-exports` and `--add-opens` command line parameters described in the documentation section
-about [Java strong encapsulation](../operations/java.md#strong-encapsulation).
-
 ## Set the logs to asynchronously write
 
 If your logs are really chatty, you can set them to write asynchronously.
@@ -59,7 +59,7 @@ You also need to include the [Hadoop AWS module](https://hadoop.apache.org/docs/
 Run the below command to install the `hadoop-aws.jar` file under `${DRUID_HOME}/extensions/druid-hdfs-storage` in all nodes.
 
 ```bash
-java -classpath "${DRUID_HOME}lib/*" org.apache.druid.cli.Main tools pull-deps -h "org.apache.hadoop:hadoop-aws:${HADOOP_VERSION}";
+${DRUID_HOME}/bin/run-java -classpath "${DRUID_HOME}/lib/*" org.apache.druid.cli.Main tools pull-deps -h "org.apache.hadoop:hadoop-aws:${HADOOP_VERSION}";
 cp ${DRUID_HOME}/hadoop-dependencies/hadoop-aws/${HADOOP_VERSION}/hadoop-aws-${HADOOP_VERSION}.jar ${DRUID_HOME}/extensions/druid-hdfs-storage/
 ```
 
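A small sketch of the environment the command above assumes; the path and Hadoop release shown are placeholders rather than values taken from this change:

```bash
# Adjust to your installation layout and Hadoop release.
export DRUID_HOME=/opt/apache-druid
export HADOOP_VERSION=3.3.6
```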
@@ -27,14 +27,13 @@ a Java runtime for Druid.
 
 ## Selecting a Java runtime
 
-Druid fully supports Java 8 and 11, and has experimental support for [Java 17](#java-17).
-The project team recommends Java 11.
+Druid fully supports Java 8u92+, Java 11, and Java 17. The project team recommends Java 17.
 
 The project team recommends using an OpenJDK-based Java distribution. There are many free and actively-supported
 distributions available, including
-[Amazon Corretto](https://docs.aws.amazon.com/corretto/latest/corretto-11-ug/what-is-corretto-11.html),
-[Azul Zulu](https://www.azul.com/downloads/?version=java-11-lts&package=jdk), and
-[Eclipse Temurin](https://adoptium.net/temurin/releases?version=11).
+[Amazon Corretto](https://docs.aws.amazon.com/corretto/latest/corretto-17-ug/what-is-corretto-17.html),
+[Azul Zulu](https://www.azul.com/downloads/?version=java-17-lts&package=jdk), and
+[Eclipse Temurin](https://adoptium.net/temurin/releases?version=17).
 The project team does not recommend any specific distribution over any other.
 
 Druid relies on the environment variables `JAVA_HOME` or `DRUID_JAVA_HOME` to find Java on the machine. You can set
@@ -44,7 +43,8 @@ Druid relies on the environment variables `JAVA_HOME` or `DRUID_JAVA_HOME` to fi
 ## Garbage collection
 
 In general, the project team recommends using the G1 collector with default settings. This is the default collector in
-Java 11. To enable G1 on Java 8, use `-XX:+UseG1GC`. There is no harm in explicitly specifying this on Java 11 as well.
+Java 11 and 17. To enable G1 on Java 8, use `-XX:+UseG1GC`. There is no harm in explicitly specifying this on Java 11
+or 17 as well.
 
 Garbage collector selection and tuning is a form of sport in the Java community. There may be situations where adjusting
 garbage collection configuration improves or worsens performance. The project team's guidance is that most people do
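To make the recommendation concrete, enabling G1 explicitly amounts to one line in a service's `jvm.config`; this is an illustrative snippet rather than a copy of Druid's shipped configuration, and it is only strictly needed on Java 8 since G1 is already the default on Java 11 and 17:

```
-XX:+UseG1GC
```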
@@ -52,10 +52,11 @@ not need to stray away from G1 with default settings.
 
 ## Strong encapsulation
 
-Java 9 and beyond (including Java 11) include the capability for
+Java 9 and beyond (including Java 11 and 17) include the capability for
 [strong encapsulation](https://dev.java/learn/strong-encapsulation-\(of-jdk-internals\)/) of internal JDK APIs. Druid
-uses certain internal JDK APIs for functionality- and performance-related reasons. In Java 11, this leads to log
-messages like the following:
+uses certain internal JDK APIs, which must be added to `--add-exports` and `--add-opens` on the Java command line.
+
+On Java 11, if these parameters are not included, you will see warnings like the following:
 
 ```
 WARNING: An illegal reflective access operation has occurred
@@ -63,51 +64,35 @@ WARNING: Use --illegal-access=warn to enable warnings of further illegal reflect
 WARNING: All illegal access operations will be denied in a future release
 ```
 
-These warning messages are harmless, and can be ignored. However, you can avoid them entirely if you wish by adding the
-following Java command line parameters. These parameters are not part of the default configurations that ship with
-Druid, because Java 8 does not recognize these parameters and fails to start up if they are provided.
-
-To do this, add the following lines to your `jvm.config` files:
-
-```
---add-exports=java.base/jdk.internal.ref=ALL-UNNAMED
---add-exports=java.base/jdk.internal.misc=ALL-UNNAMED
---add-opens=java.base/java.lang=ALL-UNNAMED
---add-opens=java.base/java.io=ALL-UNNAMED
---add-opens=java.base/java.nio=ALL-UNNAMED
---add-opens=java.base/jdk.internal.ref=ALL-UNNAMED
---add-opens=java.base/sun.nio.ch=ALL-UNNAMED
-```
-
-Additionally, tasks run by [MiddleManagers](../design/architecture.md) execute in separate JVMs. The command line for
-these JVMs is given by `druid.indexer.runner.javaOptsArray` or `druid.indexer.runner.javaOpts` in
-`middleManager/runtime.properties`. Java command line parameters for tasks must be specified here. For example, use
-a line like the following:
-
-```
-druid.indexer.runner.javaOptsArray=["-server","-Xms1g","-Xmx1g","-XX:MaxDirectMemorySize=1g","-Duser.timezone=UTC","-Dfile.encoding=UTF-8","-XX:+ExitOnOutOfMemoryError","-Djava.util.logging.manager=org.apache.logging.log4j.jul.LogManager","--add-exports=java.base/jdk.internal.ref=ALL-UNNAMED","--add-exports=java.base/jdk.internal.misc=ALL-UNNAMED","--add-opens=java.base/java.lang=ALL-UNNAMED","--add-opens=java.base/java.io=ALL-UNNAMED","--add-opens=java.base/java.nio=ALL-UNNAMED","--add-opens=java.base/jdk.internal.ref=ALL-UNNAMED","--add-opens=java.base/sun.nio.ch=ALL-UNNAMED"]
-```
-
-The `Xms`, `Xmx`, and `MaxDirectMemorySize` parameters in the line above are merely an example. You may use different
-values in your specific environment.
-
-## Java 17
-
-Druid has experimental support for Java 17.
-
-An important change in Java 17 is that [strong encapsulation](#strong-encapsulation) is enabled by default. The various
-`--add-opens` and `--add-exports` parameters listed in the [strong encapsulation](#strong-encapsulation) section are
-required in all `jvm.config` files and in `druid.indexer.runner.javaOpts` or `druid.indexer.runner.javaOptsArray` on
-MiddleManagers. Failure to include these parameters leads to failure of various operations.
-
-In addition, Druid's launch scripts detect Java 17 and log the following message rather than starting up:
-
-```
-Druid requires Java 8 or 11. Your current version is: 17.X.Y.
-```
-
-You can skip this check with an environment variable:
-
-```
-export DRUID_SKIP_JAVA_CHECK=1
-```
+On Java 17, if these parameters are not included, you will see errors on startup like the following:
+
+```
+Exception in thread "main" java.lang.ExceptionInInitializerError
+```
+
+Druid's out-of-box configuration adds these parameters transparently when you use the bundled `bin/start-druid` or
+similar commands. In this case, there is nothing special you need to do to run successfully on Java 11 or 17. However,
+if you have customized your Druid service launching system, you will need to ensure the required Java parameters are
+added. There are many ways of doing this. Choose the one that works best for you.
+
+1. The simplest approach: use Druid's bundled `bin/start-druid` script to launch Druid.
+
+2. If you launch Druid using `bin/supervise -c <config>`, ensure your config file uses `bin/run-druid`. This
+   script uses `bin/run-java` internally, and automatically adds the proper flags.
+
+3. If you launch Druid using a `java` command, replace `java` with `bin/run-java`. Druid's bundled
+   `bin/run-java` script automatically adds the proper flags.
+
+4. If you launch Druid without using its bundled scripts, ensure the following parameters are added to your Java
+   command line:
+
+```
+--add-exports=java.base/jdk.internal.misc=ALL-UNNAMED \
+--add-exports=java.base/jdk.internal.ref=ALL-UNNAMED \
+--add-opens=java.base/java.nio=ALL-UNNAMED \
+--add-opens=java.base/sun.nio.ch=ALL-UNNAMED \
+--add-opens=java.base/jdk.internal.ref=ALL-UNNAMED \
+--add-opens=java.base/java.io=ALL-UNNAMED \
+--add-opens=java.base/java.lang=ALL-UNNAMED \
+--add-opens=jdk.management/com.sun.management.internal=ALL-UNNAMED
+```
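As an illustration of option 3 in the new text above, here is a hedged sketch of launching one service by hand through the bundled wrapper instead of plain `java`, so the `--add-exports`/`--add-opens` flags are supplied automatically on Java 11+. The conf and lib locations mirror the defaults used by Druid's own scripts elsewhere in this commit; the service name is an example:

```bash
# Sketch only: bin/run-java adds the strong-encapsulation flags when it detects Java 11+.
CONF_DIR=conf/druid
LIB_DIR=lib
bin/run-java -Ddruid.node.type=broker \
  -cp "$CONF_DIR/_common:$CONF_DIR/broker:$LIB_DIR/*" \
  org.apache.druid.cli.Main server broker
```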
@@ -133,7 +133,7 @@ The [basic cluster tuning guide](../operations/basic-cluster-tuning.md) has info
 
 We recommend running your favorite Linux distribution. You will also need
 
-* [Java 8 or 11](../operations/java.md).
+* [Java 8u92+, 11, or 17](../operations/java.md)
 * Python 2 or Python 3
 
 > If needed, you can specify where to find Java using the environment variables
@@ -40,7 +40,7 @@ You can follow these steps on a relatively modest machine, such as a workstation
 The software requirements for the installation machine are:
 
 * Linux, Mac OS X, or other Unix-like OS. (Windows is not supported)
-* [Java 8u92+ or Java 11](../operations/java.md)
+* [Java 8u92+, 11, or 17](../operations/java.md)
 * Python 3 (preferred) or Python 2
 * Perl 5
 
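A quick way to confirm the requirements above on the installation machine (a convenience sketch; it assumes `python3` and `perl` are on the PATH):

```bash
java -version      # should report 8u92+, 11, or 17
python3 --version  # or `python --version` for Python 2
perl -v
```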
@@ -17,6 +17,7 @@
 ## Initialization script for druid nodes
 ## Runs druid nodes as a daemon
 ## Environment Variables used by this script -
+## DRUID_BIN_DIR - directory having druid bin files, default=bin
 ## DRUID_LIB_DIR - directory having druid jar files, default=lib
 ## DRUID_CONF_DIR - directory having druid config files, default=conf/druid
 ## DRUID_LOG_DIR - directory used to store druid logs, default=log
@@ -36,11 +37,11 @@ shift
 command=$1
 
 LIB_DIR="${DRUID_LIB_DIR:=lib}"
+BIN_DIR="${DRUID_BIN_DIR:=$DRUID_LIB_DIR/../bin}"
 CONF_DIR="${DRUID_CONF_DIR:=conf/druid}"
 PID_DIR="${DRUID_PID_DIR:=var/druid/pids}"
 WHEREAMI="$(dirname "$0")"
 WHEREAMI="$(cd "$WHEREAMI" && pwd)"
-JAVA_BIN_DIR="$(. /"$WHEREAMI"/java-util && get_java_bin_dir)"
 
 # Remove possilble ending slash
 LOG_DIR="${DRUID_LOG_DIR:=${WHEREAMI}/log}"
@@ -64,13 +65,7 @@ case $command in
 if [ ! -d "$PID_DIR" ]; then mkdir -p $PID_DIR; fi
 if [ ! -d "$LOG_DIR" ]; then mkdir -p $LOG_DIR; fi
 
-if [ -z "$JAVA_BIN_DIR" ]; then
-echo "Could not find java - please run $WHEREAMI/verify-java to confirm it is installed."
-exit 1
-fi
-JAVA="$JAVA_BIN_DIR/java"
-
-nohup $JAVA -Ddruid.node.type=$nodeType "-Ddruid.log.path=$LOG_DIR" `cat $CONF_DIR/$nodeType/jvm.config | xargs` -cp $CONF_DIR/_common:$CONF_DIR/$nodeType:$LIB_DIR/*:$HADOOP_CONF_DIR org.apache.druid.cli.Main server $nodeType >> /dev/null 2>&1 &
+nohup "$BIN_DIR/run-java" -Ddruid.node.type=$nodeType "-Ddruid.log.path=$LOG_DIR" `cat $CONF_DIR/$nodeType/jvm.config | xargs` -cp $CONF_DIR/_common:$CONF_DIR/$nodeType:$LIB_DIR/*:$HADOOP_CONF_DIR org.apache.druid.cli.Main server $nodeType >> /dev/null 2>&1 &
 nodeType_PID=$!
 echo $nodeType_PID > $pid
 echo "Started $nodeType node, pid: $nodeType_PID"
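For operators invoking this init script directly, a sketch of how the environment variables it reads, including the new `DRUID_BIN_DIR`, could be overridden before starting a node; the paths are placeholders, and each variable falls back to the defaults listed in the header comments when unset:

```bash
export DRUID_BIN_DIR=/opt/apache-druid/bin
export DRUID_LIB_DIR=/opt/apache-druid/lib
export DRUID_CONF_DIR=/opt/apache-druid/conf/druid
export DRUID_LOG_DIR=/var/log/druid
```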
@@ -26,31 +26,24 @@ fi
 
 JAVA_MAJOR="$("$JAVA_BIN" -version 2>&1 | sed -n -E 's/.* version "([^."]*).*/\1/p')"
 
-if [ "$JAVA_MAJOR" != "" ] && [ "$JAVA_MAJOR" -ge "17" ]
+if [ "$JAVA_MAJOR" != "" ] && [ "$JAVA_MAJOR" -ge "11" ]
 then
-# Must disable strong encapsulation for certain packages on Java 17.
-# The last one is required for metric emit, see https://github.com/apache/druid/issues/12312
+# Disable strong encapsulation for certain packages on Java 11+.
+# When updating this list, update all four:
+# 1) ForkingTaskRunner#STRONG_ENCAPSULATION_PROPERTIES
+# 2) docs/operations/java.md, "Strong encapsulation" section
+# 3) pom.xml, jdk.strong.encapsulation.argLine
+# 4) examples/bin/run-java script (here)
 exec "$JAVA_BIN" \
 --add-exports=java.base/jdk.internal.misc=ALL-UNNAMED \
 --add-exports=java.base/jdk.internal.ref=ALL-UNNAMED \
---add-opens=java.base/jdk.internal.ref=ALL-UNNAMED \
 --add-opens=java.base/java.nio=ALL-UNNAMED \
 --add-opens=java.base/sun.nio.ch=ALL-UNNAMED \
+--add-opens=java.base/jdk.internal.ref=ALL-UNNAMED \
 --add-opens=java.base/java.io=ALL-UNNAMED \
 --add-opens=java.base/java.lang=ALL-UNNAMED \
 --add-opens=jdk.management/com.sun.management.internal=ALL-UNNAMED \
 "$@"
-elif [ "$JAVA_MAJOR" != "" ] && [ "$JAVA_MAJOR" -ge "11" ]
-then
-# The first 4 parameters below are required to use datasketches-memory as a library.
-# And the last one is required for metric emit, see https://github.com/apache/druid/issues/12312
-exec "$JAVA_BIN" \
---add-exports=java.base/jdk.internal.misc=ALL-UNNAMED \
---add-exports=java.base/jdk.internal.ref=ALL-UNNAMED \
---add-opens=java.base/java.nio=ALL-UNNAMED \
---add-opens=java.base/sun.nio.ch=ALL-UNNAMED \
---add-opens=jdk.management/com.sun.management.internal=ALL-UNNAMED \
-"$@"
 else
 exec "$JAVA_BIN" "$@"
 fi
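As a side note on the version detection above, this standalone sketch shows what the `sed` expression extracts from `java -version` output; the sample version strings are illustrative:

```bash
# "openjdk version \"17.0.2\"" -> 17 (flags added); "openjdk version \"1.8.0_292\"" -> 1 (flags skipped)
java -version 2>&1 | sed -n -E 's/.* version "([^."]*).*/\1/p'
```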
@@ -28,14 +28,14 @@ sub fail_check {
 : "No Java runtime was detected on your system.";
 
 print STDERR <<"EOT";
-Druid requires Java 8 or 11. $current_version_text
+Druid requires Java 8, 11, or 17. $current_version_text
 
 If you believe this check is in error, or you want to proceed with a potentially
 unsupported Java runtime, you can skip this check using an environment variable:
 
 export DRUID_SKIP_JAVA_CHECK=1
 
-Otherwise, install Java 8 or 11 in one of the following locations.
+Otherwise, install Java 8, 11, or 17 in one of the following locations.
 
 * DRUID_JAVA_HOME
 * JAVA_HOME
@@ -68,6 +68,6 @@ if ($?) {
 }
 
 # If we know it won't work, die. Otherwise hope for the best.
-if ($java_version =~ /version \"((\d+)\.(\d+).*?)\"/ && !($2 == 1 && $3 == 8) && $2 != 11 ) {
+if ($java_version =~ /version \"((\d+)\.(\d+).*?)\"/ && !($2 == 1 && $3 == 8) && $2 != 11 && $2 != 17 ) {
 fail_check($1);
 }
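For readers following the Perl check above, a few illustrative `java -version` first lines and how the updated condition treats them (sample strings, not output captured from this change):

```
openjdk version "17.0.7"    ->  $2 == 17          -> accepted
openjdk version "11.0.19"   ->  $2 == 11          -> accepted
openjdk version "1.8.0_372" ->  $2 == 1, $3 == 8  -> accepted
openjdk version "15.0.2"    ->  $2 == 15          -> fail_check("15.0.2")
```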
@@ -25,7 +25,6 @@ druid.plaintextPort=8091
 # https://druid.apache.org/docs/latest/operations/basic-cluster-tuning.html#middlemanager
 
 # Task launch parameters
-druid.indexer.runner.javaCommand=bin/run-java
 druid.worker.baseTaskDirs=[\"var/druid/task\"]
 
 # Processing threads and buffers on Peons
@@ -25,7 +25,6 @@ druid.worker.capacity=4
 druid.worker.baseTaskDirs=[\"var/druid/task\"]
 
 # Task launch parameters
-druid.indexer.runner.javaCommand=bin/run-java
 druid.indexer.runner.javaOptsArray=["-server","-Xms1g","-Xmx1g","-XX:MaxDirectMemorySize=1g","-Duser.timezone=UTC","-Dfile.encoding=UTF-8","-XX:+ExitOnOutOfMemoryError","-Djava.util.logging.manager=org.apache.logging.log4j.jul.LogManager"]
 
 # HTTP server threads
@@ -25,7 +25,6 @@ druid.worker.capacity=8
 druid.worker.baseTaskDirs=[\"var/druid/task\"]
 
 # Task launch parameters
-druid.indexer.runner.javaCommand=bin/run-java
 druid.indexer.runner.javaOptsArray=["-server","-Xms1g","-Xmx1g","-XX:MaxDirectMemorySize=1g","-Duser.timezone=UTC","-Dfile.encoding=UTF-8","-XX:+ExitOnOutOfMemoryError","-Djava.util.logging.manager=org.apache.logging.log4j.jul.LogManager"]
 
 # HTTP server threads
@@ -25,7 +25,6 @@ druid.worker.capacity=4
 druid.worker.baseTaskDirs=[\"var/druid/task\"]
 
 # Task launch parameters
-druid.indexer.runner.javaCommand=bin/run-java
 druid.indexer.runner.javaOptsArray=["-server","-Xms1g","-Xmx1g","-XX:MaxDirectMemorySize=1g","-Duser.timezone=UTC","-Dfile.encoding=UTF-8","-XX:+ExitOnOutOfMemoryError","-Djava.util.logging.manager=org.apache.logging.log4j.jul.LogManager"]
 
 # HTTP server threads
@@ -25,7 +25,6 @@ druid.worker.capacity=2
 druid.worker.baseTaskDirs=[\"var/druid/task\"]
 
 # Task launch parameters
-druid.indexer.runner.javaCommand=bin/run-java
 druid.indexer.runner.javaOptsArray=["-server","-Xms1g","-Xmx1g","-XX:MaxDirectMemorySize=1g","-Duser.timezone=UTC","-Dfile.encoding=UTF-8","-XX:+ExitOnOutOfMemoryError","-Djava.util.logging.manager=org.apache.logging.log4j.jul.LogManager"]
 
 # HTTP server threads
@@ -25,7 +25,6 @@ druid.worker.capacity=2
 druid.worker.baseTaskDirs=[\"var/druid/task\"]
 
 # Task launch parameters
-druid.indexer.runner.javaCommand=bin/run-java
 druid.indexer.runner.javaOptsArray=["-server","-Xms256m","-Xmx256m","-XX:MaxDirectMemorySize=300m","-Duser.timezone=UTC","-Dfile.encoding=UTF-8","-XX:+ExitOnOutOfMemoryError","-Djava.util.logging.manager=org.apache.logging.log4j.jul.LogManager"]
 
 # HTTP server threads
@@ -25,7 +25,6 @@ druid.worker.capacity=3
 druid.worker.baseTaskDirs=[\"var/druid/task\"]
 
 # Task launch parameters
-druid.indexer.runner.javaCommand=bin/run-java
 druid.indexer.runner.javaOptsArray=["-server","-Xms1g","-Xmx1g","-XX:MaxDirectMemorySize=1g","-Duser.timezone=UTC","-Dfile.encoding=UTF-8","-XX:+ExitOnOutOfMemoryError","-Djava.util.logging.manager=org.apache.logging.log4j.jul.LogManager"]
 
 # HTTP server threads
@@ -25,7 +25,6 @@ druid.worker.capacity=16
 druid.worker.baseTaskDirs=[\"var/druid/task\"]
 
 # Task launch parameters
-druid.indexer.runner.javaCommand=bin/run-java
 druid.indexer.runner.javaOptsArray=["-server","-Xms1g","-Xmx1g","-XX:MaxDirectMemorySize=1g","-Duser.timezone=UTC","-Dfile.encoding=UTF-8","-XX:+ExitOnOutOfMemoryError","-Djava.util.logging.manager=org.apache.logging.log4j.jul.LogManager"]
 
 # HTTP server threads
@ -19,6 +19,7 @@
|
|||||||
|
|
||||||
package org.apache.druid.data.input.avro;
|
package org.apache.druid.data.input.avro;
|
||||||
|
|
||||||
|
import com.google.common.collect.ImmutableMap;
|
||||||
import com.google.common.collect.ImmutableSet;
|
import com.google.common.collect.ImmutableSet;
|
||||||
import org.apache.avro.generic.GenericRecord;
|
import org.apache.avro.generic.GenericRecord;
|
||||||
import org.apache.avro.util.Utf8;
|
import org.apache.avro.util.Utf8;
|
||||||
@ -108,12 +109,12 @@ public class AvroFlattenerMakerTest
|
|||||||
|
|
||||||
// map
|
// map
|
||||||
Assert.assertEquals(2, flattener.makeJsonPathExtractor("$.someMultiMemberUnion.map.two").apply(
|
Assert.assertEquals(2, flattener.makeJsonPathExtractor("$.someMultiMemberUnion.map.two").apply(
|
||||||
AvroStreamInputRowParserTest.buildSomeAvroDatumWithUnionValue(new HashMap<String, Integer>() {{
|
AvroStreamInputRowParserTest.buildSomeAvroDatumWithUnionValue(
|
||||||
put("one", 1);
|
ImmutableMap.<String, Integer>builder()
|
||||||
put("two", 2);
|
.put("one", 1)
|
||||||
put("three", 3);
|
.put("two", 2)
|
||||||
}
|
.put("three", 3)
|
||||||
}
|
.build()
|
||||||
)));
|
)));
|
||||||
|
|
||||||
// array
|
// array
|
||||||
|
@ -38,6 +38,7 @@ import org.apache.druid.server.security.AuthenticationResult;
|
|||||||
import javax.annotation.Nullable;
|
import javax.annotation.Nullable;
|
||||||
import javax.naming.AuthenticationException;
|
import javax.naming.AuthenticationException;
|
||||||
import javax.naming.Context;
|
import javax.naming.Context;
|
||||||
|
import javax.naming.Name;
|
||||||
import javax.naming.NamingEnumeration;
|
import javax.naming.NamingEnumeration;
|
||||||
import javax.naming.NamingException;
|
import javax.naming.NamingException;
|
||||||
import javax.naming.directory.DirContext;
|
import javax.naming.directory.DirContext;
|
||||||
@ -211,6 +212,17 @@ public class LDAPCredentialsValidator implements CredentialsValidator
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Retrieves an LDAP user object by using {@link javax.naming.ldap.LdapContext#search(Name, String, SearchControls)}.
|
||||||
|
*
|
||||||
|
* Regarding the "BanJNDI" suppression: Errorprone flags all usage of APIs that may do JNDI lookups because of the
|
||||||
|
* potential for RCE. The risk is that an attacker with ability to set user-level properties on the LDAP server could
|
||||||
|
* cause us to read a serialized Java object (a well-known security risk). We mitigate the risk by avoiding the
|
||||||
|
* "lookup" API, and using the "search" API *without* setting the returningObjFlag.
|
||||||
|
*
|
||||||
|
* See https://errorprone.info/bugpattern/BanJNDI for more details.
|
||||||
|
*/
|
||||||
|
@SuppressWarnings("BanJNDI")
|
||||||
@Nullable
|
@Nullable
|
||||||
SearchResult getLdapUserObject(BasicAuthLDAPConfig ldapConfig, DirContext context, String username)
|
SearchResult getLdapUserObject(BasicAuthLDAPConfig ldapConfig, DirContext context, String username)
|
||||||
{
|
{
|
||||||
|
@ -98,6 +98,7 @@ public class LDAPCredentialsValidatorTest
|
|||||||
|
|
||||||
public static class MockContextFactory implements InitialContextFactory
|
public static class MockContextFactory implements InitialContextFactory
|
||||||
{
|
{
|
||||||
|
@SuppressWarnings("BanJNDI") // False positive: usage here is a mock in tests.
|
||||||
@Override
|
@Override
|
||||||
public Context getInitialContext(Hashtable<?, ?> environment) throws NamingException
|
public Context getInitialContext(Hashtable<?, ?> environment) throws NamingException
|
||||||
{
|
{
|
||||||
|
@ -60,7 +60,6 @@ import java.util.concurrent.ExecutorService;
|
|||||||
import java.util.concurrent.atomic.AtomicBoolean;
|
import java.util.concurrent.atomic.AtomicBoolean;
|
||||||
import java.util.concurrent.atomic.AtomicLong;
|
import java.util.concurrent.atomic.AtomicLong;
|
||||||
import java.util.concurrent.atomic.AtomicReference;
|
import java.util.concurrent.atomic.AtomicReference;
|
||||||
import java.util.stream.IntStream;
|
|
||||||
|
|
||||||
/**
|
/**
|
||||||
* Like {@link org.apache.druid.indexing.common.task.batch.parallel.TaskMonitor}, but different.
|
* Like {@link org.apache.druid.indexing.common.task.batch.parallel.TaskMonitor}, but different.
|
||||||
@ -232,7 +231,7 @@ public class MSQWorkerTaskLauncher
|
|||||||
taskIds.notifyAll();
|
taskIds.notifyAll();
|
||||||
}
|
}
|
||||||
|
|
||||||
while (taskIds.size() < taskCount || !IntStream.range(0, taskCount).allMatch(fullyStartedTasks::contains)) {
|
while (taskIds.size() < taskCount || !allTasksStarted(taskCount)) {
|
||||||
if (stopFuture.isDone() || stopFuture.isCancelled()) {
|
if (stopFuture.isDone() || stopFuture.isCancelled()) {
|
||||||
FutureUtils.getUnchecked(stopFuture, false);
|
FutureUtils.getUnchecked(stopFuture, false);
|
||||||
throw new ISE("Stopped");
|
throw new ISE("Stopped");
|
||||||
@ -276,6 +275,7 @@ public class MSQWorkerTaskLauncher
|
|||||||
|
|
||||||
/**
|
/**
|
||||||
* Report a worker that failed without active orders. To be retried if it is requried for future stages only.
|
* Report a worker that failed without active orders. To be retried if it is requried for future stages only.
|
||||||
|
*
|
||||||
* @param workerNumber worker number
|
* @param workerNumber worker number
|
||||||
*/
|
*/
|
||||||
public void reportFailedInactiveWorker(int workerNumber)
|
public void reportFailedInactiveWorker(int workerNumber)
|
||||||
@ -289,6 +289,7 @@ public class MSQWorkerTaskLauncher
|
|||||||
* Blocks the call untill the worker tasks are ready to be contacted for work.
|
* Blocks the call untill the worker tasks are ready to be contacted for work.
|
||||||
*
|
*
|
||||||
* @param workerSet
|
* @param workerSet
|
||||||
|
*
|
||||||
* @throws InterruptedException
|
* @throws InterruptedException
|
||||||
*/
|
*/
|
||||||
public void waitUntilWorkersReady(Set<Integer> workerSet) throws InterruptedException
|
public void waitUntilWorkersReady(Set<Integer> workerSet) throws InterruptedException
|
||||||
@ -684,6 +685,21 @@ public class MSQWorkerTaskLauncher
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Whether {@link #fullyStartedTasks} contains all tasks from 0 (inclusive) to taskCount (exclusive).
|
||||||
|
*/
|
||||||
|
@GuardedBy("taskIds")
|
||||||
|
private boolean allTasksStarted(final int taskCount)
|
||||||
|
{
|
||||||
|
for (int i = 0; i < taskCount; i++) {
|
||||||
|
if (!fullyStartedTasks.contains(i)) {
|
||||||
|
return false;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
return true;
|
||||||
|
}
|
||||||
|
|
||||||
/**
|
/**
|
||||||
* Used by the main loop to decide how often to check task status.
|
* Used by the main loop to decide how often to check task status.
|
||||||
*/
|
*/
|
||||||
@ -763,7 +779,7 @@ public class MSQWorkerTaskLauncher
|
|||||||
* 2. The location has never been reported by the task. If this is not the case, the task has started already.
|
* 2. The location has never been reported by the task. If this is not the case, the task has started already.
|
||||||
* 3. Task has taken more than maxTaskStartDelayMillis to start.
|
* 3. Task has taken more than maxTaskStartDelayMillis to start.
|
||||||
* 4. No task has started in maxTaskStartDelayMillis. This is in case the cluster is scaling up and other workers
|
* 4. No task has started in maxTaskStartDelayMillis. This is in case the cluster is scaling up and other workers
|
||||||
* are starting.
|
* are starting.
|
||||||
*/
|
*/
|
||||||
public boolean didRunTimeOut(final long maxTaskStartDelayMillis)
|
public boolean didRunTimeOut(final long maxTaskStartDelayMillis)
|
||||||
{
|
{
|
||||||
|
@@ -97,6 +97,25 @@ public class ForkingTaskRunner
 {
   private static final EmittingLogger LOGGER = new EmittingLogger(ForkingTaskRunner.class);
   private static final String CHILD_PROPERTY_PREFIX = "druid.indexer.fork.property.";
+
+  /**
+   * Properties to add on Java 11+. When updating this list, update all four:
+   * 1) ForkingTaskRunner#STRONG_ENCAPSULATION_PROPERTIES (here)
+   * 2) docs/operations/java.md, "Strong encapsulation" section
+   * 3) pom.xml, jdk.strong.encapsulation.argLine
+   * 4) examples/bin/run-java script
+   */
+  private static final List<String> STRONG_ENCAPSULATION_PROPERTIES = ImmutableList.of(
+      "--add-exports=java.base/jdk.internal.misc=ALL-UNNAMED",
+      "--add-exports=java.base/jdk.internal.ref=ALL-UNNAMED",
+      "--add-opens=java.base/java.nio=ALL-UNNAMED",
+      "--add-opens=java.base/sun.nio.ch=ALL-UNNAMED",
+      "--add-opens=java.base/jdk.internal.ref=ALL-UNNAMED",
+      "--add-opens=java.base/java.io=ALL-UNNAMED",
+      "--add-opens=java.base/java.lang=ALL-UNNAMED",
+      "--add-opens=jdk.management/com.sun.management.internal=ALL-UNNAMED"
+  );
+
   private final ForkingTaskRunnerConfig config;
   private final Properties props;
   private final TaskLogPusher taskLogPusher;
@@ -224,6 +243,11 @@ public class ForkingTaskRunner
       }
 
       command.add(config.getJavaCommand());
+
+      if (JvmUtils.majorVersion() >= 11) {
+        command.addAll(STRONG_ENCAPSULATION_PROPERTIES);
+      }
+
       command.add("-cp");
       command.add(taskClasspath);
 
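Putting the two ForkingTaskRunner hunks together, on Java 11 or 17 the forked peon command line now begins roughly as sketched below, assuming the default `druid.indexer.runner.javaCommand` of plain `java`; this is a reconstruction from the snippets above, with the classpath and remaining arguments elided as placeholders:

```
java \
  --add-exports=java.base/jdk.internal.misc=ALL-UNNAMED \
  --add-exports=java.base/jdk.internal.ref=ALL-UNNAMED \
  --add-opens=java.base/java.nio=ALL-UNNAMED \
  --add-opens=java.base/sun.nio.ch=ALL-UNNAMED \
  --add-opens=java.base/jdk.internal.ref=ALL-UNNAMED \
  --add-opens=java.base/java.io=ALL-UNNAMED \
  --add-opens=java.base/java.lang=ALL-UNNAMED \
  --add-opens=jdk.management/com.sun.management.internal=ALL-UNNAMED \
  -cp <task classpath> ... org.apache.druid.cli.Main internal peon <task args>
```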
@ -321,12 +321,14 @@ public class HeapMemoryTaskStorage implements TaskStorage
|
|||||||
List<String> taskIds = tasks.entrySet().stream()
|
List<String> taskIds = tasks.entrySet().stream()
|
||||||
.filter(entry -> entry.getValue().getStatus().isComplete()
|
.filter(entry -> entry.getValue().getStatus().isComplete()
|
||||||
&& entry.getValue().getCreatedDate().isBefore(timestamp))
|
&& entry.getValue().getCreatedDate().isBefore(timestamp))
|
||||||
.map(entry -> entry.getKey())
|
.map(Map.Entry::getKey)
|
||||||
.collect(Collectors.toList());
|
.collect(Collectors.toList());
|
||||||
|
|
||||||
taskIds.forEach(tasks::remove);
|
taskIds.forEach(tasks::remove);
|
||||||
synchronized (taskActions) {
|
synchronized (taskActions) {
|
||||||
taskIds.forEach(taskActions::removeAll);
|
for (String taskId : taskIds) {
|
||||||
|
taskActions.removeAll(taskId);
|
||||||
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -520,7 +520,7 @@ public class TaskQueue
|
|||||||
try {
|
try {
|
||||||
Preconditions.checkState(active, "Queue is not active!");
|
Preconditions.checkState(active, "Queue is not active!");
|
||||||
Preconditions.checkNotNull(task, "task");
|
Preconditions.checkNotNull(task, "task");
|
||||||
Preconditions.checkState(tasks.size() < config.getMaxSize(), "Too many tasks (max = %,d)", config.getMaxSize());
|
Preconditions.checkState(tasks.size() < config.getMaxSize(), "Too many tasks (max = %s)", config.getMaxSize());
|
||||||
|
|
||||||
// If this throws with any sort of exception, including TaskExistsException, we don't want to
|
// If this throws with any sort of exception, including TaskExistsException, we don't want to
|
||||||
// insert the task into our queue. So don't catch it.
|
// insert the task into our queue. So don't catch it.
|
||||||
|
@ -1897,7 +1897,6 @@ public abstract class SeekableStreamIndexTaskRunner<PartitionIdType, SequenceOff
|
|||||||
final SequenceOffsetType currOffset = Preconditions.checkNotNull(
|
final SequenceOffsetType currOffset = Preconditions.checkNotNull(
|
||||||
currOffsets.get(partition),
|
currOffsets.get(partition),
|
||||||
"Current offset is null for partition[%s]",
|
"Current offset is null for partition[%s]",
|
||||||
recordOffset,
|
|
||||||
partition
|
partition
|
||||||
);
|
);
|
||||||
|
|
||||||
|
@ -0,0 +1,57 @@
|
|||||||
|
/*
|
||||||
|
* Licensed to the Apache Software Foundation (ASF) under one
|
||||||
|
* or more contributor license agreements. See the NOTICE file
|
||||||
|
* distributed with this work for additional information
|
||||||
|
* regarding copyright ownership. The ASF licenses this file
|
||||||
|
* to you under the Apache License, Version 2.0 (the
|
||||||
|
* "License"); you may not use this file except in compliance
|
||||||
|
* with the License. You may obtain a copy of the License at
|
||||||
|
*
|
||||||
|
* http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
*
|
||||||
|
* Unless required by applicable law or agreed to in writing,
|
||||||
|
* software distributed under the License is distributed on an
|
||||||
|
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
|
||||||
|
* KIND, either express or implied. See the License for the
|
||||||
|
* specific language governing permissions and limitations
|
||||||
|
* under the License.
|
||||||
|
*/
|
||||||
|
|
||||||
|
package org.apache.druid.indexing.overlord;
|
||||||
|
|
||||||
|
import org.apache.druid.indexer.TaskStatus;
|
||||||
|
import org.apache.druid.indexing.common.config.TaskStorageConfig;
|
||||||
|
import org.apache.druid.indexing.common.task.NoopTask;
|
||||||
|
import org.apache.druid.java.util.common.DateTimes;
|
||||||
|
import org.joda.time.Period;
|
||||||
|
import org.junit.Assert;
|
||||||
|
import org.junit.Before;
|
||||||
|
import org.junit.Test;
|
||||||
|
|
||||||
|
public class HeapMemoryTaskStorageTest
|
||||||
|
{
|
||||||
|
private HeapMemoryTaskStorage storage;
|
||||||
|
|
||||||
|
@Before
|
||||||
|
public void setUp()
|
||||||
|
{
|
||||||
|
storage = new HeapMemoryTaskStorage(new TaskStorageConfig(Period.days(1)));
|
||||||
|
}
|
||||||
|
|
||||||
|
@Test
|
||||||
|
public void testRemoveTasksOlderThan()
|
||||||
|
{
|
||||||
|
final NoopTask task1 = NoopTask.create("foo");
|
||||||
|
final NoopTask task2 = NoopTask.create("bar");
|
||||||
|
storage.insert(task1, TaskStatus.success(task1.getId()));
|
||||||
|
storage.insert(task2, TaskStatus.running(task2.getId()));
|
||||||
|
|
||||||
|
storage.removeTasksOlderThan(DateTimes.of("2000").getMillis());
|
||||||
|
Assert.assertNotNull(storage.getTaskInfo(task1.getId()));
|
||||||
|
Assert.assertNotNull(storage.getTaskInfo(task2.getId()));
|
||||||
|
|
||||||
|
storage.removeTasksOlderThan(DateTimes.of("3000").getMillis());
|
||||||
|
Assert.assertNull(storage.getTaskInfo(task1.getId()));
|
||||||
|
Assert.assertNotNull(storage.getTaskInfo(task2.getId()));
|
||||||
|
}
|
||||||
|
}
|
@ -439,7 +439,7 @@ public class TaskQueueTest extends IngestionTestBase
|
|||||||
HttpRemoteTaskRunnerTest.TestDruidNodeDiscovery druidNodeDiscovery = new HttpRemoteTaskRunnerTest.TestDruidNodeDiscovery();
|
HttpRemoteTaskRunnerTest.TestDruidNodeDiscovery druidNodeDiscovery = new HttpRemoteTaskRunnerTest.TestDruidNodeDiscovery();
|
||||||
DruidNodeDiscoveryProvider druidNodeDiscoveryProvider = EasyMock.createMock(DruidNodeDiscoveryProvider.class);
|
DruidNodeDiscoveryProvider druidNodeDiscoveryProvider = EasyMock.createMock(DruidNodeDiscoveryProvider.class);
|
||||||
EasyMock.expect(druidNodeDiscoveryProvider.getForService(WorkerNodeService.DISCOVERY_SERVICE_KEY))
|
EasyMock.expect(druidNodeDiscoveryProvider.getForService(WorkerNodeService.DISCOVERY_SERVICE_KEY))
|
||||||
.andReturn(druidNodeDiscovery);
|
.andReturn(druidNodeDiscovery);
|
||||||
EasyMock.replay(druidNodeDiscoveryProvider);
|
EasyMock.replay(druidNodeDiscoveryProvider);
|
||||||
TaskStorage taskStorageMock = EasyMock.createStrictMock(TaskStorage.class);
|
TaskStorage taskStorageMock = EasyMock.createStrictMock(TaskStorage.class);
|
||||||
for (String taskId : runningTasks) {
|
for (String taskId : runningTasks) {
|
||||||
|
@ -145,6 +145,6 @@ echo "" >> $LOG_FILE
|
|||||||
|
|
||||||
# Run Druid service
|
# Run Druid service
|
||||||
cd $DRUID_HOME
|
cd $DRUID_HOME
|
||||||
exec java $JAVA_OPTS -cp $CP \
|
exec bin/run-java $JAVA_OPTS -cp $CP \
|
||||||
org.apache.druid.cli.Main server $DRUID_SERVICE \
|
org.apache.druid.cli.Main server $DRUID_SERVICE \
|
||||||
>> $LOG_FILE 2>&1
|
>> $LOG_FILE 2>&1
|
||||||
|
@ -43,7 +43,7 @@ chown druid:druid launch.sh druid.sh
|
|||||||
cat > /run-druid.sh << EOF
|
cat > /run-druid.sh << EOF
|
||||||
#! /bin/bash
|
#! /bin/bash
|
||||||
|
|
||||||
java -cp "${DRUID_HOME}/lib/*" \\
|
"${DRUID_HOME}/bin/run-java" -cp "${DRUID_HOME}/lib/*" \\
|
||||||
-Ddruid.extensions.directory=${DRUID_HOME}/extensions \\
|
-Ddruid.extensions.directory=${DRUID_HOME}/extensions \\
|
||||||
-Ddruid.extensions.loadList='["mysql-metadata-storage"]' \\
|
-Ddruid.extensions.loadList='["mysql-metadata-storage"]' \\
|
||||||
-Ddruid.metadata.storage.type=mysql \\
|
-Ddruid.metadata.storage.type=mysql \\
|
||||||
|
@ -108,7 +108,7 @@ In such cases, a workaround is to build the code first, then use the next sectio
|
|||||||
individual tests. To build:
|
individual tests. To build:
|
||||||
|
|
||||||
```bash
|
```bash
|
||||||
mvn clean package -P integration-tests -Pskip-static-checks -Pskip-tests -Dmaven.javadoc.skip=true -T1.0C -nsu
|
mvn clean package -P integration-tests -Pskip-static-checks -Pskip-tests -T1.0C -nsu
|
||||||
```
|
```
|
||||||
|
|
||||||
#### Keep the local Maven cache fresh
|
#### Keep the local Maven cache fresh
|
||||||
@ -122,9 +122,12 @@ Maven to only look locally for snapshot jars.
|
|||||||
|
|
||||||
1. Build docker images.
|
1. Build docker images.
|
||||||
|
|
||||||
From root module run maven command, run the following command:
|
From the source root, run the following command:
|
||||||
```bash
|
```bash
|
||||||
mvn clean install -pl integration-tests -P integration-tests -Ddocker.run.skip=true -Dmaven.test.skip=true -Ddocker.build.hadoop=true
|
mvn clean install \
|
||||||
|
-Pintegration-tests,skip-static-checks,skip-tests \
|
||||||
|
-Ddocker.run.skip=true \
|
||||||
|
-Ddocker.build.hadoop=true
|
||||||
```
|
```
|
||||||
|
|
||||||
> **NOTE**: `-Ddocker.build.hadoop=true` is optional if you don't run tests against Hadoop.
|
> **NOTE**: `-Ddocker.build.hadoop=true` is optional if you don't run tests against Hadoop.
|
||||||
|
@ -44,7 +44,8 @@ RUN find /var/lib/mysql -type f -exec touch {} \; && /etc/init.d/mysql start \
|
|||||||
&& echo "CREATE USER 'druid'@'%' IDENTIFIED BY 'diurd'; GRANT ALL ON druid.* TO 'druid'@'%'; CREATE database druid DEFAULT CHARACTER SET utf8mb4;" | mysql -u root \
|
&& echo "CREATE USER 'druid'@'%' IDENTIFIED BY 'diurd'; GRANT ALL ON druid.* TO 'druid'@'%'; CREATE database druid DEFAULT CHARACTER SET utf8mb4;" | mysql -u root \
|
||||||
&& /etc/init.d/mysql stop
|
&& /etc/init.d/mysql stop
|
||||||
|
|
||||||
# Add Druid jars
|
# Add Druid scripts and jars
|
||||||
|
ADD bin/* /usr/local/druid/bin/
|
||||||
ADD lib/* /usr/local/druid/lib/
|
ADD lib/* /usr/local/druid/lib/
|
||||||
COPY extensions/ /usr/local/druid/extensions/
|
COPY extensions/ /usr/local/druid/extensions/
|
||||||
|
|
||||||
@ -66,7 +67,7 @@ RUN wget -q "https://packages.confluent.io/maven/io/confluent/kafka-protobuf-pro
|
|||||||
# Add sample data
|
# Add sample data
|
||||||
# touch is needed because OverlayFS's copy-up operation breaks POSIX standards. See https://github.com/docker/for-linux/issues/72.
|
# touch is needed because OverlayFS's copy-up operation breaks POSIX standards. See https://github.com/docker/for-linux/issues/72.
|
||||||
RUN find /var/lib/mysql -type f -exec touch {} \; && service mysql start \
|
RUN find /var/lib/mysql -type f -exec touch {} \; && service mysql start \
|
||||||
&& java -cp "/usr/local/druid/lib/*" -Ddruid.extensions.directory=/usr/local/druid/extensions -Ddruid.extensions.loadList='["mysql-metadata-storage"]' -Ddruid.metadata.storage.type=mysql -Ddruid.metadata.mysql.driver.driverClassName=$MYSQL_DRIVER_CLASSNAME org.apache.druid.cli.Main tools metadata-init --connectURI="jdbc:mysql://localhost:3306/druid" --user=druid --password=diurd \
|
&& /usr/local/druid/bin/run-java -cp "/usr/local/druid/lib/*" -Ddruid.extensions.directory=/usr/local/druid/extensions -Ddruid.extensions.loadList='["mysql-metadata-storage"]' -Ddruid.metadata.storage.type=mysql -Ddruid.metadata.mysql.driver.driverClassName=$MYSQL_DRIVER_CLASSNAME org.apache.druid.cli.Main tools metadata-init --connectURI="jdbc:mysql://localhost:3306/druid" --user=druid --password=diurd \
|
||||||
&& /etc/init.d/mysql stop
|
&& /etc/init.d/mysql stop
|
||||||
ADD test-data /test-data
|
ADD test-data /test-data
|
||||||
|
|
||||||
|
@ -23,6 +23,7 @@ LC_ALL=C.UTF-8
|
|||||||
|
|
||||||
# JAVA OPTS
|
# JAVA OPTS
|
||||||
COMMON_DRUID_JAVA_OPTS=-Duser.timezone=UTC -Dfile.encoding=UTF-8 -Dlog4j.configurationFile=/shared/docker/lib/log4j2.xml -XX:+ExitOnOutOfMemoryError -XX:+HeapDumpOnOutOfMemoryError -XX:HeapDumpPath=/tmp
|
COMMON_DRUID_JAVA_OPTS=-Duser.timezone=UTC -Dfile.encoding=UTF-8 -Dlog4j.configurationFile=/shared/docker/lib/log4j2.xml -XX:+ExitOnOutOfMemoryError -XX:+HeapDumpOnOutOfMemoryError -XX:HeapDumpPath=/tmp
|
||||||
|
DRUID_DEP_BIN_DIR=/shared/docker/bin
|
||||||
DRUID_DEP_LIB_DIR=/shared/hadoop_xml:/shared/docker/lib/*:/usr/local/druid/lib/mysql-connector-java.jar
|
DRUID_DEP_LIB_DIR=/shared/hadoop_xml:/shared/docker/lib/*:/usr/local/druid/lib/mysql-connector-java.jar
|
||||||
|
|
||||||
# Druid configs
|
# Druid configs
|
||||||
|
@ -24,6 +24,7 @@ AWS_REGION=us-east-1
|
|||||||
|
|
||||||
# JAVA OPTS
|
# JAVA OPTS
|
||||||
COMMON_DRUID_JAVA_OPTS=-Duser.timezone=UTC -Dfile.encoding=UTF-8 -Dlog4j.configurationFile=/shared/docker/lib/log4j2.xml -XX:+ExitOnOutOfMemoryError -XX:+HeapDumpOnOutOfMemoryError -XX:HeapDumpPath=/tmp
|
COMMON_DRUID_JAVA_OPTS=-Duser.timezone=UTC -Dfile.encoding=UTF-8 -Dlog4j.configurationFile=/shared/docker/lib/log4j2.xml -XX:+ExitOnOutOfMemoryError -XX:+HeapDumpOnOutOfMemoryError -XX:HeapDumpPath=/tmp
|
||||||
|
DRUID_DEP_BIN_DIR=/shared/docker/bin
|
||||||
DRUID_DEP_LIB_DIR=/shared/hadoop_xml:/shared/docker/lib/*:/usr/local/druid/lib/mysql-connector-java.jar
|
DRUID_DEP_LIB_DIR=/shared/hadoop_xml:/shared/docker/lib/*:/usr/local/druid/lib/mysql-connector-java.jar
|
||||||
|
|
||||||
# Druid configs
|
# Druid configs
|
||||||
|
@ -1,5 +1,5 @@
|
|||||||
[program:druid-service]
|
[program:druid-service]
|
||||||
command=java %(ENV_SERVICE_DRUID_JAVA_OPTS)s %(ENV_COMMON_DRUID_JAVA_OPTS)s -cp %(ENV_DRUID_COMMON_CONF_DIR)s:%(ENV_DRUID_SERVICE_CONF_DIR)s:%(ENV_DRUID_DEP_LIB_DIR)s org.apache.druid.cli.Main server %(ENV_DRUID_SERVICE)s
|
command=%(ENV_DRUID_DEP_BIN_DIR)s/run-java %(ENV_SERVICE_DRUID_JAVA_OPTS)s %(ENV_COMMON_DRUID_JAVA_OPTS)s -cp %(ENV_DRUID_COMMON_CONF_DIR)s:%(ENV_DRUID_SERVICE_CONF_DIR)s:%(ENV_DRUID_DEP_LIB_DIR)s org.apache.druid.cli.Main server %(ENV_DRUID_SERVICE)s
|
||||||
redirect_stderr=true
|
redirect_stderr=true
|
||||||
priority=100
|
priority=100
|
||||||
autorestart=false
|
autorestart=false
|
||||||
|
@@ -649,6 +649,7 @@
 </property>
 </properties>
 <argLine>
+${jdk.strong.encapsulation.argLine}
 -Xmx128m
 -Duser.timezone=UTC
 -Dfile.encoding=UTF-8
@@ -30,7 +30,8 @@ cp -R docker $SHARED_DIR/docker

 pushd ../
 rm -rf distribution/target/apache-druid-$DRUID_VERSION-integration-test-bin
-mvn -P skip-static-checks,skip-tests -T1C -Danimal.sniffer.skip=true -Dcheckstyle.skip=true -Dweb.console.skip=true -Denforcer.skip=true -Dforbiddenapis.skip=true -Dmaven.javadoc.skip=true -Dpmd.skip=true -Dspotbugs.skip=true install -Pintegration-test
+mvn -Pskip-static-checks,skip-tests -T1C -Danimal.sniffer.skip=true -Dcheckstyle.skip=true -Dweb.console.skip=true -Dcyclonedx.skip=true -Denforcer.skip=true -Dforbiddenapis.skip=true -Dmaven.javadoc.skip=true -Dpmd.skip=true -Dspotbugs.skip=true install -Pintegration-test
+mv distribution/target/apache-druid-$DRUID_VERSION-integration-test-bin/bin $SHARED_DIR/docker/bin
 mv distribution/target/apache-druid-$DRUID_VERSION-integration-test-bin/lib $SHARED_DIR/docker/lib
 mv distribution/target/apache-druid-$DRUID_VERSION-integration-test-bin/extensions $SHARED_DIR/docker/extensions
 popd

@@ -62,11 +63,11 @@ then
 ## We put same version in both commands but as we have an if, correct code path will always be executed as this is generated script.
 ## <TODO> Remove if
 if [ -n "${HADOOP_VERSION}" ] && [ "${HADOOP_VERSION:0:1}" == "3" ]; then
-  java -cp "$SHARED_DIR/docker/lib/*" -Ddruid.extensions.hadoopDependenciesDir="$SHARED_DIR/hadoop-dependencies" org.apache.druid.cli.Main tools pull-deps -h org.apache.hadoop:hadoop-client-api:${hadoop.compile.version} -h org.apache.hadoop:hadoop-client-runtime:${hadoop.compile.version} -h org.apache.hadoop:hadoop-aws:${hadoop.compile.version} -h org.apache.hadoop:hadoop-azure:${hadoop.compile.version}
+  "$SHARED_DIR/docker/bin/run-java" -cp "$SHARED_DIR/docker/lib/*" -Ddruid.extensions.hadoopDependenciesDir="$SHARED_DIR/hadoop-dependencies" org.apache.druid.cli.Main tools pull-deps -h org.apache.hadoop:hadoop-client-api:${hadoop.compile.version} -h org.apache.hadoop:hadoop-client-runtime:${hadoop.compile.version} -h org.apache.hadoop:hadoop-aws:${hadoop.compile.version} -h org.apache.hadoop:hadoop-azure:${hadoop.compile.version}
   curl https://storage.googleapis.com/hadoop-lib/gcs/gcs-connector-hadoop3-latest.jar --output $HADOOP_GCS_DIR/gcs-connector-hadoop3-latest.jar
   cp $HADOOP_GCS_DIR/gcs-connector-hadoop3-latest.jar $DRUID_HDFS_EXT
 else
-  java -cp "$SHARED_DIR/docker/lib/*" -Ddruid.extensions.hadoopDependenciesDir="$SHARED_DIR/hadoop-dependencies" org.apache.druid.cli.Main tools pull-deps -h org.apache.hadoop:hadoop-client:${hadoop.compile.version} -h org.apache.hadoop:hadoop-aws:${hadoop.compile.version} -h org.apache.hadoop:hadoop-azure:${hadoop.compile.version}
+  "$SHARED_DIR/docker/bin/run-java" -cp "$SHARED_DIR/docker/lib/*" -Ddruid.extensions.hadoopDependenciesDir="$SHARED_DIR/hadoop-dependencies" org.apache.druid.cli.Main tools pull-deps -h org.apache.hadoop:hadoop-client:${hadoop.compile.version} -h org.apache.hadoop:hadoop-aws:${hadoop.compile.version} -h org.apache.hadoop:hadoop-azure:${hadoop.compile.version}
   curl https://storage.googleapis.com/hadoop-lib/gcs/gcs-connector-hadoop2-latest.jar --output $HADOOP_GCS_DIR/gcs-connector-hadoop2-latest.jar
   cp $HADOOP_GCS_DIR/gcs-connector-hadoop2-latest.jar $DRUID_HDFS_EXT
 fi

@@ -36,7 +36,11 @@ else
     ;;
   15)
     echo "Build druid-cluster with Java 15"
-    docker build -t druid/cluster --build-arg JDK_VERSION=15-slim-buster --build-arg ZK_VERSION --build-arg KAFKA_VERSION --build-arg CONFLUENT_VERSION --build-arg MYSQL_VERSION --build-arg MARIA_VERSION --build-arg USE_MARIA --build-arg APACHE_ARCHIVE_MIRROR_HOST $SHARED_DIR/docker
+    docker build -t druid/cluster --build-arg JDK_VERSION=15-slim-buster --build-arg ZK_VERSION --build-arg KAFKA_VERSION --build-arg CONFLUENT_VERSION --build-arg MYSQL_VERSION --build-arg MARIA_VERSION --build-arg MYSQL_DRIVER_CLASSNAME --build-arg APACHE_ARCHIVE_MIRROR_HOST $SHARED_DIR/docker
+    ;;
+  17)
+    echo "Build druid-cluster with Java 17"
+    docker build -t druid/cluster --build-arg JDK_VERSION=17-slim-buster --build-arg ZK_VERSION --build-arg KAFKA_VERSION --build-arg CONFLUENT_VERSION --build-arg MYSQL_VERSION --build-arg MARIA_VERSION --build-arg MYSQL_DRIVER_CLASSNAME --build-arg APACHE_ARCHIVE_MIRROR_HOST $SHARED_DIR/docker
     ;;
   *)
     echo "Invalid JVM Runtime given. Stopping"

@@ -354,7 +354,7 @@ name: Error Prone Annotations
 license_category: binary
 module: java-core
 license_name: Apache License version 2.0
-version: 2.11.0
+version: 2.20.0
 libraries:
   - com.google.errorprone: error_prone_annotations

32
pom.xml
@@ -91,7 +91,7 @@
     <datasketches.memory.version>2.2.0</datasketches.memory.version>
     <derby.version>10.14.2.0</derby.version>
     <dropwizard.metrics.version>4.0.0</dropwizard.metrics.version>
-    <errorprone.version>2.11.0</errorprone.version>
+    <errorprone.version>2.20.0</errorprone.version>
     <fastutil.version>8.5.4</fastutil.version>
     <guava.version>16.0.1</guava.version>
     <guice.version>4.1.0</guice.version>

@@ -124,7 +124,7 @@
     <com.google.apis.client.version>1.26.0</com.google.apis.client.version>
     <com.google.apis.compute.version>v1-rev20190607-${com.google.apis.client.version}</com.google.apis.compute.version>
     <com.google.apis.storage.version>v1-rev20190523-${com.google.apis.client.version}</com.google.apis.storage.version>
-    <jdk.surefire.argLine><!-- empty placeholder --></jdk.surefire.argLine>
+    <jdk.strong.encapsulation.argLine><!-- empty placeholder --></jdk.strong.encapsulation.argLine>
     <repoOrgId>maven.org</repoOrgId>
     <repoOrgName>Maven Central Repository</repoOrgName>
     <repoOrgUrl>https://repo1.maven.org/maven2/</repoOrgUrl>

@@ -1622,7 +1622,7 @@
   <!-- set default options -->
   <argLine>
     @{jacocoArgLine}
-    ${jdk.surefire.argLine}
+    ${jdk.strong.encapsulation.argLine}
     -Xmx1500m
     -XX:MaxDirectMemorySize=2500m
     -XX:+ExitOnOutOfMemoryError

@@ -1746,7 +1746,7 @@
 <plugin>
   <groupId>org.apache.maven.plugins</groupId>
   <artifactId>maven-compiler-plugin</artifactId>
-  <version>3.8.1</version>
+  <version>3.11.0</version>
   <configuration>
     <source>${maven.compiler.source}</source>
     <target>${maven.compiler.target}</target>

@@ -1768,13 +1768,20 @@
         <jdk>[9,)</jdk>
       </activation>
       <properties>
-        <jdk.surefire.argLine>
-          <!-- required for DataSketches Memory on Java 11+ -->
+        <jdk.strong.encapsulation.argLine>
+          <!-- Strong encapsulation parameters -->
+          <!-- When updating this list, update all four locations: -->
+          <!-- 1) ForkingTaskRunner#STRONG_ENCAPSULATION_PROPERTIES -->
+          <!-- 2) docs/operations/java.md, "Strong encapsulation" section -->
+          <!-- 3) pom.xml, jdk.strong.encapsulation.argLine (here) -->
+          <!-- 4) examples/bin/run-java script -->
+
+          <!-- required for DataSketches Memory -->
           --add-exports=java.base/jdk.internal.ref=ALL-UNNAMED
           --add-exports=java.base/jdk.internal.misc=ALL-UNNAMED
-          --add-opens=java.base/jdk.internal.ref=ALL-UNNAMED
           --add-opens=java.base/java.nio=ALL-UNNAMED
           --add-opens=java.base/sun.nio.ch=ALL-UNNAMED
+          --add-opens=java.base/jdk.internal.ref=ALL-UNNAMED

           <!-- required for NativeIO#getfd -->
           --add-opens=java.base/java.io=ALL-UNNAMED
@@ -1782,10 +1789,12 @@
           <!-- required for Guice -->
           --add-opens=java.base/java.lang=ALL-UNNAMED

-          <!-- required for certain EqualsVerifier tests -->
-          <!-- (not required in production) -->
+          <!-- required for metrics -->
+          --add-opens=jdk.management/com.sun.management.internal=ALL-UNNAMED
+
+          <!-- required for certain EqualsVerifier tests (not required in production) -->
           --add-opens=java.base/java.util=ALL-UNNAMED
-        </jdk.surefire.argLine>
+        </jdk.strong.encapsulation.argLine>
       </properties>
       <build>
         <plugins>

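For readers wiring the same flags up outside Maven: the comment above names ForkingTaskRunner#STRONG_ENCAPSULATION_PROPERTIES as one of the four locations that carry this list, and the general idea is simply to prepend the add-exports/add-opens arguments to the child JVM's command line on Java 11+. The sketch below illustrates that idea only; the class, constant, and method names are hypothetical and this is not the actual ForkingTaskRunner code.

import java.util.ArrayList;
import java.util.Arrays;
import java.util.List;

public class StrongEncapsulationArgsSketch
{
  // Hypothetical constant mirroring the add-exports/add-opens list from the pom above
  // (the EqualsVerifier-only entry is omitted because it is not needed in production).
  private static final List<String> STRONG_ENCAPSULATION_PROPERTIES = Arrays.asList(
      "--add-exports=java.base/jdk.internal.ref=ALL-UNNAMED",
      "--add-exports=java.base/jdk.internal.misc=ALL-UNNAMED",
      "--add-opens=java.base/java.nio=ALL-UNNAMED",
      "--add-opens=java.base/sun.nio.ch=ALL-UNNAMED",
      "--add-opens=java.base/jdk.internal.ref=ALL-UNNAMED",
      "--add-opens=java.base/java.io=ALL-UNNAMED",
      "--add-opens=java.base/java.lang=ALL-UNNAMED",
      "--add-opens=jdk.management/com.sun.management.internal=ALL-UNNAMED"
  );

  // Builds a child JVM command, adding the strong-encapsulation flags only when the
  // running JVM enforces strong encapsulation of JDK internals (Java 11 and later).
  public static List<String> buildCommand(String classpath, String mainClass)
  {
    final List<String> command = new ArrayList<>();
    command.add("java");
    if (Runtime.version().feature() >= 11) {
      command.addAll(STRONG_ENCAPSULATION_PROPERTIES);
    }
    command.add("-cp");
    command.add(classpath);
    command.add(mainClass);
    return command;
  }
}
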
@@ -1821,7 +1830,7 @@
   <showWarnings>false</showWarnings>
   <compilerArgs>
     <arg>-XDcompilePolicy=simple</arg>
-    <arg>-Xplugin:ErrorProne -XepExcludedPaths:.*/target/generated-(test-)?sources/.* -XepDisableWarningsInGeneratedCode -Xep:ClassCanBeStatic:ERROR -Xep:PreconditionsInvalidPlaceholder:ERROR -Xep:MissingOverride:ERROR -Xep:DefaultCharset:ERROR -Xep:QualifierOrScopeOnInjectMethod:ERROR -Xep:AssistedInjectAndInjectOnSameConstructor -Xep:AutoFactoryAtInject -Xep:ClassName -Xep:ComparisonContractViolated -Xep:DepAnn -Xep:DivZero -Xep:EmptyIf -Xep:InjectInvalidTargetingOnScopingAnnotation -Xep:InjectMoreThanOneQualifier -Xep:InjectScopeAnnotationOnInterfaceOrAbstractClass -Xep:InjectScopeOrQualifierAnnotationRetention -Xep:InjectedConstructorAnnotations -Xep:InsecureCryptoUsage -Xep:JMockTestWithoutRunWithOrRuleAnnotation -Xep:JavaxInjectOnFinalField -Xep:LockMethodChecker -Xep:LongLiteralLowerCaseSuffix -Xep:NoAllocation -Xep:NonRuntimeAnnotation -Xep:NumericEquality -Xep:ProtoStringFieldReferenceEquality -Xep:UnlockMethod</arg>
+    <arg>-Xplugin:ErrorProne -XepExcludedPaths:.*/target/generated-(test-)?sources/.* -XepDisableWarningsInGeneratedCode -Xep:ClassCanBeStatic:ERROR -Xep:PreconditionsInvalidPlaceholder:ERROR -Xep:MissingOverride:ERROR -Xep:DefaultCharset:ERROR -Xep:QualifierOrScopeOnInjectMethod:ERROR -Xep:AssistedInjectAndInjectOnSameConstructor -Xep:AutoFactoryAtInject -Xep:ClassName -Xep:ComparisonContractViolated -Xep:DepAnn -Xep:EmptyIf -Xep:InjectInvalidTargetingOnScopingAnnotation -Xep:InjectMoreThanOneQualifier -Xep:InjectScopeAnnotationOnInterfaceOrAbstractClass -Xep:InjectScopeOrQualifierAnnotationRetention -Xep:InjectedConstructorAnnotations -Xep:InsecureCryptoUsage -Xep:JavaxInjectOnFinalField -Xep:LongLiteralLowerCaseSuffix -Xep:NoAllocation -Xep:NonRuntimeAnnotation -Xep:NumericEquality -Xep:ProtoStringFieldReferenceEquality</arg>
     <!-- Error Prone requires exemptions for Java >= 16, see https://errorprone.info/docs/installation#maven -->
     <arg>-J--add-exports=jdk.compiler/com.sun.tools.javac.api=ALL-UNNAMED</arg>
     <arg>-J--add-exports=jdk.compiler/com.sun.tools.javac.file=ALL-UNNAMED</arg>

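A few -Xep flags (DivZero, JMockTestWithoutRunWithOrRuleAnnotation, LockMethodChecker, UnlockMethod) disappear from the new argument, presumably because those checks are no longer available under Error Prone 2.20. One check that stays at ERROR, PreconditionsInvalidPlaceholder, is likely what motivates the Preconditions message fixes later in this diff: Guava's Preconditions formatting understands only "%s". The snippet below is an illustration of that check, not project code; the class and method names are made up.

import com.google.common.base.Preconditions;

public class PlaceholderExample
{
  static void validate(int operandCount)
  {
    // Flagged by PreconditionsInvalidPlaceholder: Guava only substitutes "%s",
    // so "%,d" would appear literally in the rendered message.
    Preconditions.checkState(operandCount == 2, "Expected 2 operands, got[%,d]", operandCount);

    // Accepted: "%s" is the only placeholder Preconditions knows how to fill.
    Preconditions.checkState(operandCount == 2, "Expected 2 operands, got[%s]", operandCount);
  }
}
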
@@ -2046,6 +2055,7 @@
   <id>skip-static-checks</id>
   <properties>
     <animal.sniffer.skip>true</animal.sniffer.skip>
+    <cyclonedx.skip>true</cyclonedx.skip>
     <checkstyle.skip>true</checkstyle.skip>
     <enforcer.skip>true</enforcer.skip>
     <forbiddenapis.skip>true</forbiddenapis.skip>

@@ -524,7 +524,7 @@ public class FrameProcessorExecutor
       }

       // Wait for all running processors to stop running. Then clean them up outside the critical section.
-      while (processorsToCancel.stream().anyMatch(runningProcessors::containsKey)) {
+      while (anyIsRunning(processorsToCancel)) {
         lock.wait();
       }
     }
@@ -605,4 +605,16 @@ public class FrameProcessorExecutor
     log.debug(StringUtils.encodeForFormat(sb.toString()));
     }
   }
+
+  @GuardedBy("lock")
+  private boolean anyIsRunning(Set<FrameProcessor<?>> processors)
+  {
+    for (final FrameProcessor<?> processor : processors) {
+      if (runningProcessors.containsKey(processor)) {
+        return true;
+      }
+    }
+
+    return false;
+  }
 }

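The stream-based containsKey check is replaced by an explicit loop in a @GuardedBy("lock") helper. A plausible reason, stated here as an assumption rather than something the diff itself explains, is that lock-analysis tooling (such as Error Prone's GuardedBy checker) cannot see that a lambda or method reference executes while the lock is held, whereas every access in a plain loop inside an annotated method can be attributed to the lock. The standalone example below illustrates that pattern with hypothetical names, using the JSR-305 GuardedBy annotation for the sake of the example.

import java.util.HashMap;
import java.util.Map;
import java.util.Set;
import javax.annotation.concurrent.GuardedBy;

public class GuardedLookupExample
{
  private final Object lock = new Object();

  @GuardedBy("lock")
  private final Map<String, String> running = new HashMap<>();

  public boolean anyRunning(Set<String> ids)
  {
    synchronized (lock) {
      // A plain loop keeps every map access in a method the analyzer can tie to "lock";
      // ids.stream().anyMatch(running::containsKey) would hide the access inside a lambda.
      for (String id : ids) {
        if (running.containsKey(id)) {
          return true;
        }
      }
      return false;
    }
  }
}
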
@@ -45,6 +45,7 @@ import java.util.Map;
 import java.util.Set;

 /**
+ *
  */
 public class ArithmeticPostAggregator implements PostAggregator
 {
@@ -75,7 +76,11 @@ public class ArithmeticPostAggregator implements PostAggregator
   )
   {
     Preconditions.checkArgument(fnName != null, "fn cannot not be null");
-    Preconditions.checkArgument(fields != null && fields.size() > 1, "Illegal number of fields[%s], must be > 1");
+    Preconditions.checkArgument(
+        fields != null && fields.size() > 1,
+        "Illegal number of fields[%s], must be > 1",
+        fields.size()
+    );

     this.name = name;
     this.fnName = fnName;

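The old call declared a "%s" placeholder but never passed a value for it, so the failure message could not report the actual field count; the rewrite passes fields.size() as the formatting argument. A minimal, self-contained illustration of the difference (the class name is invented for the example):

import com.google.common.base.Preconditions;
import com.google.common.collect.ImmutableList;
import java.util.List;

public class CheckArgumentMessageExample
{
  public static void main(String[] args)
  {
    List<String> fields = ImmutableList.of("a");
    try {
      // With the count supplied, the message reads "Illegal number of fields[1], must be > 1"
      // instead of leaving the "%s" placeholder unfilled.
      Preconditions.checkArgument(
          fields.size() > 1,
          "Illegal number of fields[%s], must be > 1",
          fields.size()
      );
    }
    catch (IllegalArgumentException e) {
      System.out.println(e.getMessage());
    }
  }
}
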
@@ -53,7 +53,11 @@ public class DoubleGreatestPostAggregator implements PostAggregator
       @JsonProperty("fields") List<PostAggregator> fields
   )
   {
-    Preconditions.checkArgument(fields != null && fields.size() > 0, "Illegal number of fields[%s], must be > 0");
+    Preconditions.checkArgument(
+        fields != null && fields.size() > 0,
+        "Illegal number of fields[%s], must be > 0",
+        fields.size()
+    );

     this.name = name;
     this.fields = fields;
@@ -53,7 +53,11 @@ public class DoubleLeastPostAggregator implements PostAggregator
       @JsonProperty("fields") List<PostAggregator> fields
   )
   {
-    Preconditions.checkArgument(fields != null && fields.size() > 0, "Illegal number of fields[%s], must be > 0");
+    Preconditions.checkArgument(
+        fields != null && fields.size() > 0,
+        "Illegal number of fields[%s], must be > 0",
+        fields.size()
+    );

     this.name = name;
     this.fields = fields;
@@ -53,7 +53,11 @@ public class LongGreatestPostAggregator implements PostAggregator
       @JsonProperty("fields") List<PostAggregator> fields
   )
   {
-    Preconditions.checkArgument(fields != null && fields.size() > 0, "Illegal number of fields[%s], must be > 0");
+    Preconditions.checkArgument(
+        fields != null && fields.size() > 0,
+        "Illegal number of fields[%s], must be > 0",
+        fields.size()
+    );

     this.name = name;
     this.fields = fields;
@@ -53,7 +53,11 @@ public class LongLeastPostAggregator implements PostAggregator
       @JsonProperty("fields") List<PostAggregator> fields
   )
   {
-    Preconditions.checkArgument(fields != null && fields.size() > 0, "Illegal number of fields[%s], must be > 0");
+    Preconditions.checkArgument(
+        fields != null && fields.size() > 0,
+        "Illegal number of fields[%s], must be > 0",
+        fields.size()
+    );

     this.name = name;
     this.fields = fields;

@@ -1164,19 +1164,16 @@ public class MultiValuedDimensionTest extends InitializedNullHandlingTest
         null
     );
     Sequence<Result<TopNResultValue>> result = runner.run(QueryPlus.wrap(query));
+
+    final Map<String, Object> thirdMap = new HashMap<>();
+    thirdMap.put("texpr", NullHandling.sqlCompatible() ? "foo" : null);
+    thirdMap.put("count", 1L);
+
     List<Map<String, Object>> expected =
         ImmutableList.<Map<String, Object>>builder()
                      .add(ImmutableMap.of("texpr", "t3foo", "count", 2L))
                      .add(ImmutableMap.of("texpr", "t5foo", "count", 2L))
-                     .add(
-                         new HashMap<String, Object>()
-                         {
-                           {
-                             put("texpr", NullHandling.sqlCompatible() ? "foo" : null);
-                             put("count", 1L);
-                           }
-                         }
-                     )
+                     .add(thirdMap)
                      .add(ImmutableMap.of("texpr", "t1foo", "count", 1L))
                      .add(ImmutableMap.of("texpr", "t2foo", "count", 1L))
                      .add(ImmutableMap.of("texpr", "t4foo", "count", 1L))
@@ -1185,7 +1182,7 @@ public class MultiValuedDimensionTest extends InitializedNullHandlingTest
         .build();

     List<Result<TopNResultValue>> expectedResults = Collections.singletonList(
-        new Result<TopNResultValue>(
+        new Result<>(
            DateTimes.of("2011-01-12T00:00:00.000Z"),
            new TopNResultValue(
                expected

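Worth noting: the third expected row keeps a plain HashMap (populated via put) rather than becoming an ImmutableMap like its neighbours, because its "texpr" value can be null in default-value mode and Guava's immutable collections reject null keys, values, and elements. A small standalone illustration (class name invented for the example):

import com.google.common.collect.ImmutableMap;
import java.util.HashMap;
import java.util.Map;

public class NullValueMapExample
{
  public static void main(String[] args)
  {
    // A mutable HashMap accepts a null value...
    Map<String, Object> row = new HashMap<>();
    row.put("texpr", null);
    row.put("count", 1L);
    System.out.println(row);

    // ...while Guava's immutable collections reject nulls at construction time.
    try {
      ImmutableMap.of("texpr", null, "count", 1L);
    }
    catch (NullPointerException e) {
      System.out.println("ImmutableMap rejects null values: " + e);
    }
  }
}
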
@@ -38,23 +38,9 @@ import java.util.List;

 public class ArrayDoubleGroupByColumnSelectorStrategyTest
 {
-  protected final List<List<Double>> dictionary = new ArrayList<List<Double>>()
-  {
-    {
-      add(ImmutableList.of(1.0, 2.0));
-      add(ImmutableList.of(2.0, 3.0));
-      add(ImmutableList.of(1.0));
-    }
-  };
+  protected final List<List<Double>> dictionary = new ArrayList<>();

-  protected final Object2IntOpenHashMap<List<Double>> reverseDictionary = new Object2IntOpenHashMap<List<Double>>()
-  {
-    {
-      put(ImmutableList.of(1.0, 2.0), 0);
-      put(ImmutableList.of(2.0, 3.0), 1);
-      put(ImmutableList.of(1.0), 2);
-    }
-  };
+  protected final Object2IntOpenHashMap<List<Double>> reverseDictionary = new Object2IntOpenHashMap<>();

   private final ByteBuffer buffer1 = ByteBuffer.allocate(4);
   private final ByteBuffer buffer2 = ByteBuffer.allocate(4);
@@ -64,7 +50,15 @@ public class ArrayDoubleGroupByColumnSelectorStrategyTest
   @Before
   public void setup()
   {
+    dictionary.add(ImmutableList.of(1.0, 2.0));
+    dictionary.add(ImmutableList.of(2.0, 3.0));
+    dictionary.add(ImmutableList.of(1.0));
+
     reverseDictionary.defaultReturnValue(-1);
+    reverseDictionary.put(ImmutableList.of(1.0, 2.0), 0);
+    reverseDictionary.put(ImmutableList.of(2.0, 3.0), 1);
+    reverseDictionary.put(ImmutableList.of(1.0), 2);
+
     strategy = new ArrayDoubleGroupByColumnSelectorStrategy(dictionary, reverseDictionary);
   }

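This and the following test changes remove Java's double-brace initialization idiom in favour of populating the collections in @Before. The idiom creates an anonymous subclass whose instance initializer fills the collection; when used on an instance field it also captures a reference to the enclosing object, and the extra anonymous classes tend to trip up static analysis and equality checks, which is presumably why they are being phased out here. A small contrast of the two styles (example class name invented):

import java.util.ArrayList;
import java.util.List;

public class DoubleBraceExample
{
  public static void main(String[] args)
  {
    // Double-brace initialization: an anonymous ArrayList subclass with an instance initializer.
    List<String> doubleBrace = new ArrayList<String>()
    {
      {
        add("a");
        add("b");
      }
    };

    // Plain initialization: same contents, but the list is a regular ArrayList.
    List<String> plain = new ArrayList<>();
    plain.add("a");
    plain.add("b");

    System.out.println(doubleBrace.getClass()); // prints an anonymous class, e.g. DoubleBraceExample$1
    System.out.println(plain.getClass());       // prints class java.util.ArrayList
  }
}
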
@@ -41,23 +41,9 @@ import java.util.List;
 @RunWith(MockitoJUnitRunner.class)
 public class ArrayLongGroupByColumnSelectorStrategyTest
 {
-  protected final List<List<Long>> dictionary = new ArrayList<List<Long>>()
-  {
-    {
-      add(ImmutableList.of(1L, 2L));
-      add(ImmutableList.of(2L, 3L));
-      add(ImmutableList.of(1L));
-    }
-  };
+  protected final List<List<Long>> dictionary = new ArrayList<>();

-  protected final Object2IntOpenHashMap<List<Long>> reverseDictionary = new Object2IntOpenHashMap<List<Long>>()
-  {
-    {
-      put(ImmutableList.of(1L, 2L), 0);
-      put(ImmutableList.of(2L, 3L), 1);
-      put(ImmutableList.of(1L), 2);
-    }
-  };
+  protected final Object2IntOpenHashMap<List<Long>> reverseDictionary = new Object2IntOpenHashMap<>();

   private final ByteBuffer buffer1 = ByteBuffer.allocate(4);
   private final ByteBuffer buffer2 = ByteBuffer.allocate(4);
@@ -67,7 +53,15 @@ public class ArrayLongGroupByColumnSelectorStrategyTest
   @Before
   public void setup()
   {
+    dictionary.add(ImmutableList.of(1L, 2L));
+    dictionary.add(ImmutableList.of(2L, 3L));
+    dictionary.add(ImmutableList.of(1L));
+
     reverseDictionary.defaultReturnValue(-1);
+    reverseDictionary.put(ImmutableList.of(1L, 2L), 0);
+    reverseDictionary.put(ImmutableList.of(2L, 3L), 1);
+    reverseDictionary.put(ImmutableList.of(1L), 2);
+
     strategy = new ArrayLongGroupByColumnSelectorStrategy(dictionary, reverseDictionary);
   }

@@ -37,36 +37,15 @@ import org.mockito.Mockito;
 import org.mockito.junit.MockitoJUnitRunner;

 import java.nio.ByteBuffer;
-import java.util.HashMap;

 @RunWith(MockitoJUnitRunner.class)
 public class ArrayStringGroupByColumnSelectorStrategyTest
 {
-  private final BiMap<String, Integer> DICTIONARY_INT = HashBiMap.create(new HashMap<String, Integer>()
-  {
-    {
-      put("a", 0);
-      put("b", 1);
-      put("bd", 2);
-      put("d", 3);
-      put("e", 4);
-    }
-  });
+  private final BiMap<String, Integer> dictionaryInt = HashBiMap.create();

   // The dictionary has been constructed such that the values are not sorted lexicographically
   // so we can tell when the comparator uses a lexicographic comparison and when it uses the indexes.
-  private final BiMap<ComparableIntArray, Integer> INDEXED_INTARRAYS = HashBiMap.create(
-      new HashMap<ComparableIntArray, Integer>()
-      {
-        {
-          put(ComparableIntArray.of(0, 1), 0);
-          put(ComparableIntArray.of(2, 4), 1);
-          put(ComparableIntArray.of(0, 2), 2);
-        }
-      }
-  );
+  private final BiMap<ComparableIntArray, Integer> indexedIntArrays = HashBiMap.create();

   private final ByteBuffer buffer1 = ByteBuffer.allocate(4);
   private final ByteBuffer buffer2 = ByteBuffer.allocate(4);
@@ -76,7 +55,17 @@ public class ArrayStringGroupByColumnSelectorStrategyTest
   @Before
   public void setup()
   {
-    strategy = new ArrayStringGroupByColumnSelectorStrategy(DICTIONARY_INT, INDEXED_INTARRAYS);
+    strategy = new ArrayStringGroupByColumnSelectorStrategy(dictionaryInt, indexedIntArrays);
+
+    dictionaryInt.put("a", 0);
+    dictionaryInt.put("b", 1);
+    dictionaryInt.put("bd", 2);
+    dictionaryInt.put("d", 3);
+    dictionaryInt.put("e", 4);
+
+    indexedIntArrays.put(ComparableIntArray.of(0, 1), 0);
+    indexedIntArrays.put(ComparableIntArray.of(2, 4), 1);
+    indexedIntArrays.put(ComparableIntArray.of(0, 2), 2);
   }

   @Test

@@ -20,6 +20,7 @@
 package org.apache.druid.segment;

 import com.google.common.collect.ImmutableList;
+import com.google.common.collect.ImmutableMap;
 import org.apache.druid.data.input.MapBasedInputRow;
 import org.apache.druid.query.aggregation.AggregatorFactory;
 import org.apache.druid.query.aggregation.first.StringFirstAggregatorFactory;
@@ -38,7 +39,6 @@ import java.io.File;
 import java.time.Instant;
 import java.util.ArrayList;
 import java.util.Arrays;
-import java.util.HashMap;
 import java.util.List;
 import java.util.Map;

@@ -66,20 +66,14 @@ public class IndexMergerRollupTest extends InitializedNullHandlingTest
   ) throws Exception
   {
     List<Map<String, Object>> eventsList = Arrays.asList(
-        new HashMap<String, Object>()
-        {
-          {
-            put("d", "d1");
-            put("m", "m1");
-          }
-        },
-        new HashMap<String, Object>()
-        {
-          {
-            put("d", "d1");
-            put("m", "m2");
-          }
-        }
+        ImmutableMap.of(
+            "d", "d1",
+            "m", "m1"
+        ),
+        ImmutableMap.of(
+            "d", "d1",
+            "m", "m2"
+        )
     );

     final File tempDir = temporaryFolder.newFolder();

@@ -20,10 +20,10 @@
 package org.apache.druid.segment.data;

 import com.google.common.collect.ImmutableList;
+import com.google.common.collect.Lists;
 import org.junit.Assert;
 import org.junit.Test;

-import java.util.ArrayList;
 import java.util.HashSet;
 import java.util.List;
 import java.util.Set;
@@ -66,22 +66,10 @@ public class ComparableListTest
     Assert.assertEquals(1, comparableList.compareTo(new ComparableList(ImmutableList.of(1, 2))));
     Assert.assertEquals(-1, comparableList.compareTo(new ComparableList(ImmutableList.of(1, 2, 3, 4))));
     Assert.assertTrue(comparableList.compareTo(new ComparableList(ImmutableList.of(2))) < 0);
-    ComparableList nullList = new ComparableList(new ArrayList<Integer>()
-    {
-      {
-        add(null);
-        add(1);
-      }
-    });
+    ComparableList nullList = new ComparableList(Lists.newArrayList(null, 1));

     Assert.assertTrue(comparableList.compareTo(nullList) > 0);
     Assert.assertTrue(nullList.compareTo(comparableList) < 0);
-    Assert.assertTrue(nullList.compareTo(new ComparableList(new ArrayList<Integer>()
-    {
-      {
-        add(null);
-        add(1);
-      }
-    })) == 0);
+    Assert.assertTrue(nullList.compareTo(new ComparableList(Lists.newArrayList(null, 1))) == 0);
   }
 }

@@ -544,7 +544,7 @@ public class Expressions
         || kind == SqlKind.LESS_THAN
         || kind == SqlKind.LESS_THAN_OR_EQUAL) {
       final List<RexNode> operands = ((RexCall) rexNode).getOperands();
-      Preconditions.checkState(operands.size() == 2, "Expected 2 operands, got[%,d]", operands.size());
+      Preconditions.checkState(operands.size() == 2, "Expected 2 operands, got[%s]", operands.size());
       boolean flip = false;
       RexNode lhs = operands.get(0);
       RexNode rhs = operands.get(1);

@@ -276,7 +276,7 @@ public class DruidJoinRule extends RelOptRule
     }

     final List<RexNode> operands = ((RexCall) subCondition).getOperands();
-    Preconditions.checkState(operands.size() == 2, "Expected 2 operands, got[%,d]", operands.size());
+    Preconditions.checkState(operands.size() == 2, "Expected 2 operands, got[%s]", operands.size());

     if (isLeftExpression(operands.get(0), numLeftFields) && isRightInputRef(operands.get(1), numLeftFields)) {
       equalitySubConditions.add(Pair.of(operands.get(0), (RexInputRef) operands.get(1)));

@@ -5095,7 +5095,8 @@ public class CalciteJoinQueryTest extends BaseCalciteQueryTest
         .context(queryContext)
         .build();

-    assert query.context().getEnableJoinFilterPushDown(); // filter pushdown must be enabled
+    Assert.assertTrue("filter pushdown must be enabled", query.context().getEnableJoinFilterPushDown());

     // no results will be produced since the filter values aren't in the table
     testQuery(
         "SELECT f1.m1, f2.m1\n"
@@ -5217,7 +5218,8 @@ public class CalciteJoinQueryTest extends BaseCalciteQueryTest
         .context(queryContext)
         .build();

-    assert query.context().getEnableJoinFilterPushDown(); // filter pushdown must be enabled
+    Assert.assertTrue("filter pushdown must be enabled", query.context().getEnableJoinFilterPushDown());

     // (dim1, dim2, m1) in foo look like
     // [, a, 1.0]
     // [10.1, , 2.0]

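The switch from the assert keyword to Assert.assertTrue matters in tests because assert statements are evaluated only when the JVM runs with -ea; without that flag they are silent no-ops, so a broken precondition would never fail the build. JUnit's Assert always evaluates the condition and reports the message on failure. A small illustration (example class name invented; both checks pass as written):

import org.junit.Assert;
import org.junit.Test;

public class AssertVsAssertTrueExample
{
  @Test
  public void alwaysChecked()
  {
    boolean pushDownEnabled = true;

    // Evaluated only when the JVM is started with -ea; otherwise it is skipped entirely.
    assert pushDownEnabled : "filter pushdown must be enabled";

    // Always evaluated, and the message is attached to the failure if the condition is false.
    Assert.assertTrue("filter pushdown must be enabled", pushDownEnabled);
  }
}
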
@@ -759,15 +759,12 @@ public class SqlResourceTest extends CalciteTestBase
     Assert.assertEquals(200, response.getStatus());
     Assert.assertEquals("yes", response.getHeader("X-Druid-SQL-Header-Included"));
     Assert.assertEquals(
-        new ArrayList<Object>()
-        {
-          {
-            add(EXPECTED_COLUMNS_FOR_RESULT_FORMAT_TESTS);
-            add(EXPECTED_TYPES_FOR_RESULT_FORMAT_TESTS);
-            add(EXPECTED_SQL_TYPES_FOR_RESULT_FORMAT_TESTS);
-            addAll(Arrays.asList(expectedQueryResults));
-          }
-        },
+        ImmutableList.builder()
+                     .add(EXPECTED_COLUMNS_FOR_RESULT_FORMAT_TESTS)
+                     .add(EXPECTED_TYPES_FOR_RESULT_FORMAT_TESTS)
+                     .add(EXPECTED_SQL_TYPES_FOR_RESULT_FORMAT_TESTS)
+                     .addAll(Arrays.asList(expectedQueryResults))
+                     .build(),
         JSON_MAPPER.readValue(response.baos.toByteArray(), Object.class)
     );

@@ -779,14 +776,11 @@ public class SqlResourceTest extends CalciteTestBase
     Assert.assertEquals(200, responseNoSqlTypesHeader.getStatus());
     Assert.assertEquals("yes", responseNoSqlTypesHeader.getHeader("X-Druid-SQL-Header-Included"));
     Assert.assertEquals(
-        new ArrayList<Object>()
-        {
-          {
-            add(EXPECTED_COLUMNS_FOR_RESULT_FORMAT_TESTS);
-            add(EXPECTED_TYPES_FOR_RESULT_FORMAT_TESTS);
-            addAll(Arrays.asList(expectedQueryResults));
-          }
-        },
+        ImmutableList.builder()
+                     .add(EXPECTED_COLUMNS_FOR_RESULT_FORMAT_TESTS)
+                     .add(EXPECTED_TYPES_FOR_RESULT_FORMAT_TESTS)
+                     .addAll(Arrays.asList(expectedQueryResults))
+                     .build(),
         JSON_MAPPER.readValue(responseNoSqlTypesHeader.baos.toByteArray(), Object.class)
     );

@@ -798,14 +792,11 @@ public class SqlResourceTest extends CalciteTestBase
     Assert.assertEquals(200, responseNoTypesHeader.getStatus());
     Assert.assertEquals("yes", responseNoTypesHeader.getHeader("X-Druid-SQL-Header-Included"));
     Assert.assertEquals(
-        new ArrayList<Object>()
-        {
-          {
-            add(EXPECTED_COLUMNS_FOR_RESULT_FORMAT_TESTS);
-            add(EXPECTED_SQL_TYPES_FOR_RESULT_FORMAT_TESTS);
-            addAll(Arrays.asList(expectedQueryResults));
-          }
-        },
+        ImmutableList.builder()
+                     .add(EXPECTED_COLUMNS_FOR_RESULT_FORMAT_TESTS)
+                     .add(EXPECTED_SQL_TYPES_FOR_RESULT_FORMAT_TESTS)
+                     .addAll(Arrays.asList(expectedQueryResults))
+                     .build(),
         JSON_MAPPER.readValue(responseNoTypesHeader.baos.toByteArray(), Object.class)
     );

@@ -817,13 +808,10 @@ public class SqlResourceTest extends CalciteTestBase
     Assert.assertEquals(200, responseNoTypes.getStatus());
     Assert.assertEquals("yes", responseNoTypes.getHeader("X-Druid-SQL-Header-Included"));
     Assert.assertEquals(
-        new ArrayList<Object>()
-        {
-          {
-            add(EXPECTED_COLUMNS_FOR_RESULT_FORMAT_TESTS);
-            addAll(Arrays.asList(expectedQueryResults));
-          }
-        },
+        ImmutableList.builder()
+                     .add(EXPECTED_COLUMNS_FOR_RESULT_FORMAT_TESTS)
+                     .addAll(Arrays.asList(expectedQueryResults))
+                     .build(),
         JSON_MAPPER.readValue(responseNoTypes.baos.toByteArray(), Object.class)
     );
