Fix ITSqlInputSourceTest (#10194)

* Fix ITSqlInputSourceTest.java
Maytas Monsereenusorn 2020-07-21 09:52:13 -07:00, committed by GitHub
parent 41982116f4
commit dd7a32ad48
6 changed files with 34 additions and 17 deletions

File: .travis.yml

@@ -321,6 +321,14 @@ jobs:
script: *run_integration_test
after_failure: *integration_test_diags
+- &integration_input_source
+name: "(Compile=openjdk8, Run=openjdk8) input source integration test"
+jdk: openjdk8
+services: *integration_test_services
+env: TESTNG_GROUPS='-Dgroups=input-source' JVM_RUNTIME='-Djvm.runtime=8'
+script: *run_integration_test
+after_failure: *integration_test_diags
- &integration_perfect_rollup_parallel_batch_index
name: "(Compile=openjdk8, Run=openjdk8) perfect rollup parallel batch index integration test"
jdk: openjdk8
@@ -405,7 +413,7 @@ jobs:
name: "(Compile=openjdk8, Run=openjdk8) other integration test"
jdk: openjdk8
services: *integration_test_services
-env: TESTNG_GROUPS='-DexcludedGroups=batch-index,input-format,perfect-rollup-parallel-batch-index,kafka-index,query,realtime-index,security,s3-deep-storage,gcs-deep-storage,azure-deep-storage,hdfs-deep-storage,s3-ingestion,kinesis-index,kinesis-data-format,kafka-transactional-index,kafka-index-slow,kafka-transactional-index-slow,kafka-data-format,hadoop-s3-to-s3-deep-storage,hadoop-s3-to-hdfs-deep-storage,hadoop-azure-to-azure-deep-storage,hadoop-azure-to-hdfs-deep-storage,hadoop-gcs-to-gcs-deep-storage,hadoop-gcs-to-hdfs-deep-storage,aliyun-oss-deep-storage,append-ingestion' JVM_RUNTIME='-Djvm.runtime=11'
+env: TESTNG_GROUPS='-DexcludedGroups=batch-index,input-format,input-source,perfect-rollup-parallel-batch-index,kafka-index,query,realtime-index,security,s3-deep-storage,gcs-deep-storage,azure-deep-storage,hdfs-deep-storage,s3-ingestion,kinesis-index,kinesis-data-format,kafka-transactional-index,kafka-index-slow,kafka-transactional-index-slow,kafka-data-format,hadoop-s3-to-s3-deep-storage,hadoop-s3-to-hdfs-deep-storage,hadoop-azure-to-azure-deep-storage,hadoop-azure-to-hdfs-deep-storage,hadoop-gcs-to-gcs-deep-storage,hadoop-gcs-to-hdfs-deep-storage,aliyun-oss-deep-storage,append-ingestion' JVM_RUNTIME='-Djvm.runtime=11'
script: *run_integration_test
after_failure: *integration_test_diags
# END - Integration tests for Compile with Java 8 and Run with Java 8
@@ -421,6 +429,11 @@ jobs:
jdk: openjdk8
env: TESTNG_GROUPS='-Dgroups=input-format' JVM_RUNTIME='-Djvm.runtime=11'
+- <<: *integration_input_source
+name: "(Compile=openjdk8, Run=openjdk11) input source integration test"
+jdk: openjdk8
+env: TESTNG_GROUPS='-Dgroups=input-source' JVM_RUNTIME='-Djvm.runtime=11'
- <<: *integration_perfect_rollup_parallel_batch_index
name: "(Compile=openjdk8, Run=openjdk11) perfect rollup parallel batch index integration test"
jdk: openjdk8
@@ -449,7 +462,7 @@ jobs:
- <<: *integration_tests
name: "(Compile=openjdk8, Run=openjdk11) other integration test"
jdk: openjdk8
-env: TESTNG_GROUPS='-DexcludedGroups=batch-index,input-format,perfect-rollup-parallel-batch-index,kafka-index,query,realtime-index,security,s3-deep-storage,gcs-deep-storage,azure-deep-storage,hdfs-deep-storage,s3-ingestion,kinesis-index,kinesis-data-format,kafka-transactional-index,kafka-index-slow,kafka-transactional-index-slow,kafka-data-format,hadoop-s3-to-s3-deep-storage,hadoop-s3-to-hdfs-deep-storage,hadoop-azure-to-azure-deep-storage,hadoop-azure-to-hdfs-deep-storage,hadoop-gcs-to-gcs-deep-storage,hadoop-gcs-to-hdfs-deep-storage,aliyun-oss-deep-storage,append-ingestion' JVM_RUNTIME='-Djvm.runtime=11'
+env: TESTNG_GROUPS='-DexcludedGroups=batch-index,input-format,input-source,perfect-rollup-parallel-batch-index,kafka-index,query,realtime-index,security,s3-deep-storage,gcs-deep-storage,azure-deep-storage,hdfs-deep-storage,s3-ingestion,kinesis-index,kinesis-data-format,kafka-transactional-index,kafka-index-slow,kafka-transactional-index-slow,kafka-data-format,hadoop-s3-to-s3-deep-storage,hadoop-s3-to-hdfs-deep-storage,hadoop-azure-to-azure-deep-storage,hadoop-azure-to-hdfs-deep-storage,hadoop-gcs-to-gcs-deep-storage,hadoop-gcs-to-hdfs-deep-storage,aliyun-oss-deep-storage,append-ingestion' JVM_RUNTIME='-Djvm.runtime=11'
# END - Integration tests for Compile with Java 8 and Run with Java 11
- name: "security vulnerabilities"

File: integration-tests/README.md

@@ -374,7 +374,11 @@ By default, test methods in a test class will be run in sequential order one at
class can be set to run in parallel (multiple test methods of each class running at the same time) by excluding
the given class/package from the "AllSerializedTests" test tag section and including it in the "AllParallelizedTests"
test tag section in integration-tests/src/test/resources/testng.xml. TestNG uses two parameters, i.e.,
-`thread-count` and `data-provider-thread-count`, for parallel test execution, which are set to 2 for Druid integration tests.
+`thread-count` and `data-provider-thread-count`, for parallel test execution, which are both set to 2 for Druid integration tests.
+For tests that use parallel execution with a data provider, you will also need to set `@DataProvider(parallel = true)`
+on the data provider method in your test class. Note that such a test class does not need to be in the
+"AllParallelizedTests" test tag section; if it is, it will actually be run with `thread-count` times
+`data-provider-thread-count` threads.
You may want to modify those values for faster execution.
See https://testng.org/doc/documentation-main.html#parallel-running and https://testng.org/doc/documentation-main.html#parameters-dataproviders for details.
Please be mindful when adding tests to the "AllParallelizedTests" test tag that the tests can run in parallel with
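
To make the README guidance above concrete, here is a minimal sketch of the parallel-data-provider pattern it describes. The class and method names are hypothetical, not part of this commit; only the `@DataProvider(parallel = true)` / `@Test(dataProvider = ...)` wiring is the point:

import org.testng.annotations.DataProvider;
import org.testng.annotations.Test;

// Hypothetical example class (not part of this commit) illustrating the
// README's pattern: parallelism comes from the data provider, so the class
// itself does not need to be listed in the "AllParallelizedTests" tag section.
public class ExampleParallelDataProviderTest
{
  @DataProvider(parallel = true)
  public static Object[][] inputs()
  {
    // Each row becomes one invocation of the test method; with
    // data-provider-thread-count=2, up to two rows run concurrently.
    return new Object[][]{{"a"}, {"b"}, {"c"}};
  }

  @Test(dataProvider = "inputs")
  public void testOneInput(String input)
  {
    // A real test would exercise one input per invocation here.
  }
}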

File: integration-tests Docker setup script (setupData)

@@ -95,11 +95,11 @@ setupData()
fi
-# The SqlInputSource tests in the "batch-index" test group require data to be setup in MySQL before running the tests.
-if [ "$DRUID_INTEGRATION_TEST_GROUP" = "batch-index" ] ; then
+# The SqlInputSource tests in the "input-source" test group require data to be setup in MySQL before running the tests.
+if [ "$DRUID_INTEGRATION_TEST_GROUP" = "input-source" ] ; then
# touch is needed because OverlayFS's copy-up operation breaks POSIX standards. See https://github.com/docker/for-linux/issues/72.
find /var/lib/mysql -type f -exec touch {} \; && service mysql start \
&& echo "CREATE database sqlinputsource DEFAULT CHARACTER SET utf8mb4;" | mysql -u root druid \
&& echo "GRANT ALL ON sqlinputsource.* TO 'druid'@'%'; CREATE database sqlinputsource DEFAULT CHARACTER SET utf8mb4;" | mysql -u root druid \
&& cat /test-data/sql-input-source-sample-data.sql | mysql -u root druid \
&& /etc/init.d/mysql stop
fi
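
As a side note, one way to sanity-check the seeded data by hand is a small JDBC probe. This is a hedged sketch, not part of the commit: the host/port and the druid/diurd credentials are assumptions based on the integration-test Docker environment, and the table name is taken from the test queries further below.

import java.sql.Connection;
import java.sql.DriverManager;
import java.sql.ResultSet;
import java.sql.Statement;

// Hypothetical sanity check (not part of this commit): connect to the MySQL
// instance seeded by setupData() and count rows in one of the sample tables.
// The JDBC URL and druid/diurd credentials are assumptions, not stated in the diff.
public class SqlInputSourceDataCheck
{
  public static void main(String[] args) throws Exception
  {
    try (Connection conn = DriverManager.getConnection(
             "jdbc:mysql://localhost:3306/sqlinputsource", "druid", "diurd");
         Statement stmt = conn.createStatement();
         ResultSet rs = stmt.executeQuery("SELECT COUNT(*) FROM wikipedia_index_data_all")) {
      rs.next();
      System.out.println("rows in wikipedia_index_data_all: " + rs.getLong(1));
    }
  }
}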

File: middleManager environment config (integration-tests Docker)

@@ -25,7 +25,6 @@ SERVICE_DRUID_JAVA_OPTS=-server -Xmx64m -Xms64m -XX:+UseG1GC -agentlib:jdwp=tran
# Druid configs
druid_server_http_numThreads=100
-druid_worker_capacity=3
druid_storage_storageDirectory=/shared/storage
druid_indexer_runner_javaOptsArray=["-server", "-Xmx256m", "-Xms256m", "-XX:NewSize=128m", "-XX:MaxNewSize=128m", "-XX:+UseG1GC", "-Duser.timezone=UTC", "-Dfile.encoding=UTF-8", "-Dlog4j.configurationFile=/shared/docker/lib/log4j2.xml"]
@@ -37,4 +36,4 @@ druid_indexer_task_chathandler_type=announce
druid_auth_basic_common_cacheDirectory=/tmp/authCache/middleManager
druid_startup_logging_logProperties=true
druid_server_https_crlPath=/tls/revocations.crl
-druid_worker_capacity=20
\ No newline at end of file
+druid_worker_capacity=20

File: TestNGGroup.java

@@ -29,6 +29,8 @@ public class TestNGGroup
public static final String INPUT_FORMAT = "input-format";
+public static final String INPUT_SOURCE = "input-source";
public static final String KAFKA_INDEX = "kafka-index";
public static final String KAFKA_INDEX_SLOW = "kafka-index-slow";

File: ITSqlInputSourceTest.java

@@ -17,14 +17,13 @@
* under the License.
*/
-package org.apache.druid.tests.parallelized;
+package org.apache.druid.tests.indexer;
import com.google.common.collect.ImmutableList;
import org.apache.druid.indexer.partitions.DynamicPartitionsSpec;
import org.apache.druid.java.util.common.StringUtils;
import org.apache.druid.testing.guice.DruidTestModuleFactory;
import org.apache.druid.tests.TestNGGroup;
-import org.apache.druid.tests.indexer.AbstractITBatchIndexTest;
import org.testng.annotations.DataProvider;
import org.testng.annotations.Guice;
import org.testng.annotations.Test;
@@ -34,23 +33,23 @@ import java.util.List;
import java.util.UUID;
import java.util.function.Function;
-@Test(groups = TestNGGroup.BATCH_INDEX)
+@Test(groups = TestNGGroup.INPUT_SOURCE)
@Guice(moduleFactory = DruidTestModuleFactory.class)
public class ITSqlInputSourceTest extends AbstractITBatchIndexTest
{
-private static final String INDEX_TASK = "/indexer/wikipedia_index_task.json";
+private static final String INDEX_TASK = "/indexer/wikipedia_parallel_index_using_sqlinputsource_task.json";
private static final String INDEX_QUERIES_RESOURCE = "/indexer/wikipedia_index_queries.json";
-@DataProvider(parallel = true)
+@DataProvider
public static Object[][] resources()
{
return new Object[][]{
// Multiple query. No filter
{ImmutableList.of("SELECT * FROM wikipedia_index_data1", "SELECT * FROM wikipedia_index_data2", "SELECT * FROM wikipedia_index_data3")},
// Multiple query. Filter on timestamp column
{ImmutableList.of("SELECT * FROM wikipedia_index_data1 WHERE timestamp BETWEEN '2013-08-31 00:00:00' AND '2013-08-31 11:59:59'",
"SELECT * FROM wikipedia_index_data2 WHERE timestamp BETWEEN '2013-08-31 00:00:00' AND '2013-09-01 11:59:59'",
"SELECT * FROM wikipedia_index_data3 WHERE timestamp BETWEEN '2013-09-01 00:00:00' AND '2013-09-01 11:59:59'")},
{ImmutableList.of("SELECT * FROM wikipedia_index_data1 WHERE timestamp BETWEEN '2013-08-31 00:00:00' AND '2013-09-02 00:00:00'",
"SELECT * FROM wikipedia_index_data2 WHERE timestamp BETWEEN '2013-08-31 00:00:00' AND '2013-09-02 00:00:00'",
"SELECT * FROM wikipedia_index_data3 WHERE timestamp BETWEEN '2013-09-01 00:00:00' AND '2013-09-02 00:00:00'")},
// Multiple query. Filter on data column
{ImmutableList.of("SELECT * FROM wikipedia_index_data1 WHERE added > 0",
"SELECT * FROM wikipedia_index_data2 WHERE added > 0",
@@ -58,7 +57,7 @@ public class ITSqlInputSourceTest extends AbstractITBatchIndexTest
// Single query. No filter
{ImmutableList.of("SELECT * FROM wikipedia_index_data_all")},
// Single query. Filter on timestamp column
{ImmutableList.of("SELECT * FROM wikipedia_index_data_all WHERE timestamp BETWEEN '2013-08-31 00:00:00' AND '2013-09-01 11:59:59'")},
{ImmutableList.of("SELECT * FROM wikipedia_index_data_all WHERE timestamp BETWEEN '2013-08-31 00:00:00' AND '2013-09-02 00:00:00'")},
// Single query. Filter on data column
{ImmutableList.of("SELECT * FROM wikipedia_index_data_all WHERE added > 0")},
};