<?xml version="1.0" encoding="UTF-8"?>
<!--
~ Licensed to the Apache Software Foundation (ASF) under one
~ or more contributor license agreements. See the NOTICE file
~ distributed with this work for additional information
~ regarding copyright ownership. The ASF licenses this file
~ to you under the Apache License, Version 2.0 (the
~ "License"); you may not use this file except in compliance
~ with the License. You may obtain a copy of the License at
~
~ http://www.apache.org/licenses/LICENSE-2.0
~
~ Unless required by applicable law or agreed to in writing,
~ software distributed under the License is distributed on an
~ "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
~ KIND, either express or implied. See the License for the
~ specific language governing permissions and limitations
~ under the License.
-->
<project xmlns="http://maven.apache.org/POM/4.0.0" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/maven-v4_0_0.xsd">
<modelVersion>4.0.0</modelVersion>
<parent>
<groupId>org.apache</groupId>
<artifactId>apache</artifactId>
<version>21</version>
</parent>
<groupId>org.apache.druid</groupId>
<artifactId>druid</artifactId>
<version>0.19.0-SNAPSHOT</version>
<packaging>pom</packaging>
<name>Druid</name>
<description>Druid - A Distributed Column Store</description>
<url>https://druid.apache.org/</url>
<licenses>
<license>
<name>Apache License, Version 2.0</name>
<url>https://www.apache.org/licenses/LICENSE-2.0</url>
</license>
</licenses>
<developers>
<developer>
<name>Apache Druid Committers</name>
<url>https://druid.apache.org/community/#committers</url>
</developer>
</developers>
<mailingLists>
<mailingList>
<name>Apache Druid developers list</name>
<subscribe>dev-subscribe@druid.apache.org</subscribe>
<unsubscribe>dev-unsubscribe@druid.apache.org</unsubscribe>
<post>dev@druid.apache.org</post>
<archive>https://mail-archives.apache.org/mod_mbox/druid-dev</archive>
</mailingList>
</mailingLists>
<inceptionYear>2011</inceptionYear>
<scm>
<connection>scm:git:ssh://git@github.com/apache/druid.git</connection>
<developerConnection>scm:git:ssh://git@github.com/apache/druid.git</developerConnection>
<url>https://github.com/apache/druid.git</url>
<tag>0.19.0-SNAPSHOT</tag>
</scm>
<properties>
<maven.compiler.source>1.8</maven.compiler.source>
<maven.compiler.target>1.8</maven.compiler.target>
<java.version>8</java.version>
<project.build.resourceEncoding>UTF-8</project.build.resourceEncoding>
<aether.version>0.9.0.M2</aether.version>
<apache.curator.version>4.1.0</apache.curator.version>
<apache.curator.test.version>2.12.0</apache.curator.test.version>
<apache.kafka.version>2.2.2</apache.kafka.version>
<avatica.version>1.15.0</avatica.version>
<avro.version>1.9.2</avro.version>
<calcite.version>1.21.0</calcite.version>
<datasketches.version>1.2.0-incubating</datasketches.version>
<derby.version>10.14.2.0</derby.version>
<dropwizard.metrics.version>4.0.0</dropwizard.metrics.version>
<guava.version>16.0.1</guava.version>
<guice.version>4.1.0</guice.version>
<hamcrest.version>1.3</hamcrest.version>
<jetty.version>9.4.12.v20180830</jetty.version>
<jersey.version>1.19.3</jersey.version>
<jackson.version>2.10.2</jackson.version>
<codehaus.jackson.version>1.9.13</codehaus.jackson.version>
<log4j.version>2.8.2</log4j.version>
<mysql.version>5.1.48</mysql.version>
<netty3.version>3.10.6.Final</netty3.version>
<resilience4j.version>1.3.1</resilience4j.version>
<!-- Spark updated in https://github.com/apache/spark/pull/19884 -->
<netty4.version>4.1.45.Final</netty4.version>
<node.version>v10.14.2</node.version>
<npm.version>6.5.0</npm.version>
<postgresql.version>42.2.8</postgresql.version>
<protobuf.version>3.11.0</protobuf.version>
<slf4j.version>1.7.12</slf4j.version>
<!-- If compiling with a different Hadoop version, also modify the default Hadoop coordinates in TaskConfig.java -->
<hadoop.compile.version>2.8.5</hadoop.compile.version>
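<!--
~ A minimal sketch of swapping the Hadoop client version at build time via Maven's standard
~ -D property override (the version shown is only an example value, not a tested target):
~
~ mvn clean install -Dhadoop.compile.version=2.9.2
~
~ Per the note above, TaskConfig.java would need the matching coordinate change.
-->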
<powermock.version>2.0.2</powermock.version>
<aws.sdk.version>1.11.199</aws.sdk.version>
<caffeine.version>2.8.0</caffeine.version>
<!-- When upgrading ZK, edit docs and integration tests as well (integration-tests/docker-base/setup.sh) -->
<zookeeper.version>3.4.14</zookeeper.version>
<checkerframework.version>2.5.7</checkerframework.version>
<com.google.apis.client.version>1.22.0</com.google.apis.client.version>
<repoOrgId>apache.snapshots</repoOrgId>
<repoOrgName>Apache Snapshot Repository</repoOrgName>
<repoOrgUrl>https://repository.apache.org/snapshots</repoOrgUrl>
<!-- Allow the handful of flaky tests with transient failures to pass. -->
<surefire.rerunFailingTestsCount>3</surefire.rerunFailingTestsCount>
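<!--
~ This property name matches the maven-surefire-plugin user property, so surefire picks it up
~ without extra plugin configuration. Assuming a surefire version with rerun support, the
~ value can be overridden per run, e.g. to fail fast locally:
~
~ mvn test -Dsurefire.rerunFailingTestsCount=0
-->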
</properties>
<modules>
<module>core</module>
<module>indexing-hadoop</module>
<module>indexing-service</module>
<module>processing</module>
<module>server</module>
<module>sql</module>
<module>services</module>
<module>integration-tests</module>
<module>benchmarks</module>
<module>extendedset</module>
<module>hll</module>
<module>web-console</module>
<!-- Core cloud functionality -->
<module>cloud/aws-common</module>
<module>cloud/gcp-common</module>
<!-- Core extensions -->
<module>extensions-core/avro-extensions</module>
<module>extensions-core/azure-extensions</module>
<module>extensions-core/datasketches</module>
<module>extensions-core/druid-bloom-filter</module>
<module>extensions-core/druid-kerberos</module>
<module>extensions-core/druid-pac4j</module>
<module>extensions-core/hdfs-storage</module>
<module>extensions-core/histogram</module>
<module>extensions-core/stats</module>
<module>extensions-core/kafka-extraction-namespace</module>
<module>extensions-core/kafka-indexing-service</module>
<module>extensions-core/kinesis-indexing-service</module>
<module>extensions-core/mysql-metadata-storage</module>
<module>extensions-core/orc-extensions</module>
<module>extensions-core/parquet-extensions</module>
<module>extensions-core/postgresql-metadata-storage</module>
<module>extensions-core/protobuf-extensions</module>
<module>extensions-core/lookups-cached-global</module>
<module>extensions-core/lookups-cached-single</module>
<module>extensions-core/ec2-extensions</module>
<module>extensions-core/s3-extensions</module>
<module>extensions-core/simple-client-sslcontext</module>
<module>extensions-core/druid-basic-security</module>
<module>extensions-core/google-extensions</module>
<!-- Community extensions -->
<module>extensions-contrib/influx-extensions</module>
<module>extensions-contrib/cassandra-storage</module>
<module>extensions-contrib/dropwizard-emitter</module>
<module>extensions-contrib/cloudfiles-extensions</module>
<module>extensions-contrib/graphite-emitter</module>
<module>extensions-contrib/distinctcount</module>
<module>extensions-contrib/statsd-emitter</module>
<module>extensions-contrib/time-min-max</module>
<module>extensions-contrib/virtual-columns</module>
<module>extensions-contrib/thrift-extensions</module>
<module>extensions-contrib/ambari-metrics-emitter</module>
<module>extensions-contrib/sqlserver-metadata-storage</module>
<module>extensions-contrib/kafka-emitter</module>
<module>extensions-contrib/redis-cache</module>
<module>extensions-contrib/opentsdb-emitter</module>
<module>extensions-contrib/materialized-view-maintenance</module>
<module>extensions-contrib/materialized-view-selection</module>
<module>extensions-contrib/momentsketch</module>
<module>extensions-contrib/moving-average-query</module>
<module>extensions-contrib/tdigestsketch</module>
<module>extensions-contrib/influxdb-emitter</module>
<!-- distribution packaging -->
<module>distribution</module>
</modules>
<repositories>
<repository>
<id>${repoOrgId}</id>
<name>${repoOrgName}</name>
<url>${repoOrgUrl}</url>
</repository>
<!-- Only used by core, but moved to root for parallel build dependency resolution -->
<repository>
<id>sigar</id>
<url>https://repository.jboss.org/nexus/content/repositories/thirdparty-uploads/</url>
</repository>
</repositories>
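<!--
~ Because the repository id/name/url above are ordinary Maven properties, builds can point
~ them at an internal mirror without editing this file. A sketch with a hypothetical URL:
~
~ mvn clean install -DrepoOrgId=internal-snapshots -DrepoOrgUrl=https://repo.example.com/snapshots
-->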
<dependencyManagement>
<dependencies>
<!-- Compile Scope -->
<dependency>
<groupId>commons-codec</groupId>
<artifactId>commons-codec</artifactId>
<version>1.13</version>
</dependency>
<dependency>
<groupId>commons-io</groupId>
<artifactId>commons-io</artifactId>
<version>2.6</version>
</dependency>
<dependency>
<groupId>commons-logging</groupId>
<artifactId>commons-logging</artifactId>
<version>1.1.1</version>
</dependency>
<dependency>
<groupId>commons-lang</groupId>
<artifactId>commons-lang</artifactId>
<version>2.6</version>
</dependency>
<dependency>
<groupId>commons-net</groupId>
<artifactId>commons-net</artifactId>
<version>3.6</version>
</dependency>
<dependency>
<groupId>org.apache.commons</groupId>
<artifactId>commons-lang3</artifactId>
<version>3.8.1</version>
</dependency>
<dependency>
<groupId>com.amazonaws</groupId>
<artifactId>aws-java-sdk-core</artifactId>
<version>${aws.sdk.version}</version>
</dependency>
<dependency>
<groupId>com.amazonaws</groupId>
<artifactId>aws-java-sdk-ec2</artifactId>
<version>${aws.sdk.version}</version>
</dependency>
<dependency>
<groupId>com.amazonaws</groupId>
<artifactId>aws-java-sdk-s3</artifactId>
<version>${aws.sdk.version}</version>
</dependency>
<dependency>
<groupId>com.ning</groupId>
<artifactId>compress-lzf</artifactId>
<version>1.0.4</version>
</dependency>
<dependency>
<groupId>io.airlift</groupId>
<artifactId>airline</artifactId>
<version>0.7</version>
<exclusions>
<exclusion>
<!--LGPL-licensed library-->
<groupId>com.google.code.findbugs</groupId>
<artifactId>annotations</artifactId>
</exclusion>
</exclusions>
</dependency>
<dependency>
<groupId>org.skife.config</groupId>
<artifactId>config-magic</artifactId>
<version>0.9</version>
<exclusions>
<exclusion>
<groupId>org.slf4j</groupId>
<artifactId>slf4j-api</artifactId>
</exclusion>
</exclusions>
</dependency>
<dependency>
<groupId>org.apache.zookeeper</groupId>
<artifactId>zookeeper</artifactId>
<version>${zookeeper.version}</version>
<exclusions>
<exclusion>
<groupId>org.slf4j</groupId>
<artifactId>slf4j-log4j12</artifactId>
</exclusion>
<exclusion>
<groupId>log4j</groupId>
<artifactId>log4j</artifactId>
</exclusion>
<exclusion>
<groupId>io.netty</groupId>
<artifactId>netty</artifactId>
</exclusion>
<exclusion>
<groupId>com.github.spotbugs</groupId>
<artifactId>spotbugs-annotations</artifactId>
</exclusion>
</exclusions>
</dependency>
<dependency>
<groupId>org.apache.curator</groupId>
<artifactId>curator-client</artifactId>
<version>${apache.curator.version}</version>
</dependency>
<dependency>
<groupId>org.apache.curator</groupId>
<artifactId>curator-framework</artifactId>
<version>${apache.curator.version}</version>
<exclusions>
<exclusion>
<groupId>org.jboss.netty</groupId>
<artifactId>netty</artifactId>
</exclusion>
</exclusions>
</dependency>
<dependency>
<groupId>org.apache.curator</groupId>
<artifactId>curator-recipes</artifactId>
<version>${apache.curator.version}</version>
<exclusions>
<exclusion>
<groupId>org.apache.zookeeper</groupId>
<artifactId>zookeeper</artifactId>
</exclusion>
</exclusions>
</dependency>
<dependency>
<!--
~ TODO: This dependency transitively pulls in org.codehaus.jackson:jackson-mapper-asl:1.9.13, which has
~ security vulnerability CVE-2017-7525. https://github.com/apache/druid/pull/8177 tracks
~ upgrading to a newer curator version.
-->
<groupId>org.apache.curator</groupId>
<artifactId>curator-x-discovery</artifactId>
<version>${apache.curator.version}</version>
</dependency>
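<!--
~ Until that Curator upgrade lands, a consuming module could fence off the vulnerable
~ transitive jar with a standard Maven exclusion. A sketch, not the current configuration
~ of any module here:
~
~ <dependency>
~ <groupId>org.apache.curator</groupId>
~ <artifactId>curator-x-discovery</artifactId>
~ <exclusions>
~ <exclusion>
~ <groupId>org.codehaus.jackson</groupId>
~ <artifactId>jackson-mapper-asl</artifactId>
~ </exclusion>
~ </exclusions>
~ </dependency>
-->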
<dependency>
<groupId>org.apache.calcite</groupId>
<artifactId>calcite-core</artifactId>
<version>${calcite.version}</version>
</dependency>
<dependency>
<groupId>org.apache.calcite</groupId>
<artifactId>calcite-linq4j</artifactId>
<version>${calcite.version}</version>
</dependency>
<dependency>
<groupId>org.apache.calcite.avatica</groupId>
<artifactId>avatica</artifactId>
<version>${avatica.version}</version>
</dependency>
<dependency>
<groupId>org.apache.calcite.avatica</groupId>
<artifactId>avatica-core</artifactId>
<version>${avatica.version}</version>
</dependency>
<dependency>
<groupId>org.apache.calcite.avatica</groupId>
<artifactId>avatica-metrics</artifactId>
<version>${avatica.version}</version>
</dependency>
<dependency>
<groupId>org.apache.calcite.avatica</groupId>
<artifactId>avatica-server</artifactId>
<version>${avatica.version}</version>
</dependency>
<dependency>
<groupId>org.apache.hive</groupId>
<artifactId>hive-storage-api</artifactId>
<version>2.6.0</version>
</dependency>
<dependency>
<groupId>com.google.guava</groupId>
<artifactId>guava</artifactId>
<version>${guava.version}</version>
</dependency>
<dependency>
<groupId>com.google.inject</groupId>
<artifactId>guice</artifactId>
<version>${guice.version}</version>
</dependency>
<dependency>
<groupId>com.google.inject.extensions</groupId>
<artifactId>guice-servlet</artifactId>
<version>${guice.version}</version>
</dependency>
<dependency>
<groupId>com.google.inject.extensions</groupId>
<artifactId>guice-multibindings</artifactId>
<version>${guice.version}</version>
</dependency>
<dependency>
<groupId>com.google.inject.extensions</groupId>
<artifactId>guice-assistedinject</artifactId>
<version>${guice.version}</version>
</dependency>
<dependency>
<groupId>com.google.errorprone</groupId>
<artifactId>error_prone_annotations</artifactId>
<version>2.3.2</version>
</dependency>
<dependency>
<groupId>com.ibm.icu</groupId>
<artifactId>icu4j</artifactId>
<version>55.1</version>
</dependency>
<dependency>
<groupId>org.mozilla</groupId>
<artifactId>rhino</artifactId>
<version>1.7.11</version>
</dependency>
<dependency>
<groupId>org.apache.commons</groupId>
<artifactId>commons-compress</artifactId>
<version>1.19</version>
</dependency>
<dependency>
<groupId>org.tukaani</groupId>
<artifactId>xz</artifactId>
<version>1.8</version>
</dependency>
<dependency>
<groupId>com.github.luben</groupId>
<artifactId>zstd-jni</artifactId>
<version>1.3.3-1</version>
</dependency>
<dependency>
<groupId>com.fasterxml.jackson.core</groupId>
<artifactId>jackson-annotations</artifactId>
<version>${jackson.version}</version>
</dependency>
<dependency>
<groupId>com.fasterxml.jackson.core</groupId>
<artifactId>jackson-core</artifactId>
<version>${jackson.version}</version>
</dependency>
<dependency>
<groupId>com.fasterxml.jackson.core</groupId>
<artifactId>jackson-databind</artifactId>
<version>${jackson.version}</version>
</dependency>
<dependency>
<groupId>com.fasterxml.jackson.datatype</groupId>
<artifactId>jackson-datatype-guava</artifactId>
<version>${jackson.version}</version>
</dependency>
<dependency>
<groupId>com.fasterxml.jackson.datatype</groupId>
<artifactId>jackson-datatype-joda</artifactId>
<version>${jackson.version}</version>
</dependency>
<dependency>
<!--
~ This is a transitive dependency of com.amazonaws:aws-java-sdk-core. Override the version here so
~ that it is the same as the other jackson dependencies.
-->
<groupId>com.fasterxml.jackson.dataformat</groupId>
<artifactId>jackson-dataformat-cbor</artifactId>
<version>${jackson.version}</version>
</dependency>
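<!--
~ As with every entry in this dependencyManagement block, child modules inherit the pinned
~ version by declaring the dependency without a <version> element, e.g.:
~
~ <dependency>
~ <groupId>com.fasterxml.jackson.dataformat</groupId>
~ <artifactId>jackson-dataformat-cbor</artifactId>
~ </dependency>
-->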
<dependency>
<groupId>com.fasterxml.jackson.dataformat</groupId>
<artifactId>jackson-dataformat-smile</artifactId>
<version>${jackson.version}</version>
</dependency>
<dependency>
<groupId>com.fasterxml.jackson.jaxrs</groupId>
<artifactId>jackson-jaxrs-json-provider</artifactId>
<version>${jackson.version}</version>
</dependency>
<dependency>
<groupId>com.fasterxml.jackson.jaxrs</groupId>
<artifactId>jackson-jaxrs-smile-provider</artifactId>
<version>${jackson.version}</version>
</dependency>
<dependency>
<groupId>org.hibernate</groupId>
<artifactId>hibernate-validator</artifactId>
<version>5.2.5.Final</version>
</dependency>
<dependency>
<groupId>javax.validation</groupId>
<artifactId>validation-api</artifactId>
<version>1.1.0.Final</version>
</dependency>
<dependency>
<groupId>javax.inject</groupId>
<artifactId>javax.inject</artifactId>
<version>1</version>
</dependency>
<dependency>
<groupId>javax.el</groupId>
<artifactId>javax.el-api</artifactId>
<version>3.0.0</version>
</dependency>
<dependency>
<groupId>javax.servlet</groupId>
<artifactId>servlet-api</artifactId>
<version>2.5</version>
<scope>provided</scope>
</dependency>
<dependency>
<groupId>javax.xml.bind</groupId>
<artifactId>jaxb-api</artifactId>
<version>2.3.1</version>
</dependency>
<dependency>
<groupId>org.glassfish</groupId>
<artifactId>javax.el</artifactId>
<version>3.0.0</version>
</dependency>
<dependency>
<groupId>org.glassfish.grizzly</groupId>
<artifactId>grizzly-http-server</artifactId>
<version>2.2.16</version>
</dependency>
<dependency>
<groupId>org.glassfish.jaxb</groupId>
<artifactId>jaxb-runtime</artifactId>
<version>2.3.1</version>
</dependency>
<dependency>
<groupId>org.jdbi</groupId>
<artifactId>jdbi</artifactId>
<version>2.63.1</version>
</dependency>
<dependency>
<groupId>com.sun.jersey</groupId>
<artifactId>jersey-client</artifactId>
<version>${jersey.version}</version>
</dependency>
<dependency>
<groupId>com.sun.jersey</groupId>
<artifactId>jersey-core</artifactId>
<version>${jersey.version}</version>
</dependency>
<dependency>
<groupId>com.sun.jersey</groupId>
<artifactId>jersey-grizzly2</artifactId>
<version>${jersey.version}</version>
</dependency>
<dependency>
<groupId>com.sun.jersey.contribs</groupId>
<artifactId>jersey-guice</artifactId>
<version>${jersey.version}</version>
<exclusions>
<exclusion>
<groupId>com.google.inject</groupId>
<artifactId>guice</artifactId>
</exclusion>
<exclusion>
<groupId>com.google.inject.extensions</groupId>
<artifactId>guice-servlet</artifactId>
</exclusion>
</exclusions>
</dependency>
<dependency>
<groupId>com.sun.jersey</groupId>
<artifactId>jersey-server</artifactId>
<version>${jersey.version}</version>
</dependency>
<dependency>
<groupId>com.sun.jersey</groupId>
<artifactId>jersey-servlet</artifactId>
<version>${jersey.version}</version>
</dependency>
<dependency>
<groupId>com.sun.jersey</groupId>
<artifactId>jersey-json</artifactId>
<version>${jersey.version}</version>
</dependency>
<dependency>
<groupId>org.eclipse.jetty</groupId>
<artifactId>jetty-client</artifactId>
<version>${jetty.version}</version>
</dependency>
<dependency>
<groupId>org.eclipse.jetty</groupId>
<artifactId>jetty-http</artifactId>
<version>${jetty.version}</version>
</dependency>
<dependency>
<groupId>org.eclipse.jetty</groupId>
<artifactId>jetty-io</artifactId>
<version>${jetty.version}</version>
</dependency>
<dependency>
<groupId>org.eclipse.jetty</groupId>
<artifactId>jetty-server</artifactId>
<version>${jetty.version}</version>
</dependency>
<dependency>
<groupId>org.eclipse.jetty</groupId>
<artifactId>jetty-servlet</artifactId>
<version>${jetty.version}</version>
</dependency>
<dependency>
<groupId>org.eclipse.jetty</groupId>
<artifactId>jetty-servlets</artifactId>
<version>${jetty.version}</version>
</dependency>
<dependency>
<groupId>org.eclipse.jetty</groupId>
<artifactId>jetty-proxy</artifactId>
<version>${jetty.version}</version>
</dependency>
<dependency>
<groupId>org.eclipse.jetty</groupId>
<artifactId>jetty-rewrite</artifactId>
<version>${jetty.version}</version>
</dependency>
<dependency>
<groupId>org.eclipse.jetty</groupId>
<artifactId>jetty-util</artifactId>
<version>${jetty.version}</version>
</dependency>
<dependency>
<groupId>org.eclipse.jetty</groupId>
<artifactId>jetty-security</artifactId>
<version>${jetty.version}</version>
</dependency>
<dependency>
<groupId>io.netty</groupId>
<artifactId>netty</artifactId>
<version>${netty3.version}</version>
</dependency>
<dependency>
<groupId>io.netty</groupId>
<artifactId>netty-buffer</artifactId>
<version>${netty4.version}</version>
</dependency>
<dependency>
<groupId>io.netty</groupId>
<artifactId>netty-common</artifactId>
<version>${netty4.version}</version>
</dependency>
<dependency>
<groupId>io.netty</groupId>
<artifactId>netty-codec-dns</artifactId>
<version>${netty4.version}</version>
</dependency>
<dependency>
<groupId>io.netty</groupId>
<artifactId>netty-codec-http</artifactId>
<version>${netty4.version}</version>
</dependency>
<dependency>
<groupId>io.netty</groupId>
<artifactId>netty-codec-socks</artifactId>
<version>${netty4.version}</version>
</dependency>
<dependency>
<groupId>io.netty</groupId>
<artifactId>netty-handler</artifactId>
<version>${netty4.version}</version>
</dependency>
<dependency>
<groupId>io.netty</groupId>
<artifactId>netty-handler-proxy</artifactId>
<version>${netty4.version}</version>
</dependency>
<dependency>
<groupId>io.netty</groupId>
<artifactId>netty-resolver</artifactId>
<version>${netty4.version}</version>
</dependency>
<dependency>
<groupId>io.netty</groupId>
<artifactId>netty-resolver-dns</artifactId>
<version>${netty4.version}</version>
</dependency>
<dependency>
<groupId>io.netty</groupId>
<artifactId>netty-transport-native-epoll</artifactId>
<classifier>linux-x86_64</classifier>
<version>${netty4.version}</version>
</dependency>
<dependency>
<groupId>io.netty</groupId>
<artifactId>netty-transport-native-unix-common</artifactId>
<version>${netty4.version}</version>
</dependency>
<dependency>
<groupId>joda-time</groupId>
<artifactId>joda-time</artifactId>
<version>2.10.5</version>
</dependency>
<dependency>
<groupId>com.google.code.findbugs</groupId>
<artifactId>jsr305</artifactId>
<version>2.0.1</version>
</dependency>
<dependency>
<groupId>org.apache.logging.log4j</groupId>
<artifactId>log4j-api</artifactId>
<version>${log4j.version}</version>
</dependency>
<dependency>
<groupId>org.apache.logging.log4j</groupId>
<artifactId>log4j-core</artifactId>
<version>${log4j.version}</version>
</dependency>
<dependency>
<groupId>org.apache.logging.log4j</groupId>
<artifactId>log4j-slf4j-impl</artifactId>
<version>${log4j.version}</version>
</dependency>
<dependency>
<groupId>org.apache.logging.log4j</groupId>
<artifactId>log4j-1.2-api</artifactId>
<version>${log4j.version}</version>
</dependency>
<dependency>
<!-- This is not slf4j's version because of performance concerns
http://www.slf4j.org/api/org/slf4j/bridge/SLF4JBridgeHandler.html
Please make sure to do performance tests before switching this to slf4j
Users wishing to use slf4j's solution are encouraged to also use
Logback
More info at
http://logback.qos.ch/manual/configuration.html#LevelChangePropagator
http://www.slf4j.org/legacy.html#jul-to-slf4j
-->
<groupId>org.apache.logging.log4j</groupId>
<artifactId>log4j-jul</artifactId>
<version>${log4j.version}</version>
</dependency>
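<!--
~ Note: per the Log4j documentation, the log4j-jul adapter only takes effect when the JVM
~ is started with the Log4j LogManager:
~
~ -Djava.util.logging.manager=org.apache.logging.log4j.jul.LogManager
-->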
<dependency>
<groupId>org.slf4j</groupId>
<artifactId>jcl-over-slf4j</artifactId>
<version>${slf4j.version}</version>
</dependency>
<dependency>
<groupId>com.lmax</groupId>
<artifactId>disruptor</artifactId>
<version>3.3.6</version>
</dependency>
<dependency>
<groupId>net.spy</groupId>
<artifactId>spymemcached</artifactId>
<version>2.12.3</version>
</dependency>
<dependency>
<groupId>org.antlr</groupId>
<artifactId>antlr4-runtime</artifactId>
<version>4.5.1</version>
</dependency>
<dependency>
<groupId>commons-cli</groupId>
<artifactId>commons-cli</artifactId>
<version>1.3.1</version>
</dependency>
<dependency>
<groupId>org.apache.commons</groupId>
<artifactId>commons-dbcp2</artifactId>
<version>2.0.1</version>
</dependency>
<dependency>
<groupId>org.lz4</groupId>
<artifactId>lz4-java</artifactId>
<version>1.6.0</version>
</dependency>
<dependency>
<groupId>com.google.protobuf</groupId>
<artifactId>protobuf-java</artifactId>
<version>${protobuf.version}</version>
</dependency>
<dependency>
<groupId>io.tesla.aether</groupId>
<artifactId>tesla-aether</artifactId>
<version>0.0.5</version>
<exclusions>
<exclusion>
<groupId>org.slf4j</groupId>
<artifactId>slf4j-api</artifactId>
</exclusion>
</exclusions>
</dependency>
<dependency>
<groupId>org.eclipse.aether</groupId>
<artifactId>aether-api</artifactId>
<version>${aether.version}</version>
</dependency>
<dependency>
<groupId>org.eclipse.aether</groupId>
<artifactId>aether-util</artifactId>
<version>${aether.version}</version>
</dependency>
<dependency>
<groupId>org.apache.httpcomponents</groupId>
<artifactId>httpclient</artifactId>
<version>4.5.10</version>
</dependency>
<dependency>
<groupId>org.apache.httpcomponents</groupId>
<artifactId>httpcore</artifactId>
<version>4.4.11</version>
</dependency>
<dependency>
<groupId>org.apache.hadoop</groupId>
<artifactId>hadoop-auth</artifactId>
<version>${hadoop.compile.version}</version>
</dependency>
<dependency>
<groupId>org.apache.hadoop</groupId>
<artifactId>hadoop-client</artifactId>
<version>${hadoop.compile.version}</version>
<scope>provided</scope>
<exclusions>
<exclusion>
<groupId>org.apache.avro</groupId>
<artifactId>avro</artifactId>
</exclusion>
</exclusions>
</dependency>
<dependency>
<groupId>org.apache.hadoop</groupId>
<artifactId>hadoop-common</artifactId>
<version>${hadoop.compile.version}</version>
<scope>provided</scope>
</dependency>
<dependency>
<groupId>org.apache.hadoop</groupId>
<artifactId>hadoop-hdfs-client</artifactId>
<version>${hadoop.compile.version}</version>
<scope>provided</scope>
</dependency>
<dependency>
<groupId>org.apache.hadoop</groupId>
<artifactId>hadoop-yarn-common</artifactId>
<version>${hadoop.compile.version}</version>
<scope>provided</scope>
</dependency>
<dependency>
<groupId>org.apache.hadoop</groupId>
<artifactId>hadoop-mapreduce-client-core</artifactId>
<version>${hadoop.compile.version}</version>
<scope>provided</scope>
<exclusions>
<exclusion>
<groupId>javax.servlet</groupId>
<artifactId>servlet-api</artifactId>
</exclusion>
</exclusions>
</dependency>
<dependency>
<groupId>org.mapdb</groupId>
<artifactId>mapdb</artifactId>
<version>1.0.8</version>
</dependency>
<dependency>
<groupId>org.apache.derby</groupId>
<artifactId>derby</artifactId>
<version>${derby.version}</version>
</dependency>
<dependency>
<groupId>org.apache.derby</groupId>
<artifactId>derbynet</artifactId>
<version>${derby.version}</version>
</dependency>
<dependency>
<groupId>org.apache.derby</groupId>
<artifactId>derbyclient</artifactId>
<version>${derby.version}</version>
</dependency>
<dependency>
<groupId>org.apache.commons</groupId>
<artifactId>commons-math3</artifactId>
<version>3.6.1</version>
</dependency>
<dependency>
<groupId>it.unimi.dsi</groupId>
<artifactId>fastutil</artifactId>
<version>8.2.3</version>
</dependency>
<dependency>
<groupId>com.opencsv</groupId>
<artifactId>opencsv</artifactId>
<version>4.6</version>
</dependency>
<dependency>
<groupId>commons-beanutils</groupId>
<artifactId>commons-beanutils</artifactId>
<!-- Override the transitive dependency from com.opencsv:opencsv:4.6 to version 1.9.4 to address CVE-2014-0114 -->
<version>1.9.4</version>
</dependency>
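<!--
~ A quick way to confirm the override took effect in a given module (standard
~ maven-dependency-plugin usage):
~
~ mvn dependency:tree -Dincludes=commons-beanutils:commons-beanutils
-->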
<dependency>
<groupId>com.jayway.jsonpath</groupId>
<artifactId>json-path</artifactId>
<version>2.3.0</version>
</dependency>
<dependency>
<groupId>net.thisptr</groupId>
<artifactId>jackson-jq</artifactId>
<version>0.0.10</version>
</dependency>
<dependency>
<groupId>org.slf4j</groupId>
<artifactId>slf4j-api</artifactId>
<version>1.7.25</version>
</dependency>
<dependency>
<groupId>org.roaringbitmap</groupId>
<artifactId>RoaringBitmap</artifactId>
<version>0.8.11</version>
</dependency>
<dependency>
<groupId>org.ow2.asm</groupId>
<artifactId>asm</artifactId>
<version>7.1</version>
</dependency>
<dependency>
<groupId>org.ow2.asm</groupId>
<artifactId>asm-commons</artifactId>
<version>7.1</version>
</dependency>
<dependency>
<groupId>org.asynchttpclient</groupId>
<artifactId>async-http-client</artifactId>
<!-- Uses Netty 4.1.x -->
<version>2.5.3</version>
</dependency>
<dependency>
<groupId>org.gridkit.lab</groupId>
<artifactId>jvm-attach-api</artifactId>
<version>1.5</version>
</dependency>
<dependency>
<groupId>net.java.dev.jna</groupId>
<artifactId>jna</artifactId>
<version>4.5.1</version>
</dependency>
<dependency>
<groupId>io.dropwizard.metrics</groupId>
<artifactId>metrics-core</artifactId>
<version>${dropwizard.metrics.version}</version>
</dependency>
<dependency>
<groupId>org.codehaus.jackson</groupId>
<artifactId>jackson-core-asl</artifactId>
<version>${codehaus.jackson.version}</version>
</dependency>
<dependency>
<groupId>org.codehaus.jackson</groupId>
<artifactId>jackson-mapper-asl</artifactId>
<version>${codehaus.jackson.version}</version>
</dependency>
<dependency>
<groupId>javax.servlet</groupId>
<artifactId>javax.servlet-api</artifactId>
<version>3.1.0</version>
</dependency>
<dependency>
<groupId>javax.activation</groupId>
<artifactId>activation</artifactId>
<version>1.1.1</version>
</dependency>
<dependency>
<groupId>commons-pool</groupId>
<artifactId>commons-pool</artifactId>
<version>1.6</version>
</dependency>
<dependency>
<groupId>org.codehaus.plexus</groupId>
<artifactId>plexus-utils</artifactId>
<version>3.0.24</version>
</dependency>
<dependency>
<groupId>com.github.ben-manes.caffeine</groupId>
<artifactId>caffeine</artifactId>
<version>${caffeine.version}</version>
</dependency>
<dependency>
<groupId>org.apache.maven</groupId>
<artifactId>maven-artifact</artifactId>
<version>3.6.0</version>
</dependency>
<dependency>
<groupId>javax.ws.rs</groupId>
<artifactId>jsr311-api</artifactId>
<version>1.1.1</version>
</dependency>
<dependency>
<groupId>org.schemarepo</groupId>
<artifactId>schema-repo-common</artifactId>
<version>0.1.3</version>
</dependency>
<dependency>
<groupId>org.apache.avro</groupId>
<artifactId>avro</artifactId>
<version>${avro.version}</version>
</dependency>
<dependency>
<groupId>org.apache.directory.api</groupId>
<artifactId>api-util</artifactId>
<version>1.0.3</version>
</dependency>
<dependency>
<groupId>org.apache.datasketches</groupId>
<artifactId>datasketches-java</artifactId>
<version>${datasketches.version}</version>
</dependency>
<dependency>
<groupId>org.apache.datasketches</groupId>
<artifactId>datasketches-memory</artifactId>
<version>${datasketches.version}</version>
</dependency>
<dependency>
<groupId>org.apache.calcite</groupId>
<artifactId>calcite-core</artifactId>
<version>${calcite.version}</version>
<type>test-jar</type>
<scope>test</scope>
</dependency>
<dependency>
<groupId>org.easymock</groupId>
<artifactId>easymock</artifactId>
<version>4.0.2</version>
<scope>test</scope>
</dependency>
<dependency>
<groupId>junit</groupId>
<artifactId>junit</artifactId>
<version>4.12</version>
<scope>test</scope>
</dependency>
<dependency>
<groupId>org.powermock</groupId>
<artifactId>powermock-core</artifactId>
<version>${powermock.version}</version>
<scope>test</scope>
</dependency>
<dependency>
<groupId>org.powermock</groupId>
<artifactId>powermock-module-junit4</artifactId>
<version>${powermock.version}</version>
<scope>test</scope>
</dependency>
<dependency>
<groupId>org.powermock</groupId>
<artifactId>powermock-api-easymock</artifactId>
<version>${powermock.version}</version>
<scope>test</scope>
</dependency>
<dependency>
<groupId>org.slf4j</groupId>
<artifactId>slf4j-simple</artifactId>
<version>${slf4j.version}</version>
</dependency>
<dependency>
<groupId>com.carrotsearch</groupId>
<artifactId>junit-benchmarks</artifactId>
<version>0.7.2</version>
<scope>test</scope>
</dependency>
<dependency>
<groupId>com.google.caliper</groupId>
<artifactId>caliper</artifactId>
<version>0.5-rc1</version>
<scope>test</scope>
</dependency>
<dependency>
<groupId>org.apache.curator</groupId>
<artifactId>curator-test</artifactId>
<version>${apache.curator.test.version}</version>
<exclusions>
<exclusion>
<groupId>org.jboss.netty</groupId>
<artifactId>netty</artifactId>
</exclusion>
</exclusions>
<scope>test</scope>
</dependency>
<dependency>
<groupId>org.assertj</groupId>
<artifactId>assertj-core</artifactId>
<version>3.13.2</version>
<scope>test</scope>
</dependency>
<dependency>
<groupId>com.ircclouds.irc</groupId>
<artifactId>irc-api</artifactId>
<version>1.0-0014</version>
<exclusions>
<exclusion>
<groupId>org.slf4j</groupId>
<artifactId>slf4j-api</artifactId>
</exclusion>
</exclusions>
</dependency>
<dependency>
<groupId>com.maxmind.geoip2</groupId>
<artifactId>geoip2</artifactId>
<version>0.4.0</version>
<exclusions>
<exclusion>
<groupId>com.google.http-client</groupId>
<artifactId>google-http-client</artifactId>
</exclusion>
<exclusion>
<groupId>com.google.http-client</groupId>
<artifactId>google-http-client-jackson2</artifactId>
</exclusion>
<exclusion>
<groupId>com.fasterxml.jackson.core</groupId>
<artifactId>jackson-databind</artifactId>
</exclusion>
</exclusions>
</dependency>
<dependency>
<groupId>org.postgresql</groupId>
<artifactId>postgresql</artifactId>
<version>${postgresql.version}</version>
</dependency>
<!-- GCP -->
<dependency>
<groupId>com.google.api-client</groupId>
<artifactId>google-api-client</artifactId>
<version>${com.google.apis.client.version}</version>
<exclusions>
<exclusion>
<groupId>com.google.code.findbugs</groupId>
<artifactId>jsr305</artifactId>
</exclusion>
<exclusion>
<groupId>com.fasterxml.jackson.core</groupId>
<artifactId>jackson-core</artifactId>
</exclusion>
<exclusion>
<groupId>com.google.guava</groupId>
<artifactId>guava-jdk5</artifactId>
</exclusion>
<exclusion>
<groupId>org.apache.httpcomponents</groupId>
<artifactId>httpcore</artifactId>
</exclusion>
<exclusion>
<groupId>org.apache.httpcomponents</groupId>
<artifactId>httpclient</artifactId>
</exclusion>
</exclusions>
</dependency>
<dependency>
<groupId>com.google.http-client</groupId>
<artifactId>google-http-client</artifactId>
<version>${com.google.apis.client.version}</version>
</dependency>
<dependency>
<groupId>com.google.http-client</groupId>
<artifactId>google-http-client-jackson2</artifactId>
<version>${com.google.apis.client.version}</version>
<exclusions>
<exclusion>
<groupId>com.fasterxml.jackson.core</groupId>
<artifactId>jackson-core</artifactId>
</exclusion>
<exclusion>
<groupId>org.apache.httpcomponents</groupId>
<artifactId>httpclient</artifactId>
</exclusion>
</exclusions>
</dependency>
<dependency>
<groupId>com.fasterxml.jackson.module</groupId>
<artifactId>jackson-module-guice</artifactId>
<version>${jackson.version}</version>
<exclusions>
<exclusion>
<groupId>com.fasterxml.jackson.core</groupId>
<artifactId>jackson-core</artifactId>
</exclusion>
<exclusion>
<groupId>com.fasterxml.jackson.core</groupId>
<artifactId>jackson-databind</artifactId>
</exclusion>
</exclusions>
</dependency>
<dependency>
<groupId>io.github.resilience4j</groupId>
<artifactId>resilience4j-bulkhead</artifactId>
<version>${resilience4j.version}</version>
</dependency>
<dependency>
<groupId>org.testng</groupId>
<artifactId>testng</artifactId>
<version>6.8.7</version>
</dependency>
<dependency>
<groupId>org.hamcrest</groupId>
<artifactId>hamcrest-all</artifactId>
<version>${hamcrest.version}</version>
<scope>test</scope>
</dependency>
<dependency>
<groupId>org.hamcrest</groupId>
<artifactId>hamcrest-core</artifactId>
<version>${hamcrest.version}</version>
<scope>test</scope>
</dependency>
<dependency>
<groupId>pl.pragmatists</groupId>
<artifactId>JUnitParams</artifactId>
<version>1.1.1</version>
<scope>test</scope>
</dependency>
<dependency>
<groupId>com.google.guava</groupId>
<artifactId>guava-testlib</artifactId>
<version>${guava.version}</version>
<scope>test</scope>
</dependency>
<dependency>
<groupId>nl.jqno.equalsverifier</groupId>
<artifactId>equalsverifier</artifactId>
<version>3.1.11</version>
<scope>test</scope>
</dependency>
<dependency>
<groupId>com.github.stefanbirkner</groupId>
<artifactId>system-rules</artifactId>
<version>1.19.0</version>
<scope>test</scope>
</dependency>
</dependencies>
</dependencyManagement>
<build>
<plugins>
<plugin>
<groupId>org.jacoco</groupId>
<artifactId>jacoco-maven-plugin</artifactId>
<version>0.8.4</version>
<configuration>
<excludes>
<!-- Ignore generated code -->
<exclude>org/apache/druid/math/expr/antlr/Expr*</exclude> <!-- core -->
<exclude>org/apache/druid/**/generated/*Benchmark*</exclude> <!-- benchmarks -->
<exclude>org/apache/druid/data/input/influx/InfluxLineProtocol*</exclude> <!-- extensions-contrib/influx-extensions -->
<!-- Ignore non-production code -->
<exclude>org/apache/druid/benchmark/**/*</exclude> <!-- benchmarks -->
<exclude>org/apache/druid/**/*Benchmark*</exclude> <!-- benchmarks -->
            <exclude>org/testng/DruidTestRunnerFactory*</exclude> <!-- integration-tests -->
<exclude>org/apache/druid/testing/**/*</exclude> <!-- integration-tests -->
</excludes>
</configuration>
<executions>
<execution>
<id>prepare-agent</id>
<goals>
<goal>prepare-agent</goal>
</goals>
<configuration>
<propertyName>jacocoArgLine</propertyName>
</configuration>
</execution>
</executions>
</plugin>
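      <!--
        The prepare-agent execution above exposes the JaCoCo agent flags as the
        jacocoArgLine property, which the surefire configuration below injects via
        @{jacocoArgLine}. A minimal sketch for producing a coverage report locally
        (assuming the plugin's default output paths):

          mvn test jacoco:report

        The HTML report then appears under target/site/jacoco in each module.
      -->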
<plugin>
<groupId>org.apache.maven.plugins</groupId>
<artifactId>maven-checkstyle-plugin</artifactId>
<version>3.0.0</version>
<configuration>
<sourceDirectories>
<sourceDirectory>${project.build.sourceDirectory}</sourceDirectory>
</sourceDirectories>
<includeTestSourceDirectory>true</includeTestSourceDirectory>
<configLocation>codestyle/checkstyle.xml</configLocation>
<suppressionsLocation>codestyle/checkstyle-suppressions.xml</suppressionsLocation>
<suppressionsFileExpression>checkstyle.suppressions.file</suppressionsFileExpression>
<encoding>UTF-8</encoding>
<headerLocation>codestyle/LICENSE.txt</headerLocation>
<consoleOutput>true</consoleOutput>
<failsOnError>true</failsOnError>
</configuration>
<dependencies>
<dependency>
<groupId>com.puppycrawl.tools</groupId>
<artifactId>checkstyle</artifactId>
<version>8.21</version>
</dependency>
</dependencies>
<executions>
<execution>
<id>validate</id>
<phase>validate</phase>
<goals>
<goal>check</goal>
</goals>
</execution>
</executions>
</plugin>
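      <!--
        Checkstyle is bound to the validate phase, so every build runs it first; to run
        it in isolation with the same configuration declared above:

          mvn checkstyle:check
      -->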
<plugin>
<groupId>com.github.spotbugs</groupId>
<artifactId>spotbugs-maven-plugin</artifactId>
<version>3.1.12</version>
<dependencies>
          <!-- Overwrite the plugin's transitive dependency on SpotBugs to pin the SpotBugs version -->
<dependency>
<groupId>com.github.spotbugs</groupId>
<artifactId>spotbugs</artifactId>
<version>3.1.12</version>
</dependency>
</dependencies>
<configuration>
<excludeFilterFile>codestyle/spotbugs-exclude.xml</excludeFilterFile>
</configuration>
</plugin>
<plugin>
<groupId>org.apache.maven.plugins</groupId>
<artifactId>maven-pmd-plugin</artifactId>
<version>3.8</version>
<configuration>
<printFailingErrors>true</printFailingErrors>
<rulesets>
<ruleset>/rulesets/java/imports.xml</ruleset>
</rulesets>
<excludeRoots>
<excludeRoot>target/generated-sources/</excludeRoot>
</excludeRoots>
</configuration>
<executions>
<execution>
<id>validate</id>
<phase>validate</phase>
<goals>
<goal>check</goal>
</goals>
</execution>
</executions>
</plugin>
<plugin>
<groupId>de.thetaphi</groupId>
<artifactId>forbiddenapis</artifactId>
<version>2.6</version>
<configuration>
<failOnUnresolvableSignatures>false</failOnUnresolvableSignatures>
<bundledSignatures>
<!--
This will automatically choose the right
signatures based on 'maven.compiler.target':
-->
<bundledSignature>jdk-unsafe</bundledSignature>
</bundledSignatures>
<signaturesFiles>
<signaturesFile>${project.parent.basedir}/codestyle/joda-time-forbidden-apis.txt</signaturesFile>
<signaturesFile>${project.parent.basedir}/codestyle/druid-forbidden-apis.txt</signaturesFile>
</signaturesFiles>
<excludes>
<exclude>**/SomeAvroDatum.class</exclude>
</excludes>
<suppressAnnotations>
<annotation>**.SuppressForbidden</annotation>
</suppressAnnotations>
</configuration>
<executions>
<execution>
<id>compile</id>
<phase>compile</phase>
<goals>
<goal>check</goal>
</goals>
<configuration>
<bundledSignatures>
<!-- Check jdk-system-out only for production code, but not in test code -->
<bundledSignature>jdk-unsafe</bundledSignature>
<bundledSignature>jdk-system-out</bundledSignature>
</bundledSignatures>
</configuration>
</execution>
<execution>
<id>testCompile</id>
<phase>test-compile</phase>
<goals>
<goal>testCheck</goal>
</goals>
</execution>
</executions>
</plugin>
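      <!--
        Both forbiddenapis executions are bound to the normal compile lifecycle, so a
        plain build runs them; a sketch of invoking the same checks directly:

          mvn forbiddenapis:check forbiddenapis:testCheck
      -->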
<plugin>
<groupId>org.codehaus.mojo</groupId>
<artifactId>animal-sniffer-maven-plugin</artifactId>
<version>1.17</version>
<executions>
<execution>
<id>check-java-api</id>
<phase>test</phase>
<goals>
<goal>check</goal>
</goals>
<configuration>
<signature>
                  <groupId>org.codehaus.mojo.signature</groupId>
<artifactId>java18</artifactId>
<version>1.0</version>
</signature>
<ignores>
<!-- Some of our code uses sun.* classes directly, which are not part of
the JDK signature (although they are there anyway). -->
<ignore>sun.nio.ch.DirectBuffer</ignore>
<ignore>sun.misc.Cleaner</ignore>
<ignore>sun.misc.Unsafe</ignore>
<!-- ignore java reflection polymorphic api signatures -->
<ignore>java.lang.invoke.MethodHandle</ignore>
</ignores>
</configuration>
</execution>
</executions>
</plugin>
<plugin>
<groupId>org.apache.maven.plugins</groupId>
<artifactId>maven-enforcer-plugin</artifactId>
<version>1.4.1</version>
<executions>
<execution>
<id>default-cli</id>
<goals>
<goal>enforce</goal>
</goals>
<configuration>
<rules>
<requireJavaVersion>
<version>1.8.0</version>
</requireJavaVersion>
<bannedDependencies>
<excludes>
                  <!-- LGPL-licensed library -->
<exclude>com.google.code.findbugs:annotations</exclude>
</excludes>
</bannedDependencies>
</rules>
<fail>true</fail>
</configuration>
</execution>
</executions>
</plugin>
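      <!--
        The default-cli execution id makes these rules apply when the plugin is invoked
        directly from the command line, e.g.:

          mvn enforcer:enforce
      -->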
<plugin>
<groupId>org.apache.maven.plugins</groupId>
<artifactId>maven-jar-plugin</artifactId>
<configuration>
<archive>
<manifestEntries>
<Build-Jdk>${java.version} (${java.vendor} ${java.vm.version})</Build-Jdk>
<Build-OS>${os.name} ${os.arch} ${os.version}</Build-OS>
<Build-Timestamp>${git.build.time}</Build-Timestamp>
<Build-Version>${git.build.version}</Build-Version>
<Build-Revision>${git.commit.id}</Build-Revision>
<Build-Revision-Describe>${git.commit.id.describe}</Build-Revision-Describe>
</manifestEntries>
</archive>
</configuration>
</plugin>
<plugin>
<groupId>pl.project13.maven</groupId>
<artifactId>git-commit-id-plugin</artifactId>
<version>2.2.5</version>
<executions>
<execution>
<goals>
<goal>revision</goal>
</goals>
</execution>
</executions>
<configuration>
<dotGitDirectory>${project.basedir}/.git</dotGitDirectory>
<dateFormatTimeZone>Etc/UTC</dateFormatTimeZone>
<skipPoms>false</skipPoms>
<format>json</format>
<generateGitPropertiesFile>true</generateGitPropertiesFile>
<generateGitPropertiesFilename>${project.build.directory}/git.version</generateGitPropertiesFilename>
<failOnNoGitDirectory>false</failOnNoGitDirectory>
<excludeProperties>
<excludeProperty>git.build.user.email</excludeProperty>
<excludeProperty>git.build.host</excludeProperty>
<excludeProperty>git.commit.id.describe-short</excludeProperty>
<excludeProperty>git.commit.user.*</excludeProperty>
<excludeProperty>git.commit.message.*</excludeProperty>
<excludeProperty>git.closest.tag.*</excludeProperty>
<excludeProperty>git.commit.id.abbrev</excludeProperty>
<excludeProperty>git.dirty</excludeProperty>
</excludeProperties>
<gitDescribe>
<skip>false</skip>
<always>true</always>
<abbrev>7</abbrev>
<dirty>-dirty</dirty>
<forceLongFormat>true</forceLongFormat>
</gitDescribe>
</configuration>
</plugin>
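      <!--
        The git.* properties generated here (git.build.time, git.build.version,
        git.commit.id, git.commit.id.describe) feed the manifest entries declared in
        the maven-jar-plugin configuration above.
      -->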
<plugin>
<groupId>org.apache.maven.plugins</groupId>
<artifactId>maven-remote-resources-plugin</artifactId>
<executions>
<execution>
<id>process-resource-bundles</id>
<goals>
<goal>process</goal>
</goals>
<configuration>
<properties>
<projectName>Apache Druid</projectName>
</properties>
<resourceBundles>
<resourceBundle>org.apache.apache.resources:apache-jar-resource-bundle:1.5-SNAPSHOT</resourceBundle>
</resourceBundles>
</configuration>
</execution>
</executions>
</plugin>
<plugin>
<groupId>org.owasp</groupId>
<artifactId>dependency-check-maven</artifactId>
<version>5.3.0</version>
<configuration>
<cveValidForHours>24</cveValidForHours>
<failBuildOnCVSS>7</failBuildOnCVSS>
<skipProvidedScope>true</skipProvidedScope>
<skipSystemScope>true</skipSystemScope> <!-- avoid error when processing jdk.tools:jdk.tools:jar:1.8:system -->
<suppressionFile>owasp-dependency-check-suppressions.xml</suppressionFile>
</configuration>
<executions>
<execution>
            <phase>none</phase> <!-- TODO: Consider enabling this as part of the dev flow instead of only in CI -->
</execution>
</executions>
</plugin>
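      <!--
        With the execution phase set to none, the vulnerability scan never runs in a
        regular build; the apache-release profile below binds it to compile for
        releases. A sketch of a manual invocation:

          mvn dependency-check:check
      -->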
</plugins>
<pluginManagement>
<plugins>
<plugin>
<groupId>org.apache.maven.plugins</groupId>
<artifactId>maven-surefire-plugin</artifactId>
<version>2.22.2</version>
<configuration>
<!-- locale settings must be set on the command line before startup -->
<!-- set default options -->
<argLine>
@{jacocoArgLine}
-Xmx1500m
-XX:MaxDirectMemorySize=512m
-Duser.language=en
              -Duser.country=US
-Dfile.encoding=UTF-8
-Duser.timezone=UTC
-Djava.util.logging.manager=org.apache.logging.log4j.jul.LogManager
-Daws.region=us-east-1 <!-- required for s3-related unit tests -->
<!--@TODO After fixing https://github.com/apache/druid/issues/4964 remove this parameter-->
-Ddruid.indexing.doubleStorage=double
</argLine>
<trimStackTrace>false</trimStackTrace>
<!-- our tests are very verbose, let's keep the volume down -->
<redirectTestOutputToFile>true</redirectTestOutputToFile>
</configuration>
</plugin>
<plugin>
<groupId>org.apache.maven.plugins</groupId>
<artifactId>maven-release-plugin</artifactId>
<configuration>
<autoVersionSubmodules>true</autoVersionSubmodules>
</configuration>
</plugin>
<plugin>
<groupId>org.apache.maven.plugins</groupId>
<artifactId>maven-clean-plugin</artifactId>
<version>2.5</version>
</plugin>
<plugin>
<groupId>org.apache.maven.plugins</groupId>
<artifactId>maven-dependency-plugin</artifactId>
<version>3.1.1</version>
</plugin>
<plugin>
<groupId>org.apache.maven.plugins</groupId>
<artifactId>maven-deploy-plugin</artifactId>
<version>2.7</version>
</plugin>
<plugin>
<groupId>org.apache.maven.plugins</groupId>
<artifactId>maven-help-plugin</artifactId>
<version>2.1.1</version>
</plugin>
<plugin>
<groupId>org.apache.maven.plugins</groupId>
<artifactId>maven-install-plugin</artifactId>
<version>2.3.1</version>
<executions>
<execution>
<phase>package</phase>
<goals>
<goal>install</goal>
</goals>
</execution>
</executions>
</plugin>
<plugin>
<groupId>org.apache.maven.plugins</groupId>
<artifactId>maven-resources-plugin</artifactId>
<version>3.1.0</version>
</plugin>
<plugin>
<groupId>org.apache.maven.plugins</groupId>
<artifactId>maven-shade-plugin</artifactId>
<version>2.2</version>
</plugin>
<plugin>
<groupId>org.apache.maven.plugins</groupId>
<artifactId>maven-site-plugin</artifactId>
<version>3.1</version>
</plugin>
<plugin>
<groupId>org.scala-tools</groupId>
<artifactId>maven-scala-plugin</artifactId>
<version>2.15.2</version>
</plugin>
<plugin>
<groupId>org.antlr</groupId>
<artifactId>antlr4-maven-plugin</artifactId>
<version>4.5.1</version>
</plugin>
<plugin>
<groupId>org.apache.maven.plugins</groupId>
<artifactId>maven-assembly-plugin</artifactId>
<version>3.1.0</version>
</plugin>
<plugin>
<groupId>org.codehaus.mojo</groupId>
<artifactId>license-maven-plugin</artifactId>
<version>1.8</version>
</plugin>
<plugin>
<groupId>org.codehaus.mojo</groupId>
<artifactId>exec-maven-plugin</artifactId>
<version>1.6.0</version>
</plugin>
<plugin>
<groupId>org.apache.maven.plugins</groupId>
<artifactId>maven-javadoc-plugin</artifactId>
<version>2.10.1</version>
<configuration>
<!-- jdk8 started linting javadocs by default; ours are not fully compliant -->
<additionalparam>-Xdoclint:none</additionalparam>
<!-- HadoopFsWrapper javadocs cannot be generated due to missing annotations -->
<excludePackageNames>org.apache.hadoop.fs</excludePackageNames>
</configuration>
</plugin>
<plugin>
<groupId>org.apache.maven.plugins</groupId>
<artifactId>maven-compiler-plugin</artifactId>
<version>3.8.1</version>
<configuration>
<source>${maven.compiler.source}</source>
<target>${maven.compiler.target}</target>
</configuration>
</plugin>
<plugin>
<groupId>com.github.eirslett</groupId>
<artifactId>frontend-maven-plugin</artifactId>
<version>1.6</version>
<configuration>
<nodeVersion>v10.13.0</nodeVersion>
<npmVersion>6.4.1</npmVersion>
</configuration>
</plugin>
</plugins>
</pluginManagement>
</build>
<profiles>
<profile>
<id>java-9+</id>
<activation>
<jdk>[9,)</jdk>
</activation>
<build>
<plugins>
<plugin>
<groupId>org.apache.maven.plugins</groupId>
<artifactId>maven-compiler-plugin</artifactId>
<inherited>true</inherited>
<!-- prefer release instead of source/target in JDK 9 and above -->
<configuration>
<release>${java.version}</release>
</configuration>
</plugin>
</plugins>
</build>
</profile>
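    <!--
      Because java.version is 8, a JDK 9+ build compiles against the Java 8 platform
      API via the release option, so accidental use of newer JDK APIs fails at compile
      time rather than at runtime on Java 8.
    -->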
<profile>
<id>strict</id>
<build>
<plugins>
<plugin>
<groupId>org.apache.maven.plugins</groupId>
<artifactId>maven-compiler-plugin</artifactId>
<configuration>
<compilerId>javac-with-errorprone</compilerId>
<forceJavacCompilerUse>true</forceJavacCompilerUse>
<fork>true</fork>
<meminitial>1024m</meminitial>
<maxmem>3000m</maxmem>
<source>${maven.compiler.source}</source>
<target>${maven.compiler.target}</target>
<showWarnings>false</showWarnings>
<compilerArgs>
<arg>-XepDisableWarningsInGeneratedCode</arg>
<arg>-Xep:ClassCanBeStatic:ERROR</arg>
<arg>-Xep:PreconditionsInvalidPlaceholder:ERROR</arg>
<arg>-Xep:MissingOverride:ERROR</arg>
<arg>-Xep:DefaultCharset:ERROR</arg>
<arg>-Xep:QualifierOrScopeOnInjectMethod:ERROR</arg>
<arg>-Xep:AssistedInjectAndInjectOnSameConstructor</arg>
<arg>-Xep:AutoFactoryAtInject</arg>
<arg>-Xep:ClassName</arg>
<arg>-Xep:ComparisonContractViolated</arg>
<arg>-Xep:DepAnn</arg>
<arg>-Xep:DivZero</arg>
<arg>-Xep:EmptyIf</arg>
<arg>-Xep:InjectInvalidTargetingOnScopingAnnotation</arg>
<arg>-Xep:InjectMoreThanOneQualifier</arg>
<arg>-Xep:InjectScopeAnnotationOnInterfaceOrAbstractClass</arg>
<arg>-Xep:InjectScopeOrQualifierAnnotationRetention</arg>
<arg>-Xep:InjectedConstructorAnnotations</arg>
<arg>-Xep:InsecureCryptoUsage</arg>
<arg>-Xep:JMockTestWithoutRunWithOrRuleAnnotation</arg>
<arg>-Xep:JavaxInjectOnFinalField</arg>
<arg>-Xep:LockMethodChecker</arg>
<arg>-Xep:LongLiteralLowerCaseSuffix</arg>
<arg>-Xep:NoAllocation</arg>
<arg>-Xep:NonRuntimeAnnotation</arg>
<arg>-Xep:NumericEquality</arg>
<arg>-Xep:ParameterPackage</arg>
<arg>-Xep:ProtoStringFieldReferenceEquality</arg>
<arg>-Xep:UnlockMethod</arg>
</compilerArgs>
</configuration>
<dependencies>
<dependency>
<groupId>org.codehaus.plexus</groupId>
<artifactId>plexus-compiler-javac-errorprone</artifactId>
<version>2.8.5</version>
</dependency>
<!-- override plexus-compiler-javac-errorprone's dependency on
Error Prone with the latest version -->
<dependency>
<groupId>com.google.errorprone</groupId>
<artifactId>error_prone_core</artifactId>
<version>2.3.2</version>
</dependency>
</dependencies>
</plugin>
</plugins>
</build>
</profile>
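    <!--
      A sketch of invoking the strict profile, which swaps in the error-prone compiler
      configured above:

        mvn clean -Pstrict compile test-compile
    -->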
<profile>
<id>parallel-test</id>
<activation>
<activeByDefault>false</activeByDefault>
</activation>
<build>
<plugins>
<plugin>
<groupId>org.apache.maven.plugins</groupId>
<artifactId>maven-surefire-plugin</artifactId>
<executions>
<execution>
<phase>test</phase>
<goals>
<goal>test</goal>
</goals>
</execution>
</executions>
<configuration combine.self="override">
<forkCount>${maven.fork.count}</forkCount>
<reuseForks>true</reuseForks>
<trimStackTrace>false</trimStackTrace>
<!-- locale settings must be set on the command line before startup -->
<!-- set heap size to work around https://github.com/travis-ci/travis-ci/issues/3396 -->
<argLine>-Xmx768m -Duser.language=en -Duser.country=US -Dfile.encoding=UTF-8
-Duser.timezone=UTC -Djava.util.logging.manager=org.apache.logging.log4j.jul.LogManager
-Daws.region=us-east-1 <!-- required for s3-related unit tests -->
<!--@TODO After fixing https://github.com/apache/druid/issues/4964 remove this parameter-->
-Ddruid.indexing.doubleStorage=double
</argLine>
<!-- our tests are very verbose, let's keep the volume down -->
<redirectTestOutputToFile>true</redirectTestOutputToFile>
</configuration>
</plugin>
</plugins>
</build>
</profile>
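    <!--
      Example use of the parallel-test profile; supply maven.fork.count to control the
      number of forked test JVMs (the value 2 below is only illustrative):

        mvn test -Pparallel-test -Dmaven.fork.count=2
    -->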
<!-- Run Apache Rat license checks in a separate profile, because during local builds it doesn't skip files
that are not checked into Git -->
<profile>
<id>rat</id>
<build>
<plugins>
<plugin>
<groupId>org.apache.rat</groupId>
<artifactId>apache-rat-plugin</artifactId>
<version>0.12</version>
<executions>
<execution>
<phase>verify</phase>
<goals>
<goal>check</goal>
</goals>
</execution>
</executions>
<configuration>
<outputDirectory>${project.basedir}/rat</outputDirectory>
<licenses>
<license implementation="org.apache.rat.analysis.license.SimplePatternBasedLicense">
<licenseFamilyCategory>MIT</licenseFamilyCategory>
<licenseFamilyName>MIT JQuery</licenseFamilyName>
<notes></notes>
<patterns>
<pattern>Copyright 2005, 2014 jQuery Foundation, Inc. and other contributors</pattern>
<pattern>Copyright 2012 jQuery Foundation and other contributors; Licensed MIT</pattern>
<pattern>jQuery Foundation, Inc. | jquery.org/license</pattern>
</patterns>
</license>
<license implementation="org.apache.rat.analysis.license.SimplePatternBasedLicense">
<licenseFamilyCategory>Underscore</licenseFamilyCategory>
<licenseFamilyName>Underscore</licenseFamilyName>
<notes></notes>
<patterns>
<pattern>Underscore is freely distributable under the MIT license</pattern>
</patterns>
</license>
<license implementation="org.apache.rat.analysis.license.SimplePatternBasedLicense">
<licenseFamilyCategory>Allan Jardine</licenseFamilyCategory>
<licenseFamilyName>Allan Jardine</licenseFamilyName>
<notes></notes>
<patterns>
<pattern>Copyright 2009 Allan Jardine. All Rights Reserved</pattern>
</patterns>
</license>
<license implementation="org.apache.rat.analysis.license.SimplePatternBasedLicense">
<licenseFamilyCategory>Allan Jardine</licenseFamilyCategory>
<licenseFamilyName>Allan Jardine</licenseFamilyName>
<notes></notes>
<patterns>
<pattern>Copyright 2009 Allan Jardine. All Rights Reserved</pattern>
<pattern>Copyright 2008-2011 Allan Jardine</pattern>
<pattern>GPL v2 or BSD 3 point style</pattern>
</patterns>
</license>
</licenses>
<licenseFamilies>
<licenseFamily implementation="org.apache.rat.license.SimpleLicenseFamily">
<familyName>MIT JQuery</familyName>
</licenseFamily>
<licenseFamily implementation="org.apache.rat.license.SimpleLicenseFamily">
<familyName>Underscore</familyName>
</licenseFamily>
<licenseFamily implementation="org.apache.rat.license.SimpleLicenseFamily">
<familyName>Allan Jardine</familyName>
</licenseFamily>
</licenseFamilies>
<excludes>
<exclude>publications/**</exclude>
<exclude>codestyle/*-forbidden-apis.txt</exclude>
<exclude>conf/**</exclude>
<exclude>docker/*.conf</exclude>
<exclude>docker/service-supervisords/*.conf</exclude>
<exclude>target/**</exclude>
<exclude>licenses/**</exclude>
<exclude>**/test/resources/**</exclude>
<exclude>**/derby.log</exclude>
<exclude>**/jvm.config</exclude>
<exclude>**/*.avsc</exclude>
<exclude>**/*.iml</exclude>
<exclude>**/*.json</exclude>
<exclude>**/*.parq</exclude>
<exclude>**/*.parquet</exclude>
<exclude>LICENSE</exclude>
<exclude>LICENSE.BINARY</exclude>
<exclude>NOTICE</exclude>
<exclude>NOTICE.BINARY</exclude>
<exclude>LABELS</exclude>
<exclude>.github/ISSUE_TEMPLATE/*.md</exclude>
<exclude>.github/pull_request_template.md</exclude>
<exclude>git.version</exclude>
<exclude>node_modules/**</exclude>
<exclude>coordinator-console/**</exclude>
<exclude>src/**/*.snap</exclude>
<exclude>examples/conf/**</exclude>
<exclude>.asf.yaml</exclude>
</excludes>
</configuration>
</plugin>
</plugins>
</build>
</profile>
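    <!--
      A sketch of running the Rat audit via this profile:

        mvn apache-rat:check -Prat
    -->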
<!-- Prevent the source-release-assembly execution defined in the Apache parent POM from running
so we can control it ourselves -->
<profile>
<id>apache-release</id>
<build>
<plugins>
<plugin>
<artifactId>maven-assembly-plugin</artifactId>
<executions>
<execution>
<id>source-release-assembly</id>
<phase>none</phase>
</execution>
</executions>
</plugin>
<plugin>
<groupId>org.owasp</groupId>
<artifactId>dependency-check-maven</artifactId>
<executions>
<execution>
<phase>compile</phase>
<goals>
<goal>check</goal>
</goals>
</execution>
</executions>
</plugin>
</plugins>
</build>
</profile>
<profile>
<id>website-docs</id>
<modules>
<module>website</module>
</modules>
</profile>
</profiles>
</project>