diff --git a/LICENSE.BINARY b/LICENSE.BINARY
index 56016a74b17..ddd7492db44 100644
--- a/LICENSE.BINARY
+++ b/LICENSE.BINARY
@@ -654,40 +654,6 @@ BINARY/EXTENSIONS/druid-datasketches
* com.yahoo.datasketches:memory
-BINARY/EXTENSIONS/druid-examples
-
- This product bundles IRC API version 1.0-0014.
- * com.ircclouds.irc:irc-api
-
- This product bundles MaxMind GeoIP2 API version 0.4.0.
- * com.maxmind.geoip2:geoip2
-
- This product bundles the following Apache Commons libraries:
- * commons-beanutils 1.8.3
- * commons-validator 1.4.0
-
- This product bundles Twitter4J version 3.0.3.
- * org.twitter4j:twitter4j-async
- * org.twitter4j:twitter4j-core
- * org.twitter4j:twitter4j-stream
-
-
-BINARY/EXTENSIONS/druid-kafka-eight
-
- This product bundles Apache Kafka version 0.8.2.1.
- * org.apache.kafka:kafka_2.10
- * org.apache.kafka:kafka-clients
-
- This product bundles ZkClient version 0.3.
- * com.101tec:zkclient
-
- This product bundles Yammer Metrics version 2.2.0.
- * com.yammer.metrics:metrics-core
-
- This product bundles snappy-java version 1.1.1.6.
- * org.xerial.snappy:snappy-java
-
-
BINARY/EXTENSIONS/druid-kafka-indexing-service
This product bundles Apache Kafka version 0.10.2.2.
* org.apache.kafka:kafka-clients
@@ -785,12 +751,6 @@ BINARY/HADOOP-CLIENT
* org.slf4j:slf4j-log4j12
-BINARY/EXTENSIONS/druid-kafka-eight
- This product bundles JOpt Simple version 3.2, copyright Paul R. Holser, Jr.,
- which is available under an MIT license. For details, see licenses/bin/jopt-simple.MIT.
- * net.sf.jopt-simple:jopt-simple
-
-
BINARY/WEB-CONSOLE
The following dependency names are NPM package names (https://www.npmjs.com).
@@ -1052,13 +1012,6 @@ BINARY/EXTENSIONS/druid-kerberos
which is available under a BSD-3-Clause License. For details, see licenses/bin/jsch.BSD3.
* com.jcraft:jsch
-
-BINARY/EXTENSIONS/druid-kafka-eight
- This product bundles Scala Library version 2.10.4, copyright EPFL, Lightbend Inc.,
- which is available under a BSD-3-Clause License. For details, see licenses/bin/scala-lang.BSD3.
- * org.scala-lang:scala-library
-
-
BINARY/EXTENSIONS/druid-lookups-cached-single
This product bundles StringTemplate version 3.2, copyright Terrence Parr,
which is available under a BSD-3-Clause License. For details, see licenses/bin/antlr-stringtemplate.BSD3.
diff --git a/NOTICE.BINARY b/NOTICE.BINARY
index fba82ceaa0a..8b3e99f7383 100644
--- a/NOTICE.BINARY
+++ b/NOTICE.BINARY
@@ -1719,44 +1719,6 @@ http://gcc.gnu.org/onlinedocs/libstdc++/manual/license.html
-############ BINARY/EXTENSIONS/druid-kafka-eight ############
-
-================= metrics-core-2.2.0.jar =================
-Metrics
-Copyright 2010-2012 Coda Hale and Yammer, Inc.
-
-This product includes software developed by Coda Hale and Yammer, Inc.
-
-This product includes code derived from the JSR-166 project (ThreadLocalRandom), which was released
-with the following comments:
-
- Written by Doug Lea with assistance from members of JCP JSR-166
- Expert Group and released to the public domain, as explained at
- http://creativecommons.org/publicdomain/zero/1.0/
-
-
-
-
-================= snappy-1.1.1.6.jar =================
-This product includes software developed by Google
- Snappy: http://code.google.com/p/snappy/ (New BSD License)
-
-This product includes software developed by Apache
- PureJavaCrc32C from apache-hadoop-common http://hadoop.apache.org/
- (Apache 2.0 license)
-
-This library contains statically linked libstdc++. This inclusion is allowed by
-the "GCC Runtime Library Exception"
-http://gcc.gnu.org/onlinedocs/libstdc++/manual/license.html
-
-== Contributors ==
- * Tatu Saloranta
- * Providing benchmark suite
- * Alec Wysoker
- * Performance and memory usage improvement
-
-
-
############ BINARY/EXTENSIONS/druid-kafka-indexing-service ############
diff --git a/core/src/main/java/org/apache/druid/data/input/FirehoseFactoryV2.java b/core/src/main/java/org/apache/druid/data/input/FirehoseFactoryV2.java
deleted file mode 100644
index 08259b4f69c..00000000000
--- a/core/src/main/java/org/apache/druid/data/input/FirehoseFactoryV2.java
+++ /dev/null
@@ -1,51 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied. See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-
-package org.apache.druid.data.input;
-
-import com.fasterxml.jackson.annotation.JsonTypeInfo;
-import org.apache.druid.data.input.impl.InputRowParser;
-import org.apache.druid.guice.annotations.ExtensionPoint;
-import org.apache.druid.java.util.common.parsers.ParseException;
-
-import java.io.IOException;
-
-/**
- * Initialization method that connects up the FirehoseV2. If this method returns successfully it should be safe to
- * call start() on the returned FirehoseV2 (which might subsequently block).
- *
- * In contrast to V1 version, FirehoseFactoryV2 is able to pass an additional json-serialized object to FirehoseV2,
- * which contains last commit metadata
- *
- *
- * If this method returns null, then any attempt to call start(), advance(), currRow(), makeCommitter() and close() on the return
- * value will throw a surprising NPE. Throwing IOException on connection failure or runtime exception on
- * invalid configuration is preferred over returning null.
- */
-@ExtensionPoint
-@JsonTypeInfo(use = JsonTypeInfo.Id.NAME, property = "type")
-public interface FirehoseFactoryV2<T extends InputRowParser>
-{
- /**
- * This method is declared to throw {@link IOException}, although it's not thrown in the implementations in Druid
- * code, for compatibility with third-party extensions.
- */
- @SuppressWarnings("RedundantThrows")
- FirehoseV2 connect(T parser, Object lastCommit) throws IOException, ParseException;
-}
diff --git a/core/src/main/java/org/apache/druid/data/input/FirehoseV2.java b/core/src/main/java/org/apache/druid/data/input/FirehoseV2.java
deleted file mode 100644
index c6aa33f95d2..00000000000
--- a/core/src/main/java/org/apache/druid/data/input/FirehoseV2.java
+++ /dev/null
@@ -1,91 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied. See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-
-package org.apache.druid.data.input;
-
-import org.apache.druid.guice.annotations.ExtensionPoint;
-
-import java.io.Closeable;
-
-/**
- * This is an interface that holds onto the stream of incoming data. Realtime data ingestion is built around this
- * abstraction. In order to add a new type of source for realtime data ingestion, all you need to do is implement
- * one of these and register it with the Main.
- *
- * In contrast to the V1 Firehose, FirehoseV2 will always operate in a "peek, then advance" manner.
- * And the intended usage pattern is
- * 1. Call start()
- * 2. Read currRow()
- * 3. Call advance()
- * 4. If index should be committed: commit()
- * 5. GOTO 2
- *
- * Note that commit() is being called *after* advance.
- *
- * This object acts a lot like an Iterator, but it doesn't extend the Iterator interface because it extends
- * Closeable and it is very important that the close() method doesn't get forgotten, which is easy to do if this
- * gets passed around as an Iterator.
- *
- * The implementation of this interface only needs to be minimally thread-safe. The methods {@link #start()}, {@link
- * #advance()}, {@link #currRow()} and {@link #makeCommitter()} are all called from the same thread. {@link
- * #makeCommitter()}, however, returns a callback which will be called on another thread, so the operations inside of
- * that callback must be thread-safe.
- */
-@ExtensionPoint
-public interface FirehoseV2 extends Closeable
-{
- /**
- * For initial start
- */
- void start();
-
- /**
- * Advance the firehose to the next offset. Implementations of this interface should make sure that
- * if advance() is called and throws out an exception, the next call to currRow() should return a
- * null value.
- *
- * @return true if and when there is another row available, false if the stream has dried up
- */
- boolean advance();
-
- /**
- * @return The current row
- */
- InputRow currRow();
-
- /**
- * Returns a Committer that will "commit" everything read up to the point at which makeCommitter() is called.
- *
- * This method is called when the main processing loop starts to persist its current batch of things to process.
- * The returned committer will be run when the current batch has been successfully persisted
- * and the metadata the committer carries can also be persisted along with segment data. There is usually
- * some time lag between when this method is called and when the runnable is run. The Runnable is also run on
- * a separate thread so its operation should be thread-safe.
- *
- * Note that "correct" usage of this interface will always call advance() before commit() if the current row
- * is considered in the commit.
- *
- * The Runnable is essentially just a lambda/closure that is run() after data supplied by this instance has
- * been committed on the writer side of this interface protocol.
- *
- * A simple implementation of this interface might do nothing when run() is called,
- * and save proper commit information in metadata
- */
- Committer makeCommitter();
-}
diff --git a/distribution/pom.xml b/distribution/pom.xml
index d74d15547ee..e9c23d6f5de 100644
--- a/distribution/pom.xml
+++ b/distribution/pom.xml
@@ -168,8 +168,6 @@
                         <argument>-c</argument>
                         <argument>org.apache.druid.extensions:druid-histogram</argument>
                         <argument>-c</argument>
-                        <argument>org.apache.druid.extensions:druid-kafka-eight</argument>
-                        <argument>-c</argument>
                         <argument>org.apache.druid.extensions:druid-kafka-extraction-namespace</argument>
                         <argument>-c</argument>
                         <argument>org.apache.druid.extensions:druid-kafka-indexing-service</argument>
@@ -200,8 +198,6 @@
                         <argument>-c</argument>
                         <argument>org.apache.druid.extensions:druid-stats</argument>
                         <argument>-c</argument>
-                        <argument>org.apache.druid.extensions:druid-examples</argument>
-                        <argument>-c</argument>
                         <argument>org.apache.druid.extensions:simple-client-sslcontext</argument>
                         <argument>-c</argument>
                         <argument>org.apache.druid.extensions:druid-basic-security</argument>
@@ -318,16 +314,12 @@
                         <argument>-c</argument>
                         <argument>org.apache.druid.extensions.contrib:druid-distinctcount</argument>
                         <argument>-c</argument>
-                        <argument>org.apache.druid.extensions.contrib:druid-rocketmq</argument>
-                        <argument>-c</argument>
                         <argument>org.apache.druid.extensions.contrib:graphite-emitter</argument>
                         <argument>-c</argument>
                         <argument>org.apache.druid.extensions.contrib:druid-influx-extensions</argument>
                         <argument>-c</argument>
                         <argument>org.apache.druid.extensions.contrib:druid-influxdb-emitter</argument>
                         <argument>-c</argument>
-                        <argument>org.apache.druid.extensions.contrib:druid-kafka-eight-simple-consumer</argument>
-                        <argument>-c</argument>
                         <argument>org.apache.druid.extensions.contrib:kafka-emitter</argument>
                         <argument>-c</argument>
                         <argument>org.apache.druid.extensions.contrib:materialized-view-maintenance</argument>
@@ -336,8 +328,6 @@
                         <argument>-c</argument>
                         <argument>org.apache.druid.extensions.contrib:druid-opentsdb-emitter</argument>
                         <argument>-c</argument>
-                        <argument>org.apache.druid.extensions.contrib:druid-rabbitmq</argument>
-                        <argument>-c</argument>
                         <argument>org.apache.druid.extensions.contrib:druid-redis-cache</argument>
                         <argument>-c</argument>
                         <argument>org.apache.druid.extensions.contrib:sqlserver-metadata-storage</argument>
diff --git a/docs/_redirects.json b/docs/_redirects.json
index 508aedf2d5f..88f94ce702d 100644
--- a/docs/_redirects.json
+++ b/docs/_redirects.json
@@ -166,9 +166,13 @@
{"source": "development/community-extensions/rabbitmq.html", "target": "../extensions-contrib/rabbitmq.html"},
{"source": "development/extensions-core/namespaced-lookup.html", "target": "lookups-cached-global.html"},
{"source": "operations/performance-faq.html", "target": "../operations/basic-cluster-tuning.html"},
- {"source": "development/extensions-contrib/orc.html", "target": "../extensions-core/orc.html"}
+ {"source": "development/extensions-contrib/orc.html", "target": "../extensions-core/orc.html"},
{"source": "operations/performance-faq.html", "target": "../operations/basic-cluster-tuning.html"},
{"source": "configuration/realtime.md", "target": "../ingestion/standalone-realtime.html"},
{"source": "design/realtime.md", "target": "../ingestion/standalone-realtime.html"},
- {"source": "ingestion/stream-pull.md", "target": "../ingestion/standalone-realtime.html"}
+ {"source": "ingestion/stream-pull.md", "target": "../ingestion/standalone-realtime.html"},
+ {"source": "development/extensions-core/kafka-eight-firehose.md", "target": "../../ingestion/standalone-realtime.html"},
+ {"source": "development/extensions-contrib/kafka-simple.md", "target": "../../ingestion/standalone-realtime.html"},
+ {"source": "development/extensions-contrib/rabbitmq.md", "target": "../../ingestion/standalone-realtime.html"},
+ {"source": "development/extensions-contrib/rocketmq.md", "target": "../../ingestion/standalone-realtime.html"},
]
diff --git a/docs/content/development/extensions-contrib/kafka-simple.md b/docs/content/development/extensions-contrib/kafka-simple.md
deleted file mode 100644
index 3211efe90b3..00000000000
--- a/docs/content/development/extensions-contrib/kafka-simple.md
+++ /dev/null
@@ -1,56 +0,0 @@
----
-layout: doc_page
-title: "Kafka Simple Consumer"
----
-
-
-
-# Kafka Simple Consumer
-
-To use this Apache Druid (incubating) extension, make sure to [include](../../operations/including-extensions.html) `druid-kafka-eight-simpleConsumer` extension.
-
-## Firehose
-
-This is an experimental firehose to ingest data from Apache Kafka using the Kafka simple consumer api. Currently, this firehose only works inside standalone realtime processes.
-The configuration for KafkaSimpleConsumerFirehose is similar to the Kafka Eight Firehose, except `firehose` should be replaced with `firehoseV2` like this:
-
-```json
-"firehoseV2": {
- "type" : "kafka-0.8-v2",
- "brokerList" : ["localhost:4443"],
- "queueBufferLength":10001,
- "resetOffsetToEarliest":"true",
- "partitionIdList" : ["0"],
- "clientId" : "localclient",
- "feed": "wikipedia"
-}
-```
-
-|property|description|required?|
-|--------|-----------|---------|
-|type|kafka-0.8-v2|yes|
-|brokerList|list of the kafka brokers|yes|
-|queueBufferLength|the buffer length for the kafka message queue|no (default 20000)|
-|resetOffsetToEarliest|whether the consumer should start from the earliest or the latest available message when a kafkaOffsetOutOfRange error occurs|true|
-|partitionIdList|list of kafka partition ids|yes|
-|clientId|the clientId for kafka SimpleConsumer|yes|
-|feed|kafka topic|yes|
-
-For using this firehose at scale and possibly in production, it is recommended to set the replication factor to at least three, which means at least three Kafka brokers in the `brokerList`. For a Kafka topic of roughly 10^4 events per second, a single partition works properly, but more partitions can be added if higher throughput is required.
diff --git a/docs/content/development/extensions-contrib/rabbitmq.md b/docs/content/development/extensions-contrib/rabbitmq.md
deleted file mode 100644
index e9eefc556bc..00000000000
--- a/docs/content/development/extensions-contrib/rabbitmq.md
+++ /dev/null
@@ -1,81 +0,0 @@
----
-layout: doc_page
-title: "RabbitMQ"
----
-
-
-
-# RabbitMQ
-
-To use this Apache Druid (incubating) extension, make sure to [include](../../operations/including-extensions.html) `druid-rabbitmq` extension.
-
-## Firehose
-
-#### RabbitMQFirehose
-
-This firehose ingests events from a predefined RabbitMQ queue.
-
-**Note:** Add **amqp-client-3.2.1.jar** to the lib directory of Druid to use this firehose.
-
-A sample spec for rabbitmq firehose:
-
-```json
-"firehose" : {
- "type" : "rabbitmq",
- "connection" : {
- "host": "localhost",
- "port": "5672",
- "username": "test-dude",
- "password": "test-word",
- "virtualHost": "test-vhost",
- "uri": "amqp://mqserver:1234/vhost"
- },
- "config" : {
- "exchange": "test-exchange",
- "queue" : "druidtest",
- "routingKey": "#",
- "durable": "true",
- "exclusive": "false",
- "autoDelete": "false",
- "maxRetries": "10",
- "retryIntervalSeconds": "1",
- "maxDurationSeconds": "300"
- }
-}
-```
-
-|property|description|default|required?|
-|--------|-----------|-------|---------|
-|type|This should be "rabbitmq"|N/A|yes|
-|host|The hostname of the RabbitMQ broker to connect to|localhost|no|
-|port|The port number to connect to on the RabbitMQ broker|5672|no|
-|username|The username to use to connect to RabbitMQ|guest|no|
-|password|The password to use to connect to RabbitMQ|guest|no|
-|virtualHost|The virtual host to connect to|/|no|
-|uri|The URI string to use to connect to RabbitMQ| |no|
-|exchange|The exchange to connect to| |yes|
-|queue|The queue to connect to or create| |yes|
-|routingKey|The routing key to use to bind the queue to the exchange| |yes|
-|durable|Whether the queue should be durable|false|no|
-|exclusive|Whether the queue should be exclusive|false|no|
-|autoDelete|Whether the queue should auto-delete on disconnect|false|no|
-|maxRetries|The max number of reconnection retry attempts| |yes|
-|retryIntervalSeconds|The reconnection interval| |yes|
-|maxDurationSeconds|The max duration of trying to reconnect| |yes|
diff --git a/docs/content/development/extensions-contrib/rocketmq.md b/docs/content/development/extensions-contrib/rocketmq.md
deleted file mode 100644
index 4dd0eeab0e2..00000000000
--- a/docs/content/development/extensions-contrib/rocketmq.md
+++ /dev/null
@@ -1,29 +0,0 @@
----
-layout: doc_page
-title: "RocketMQ"
----
-
-
-
-# RocketMQ
-
-To use this Apache Druid (incubating) extension, make sure to [include](../../operations/including-extensions.html) `druid-rocketmq` extension.
-
-Original author: [https://github.com/lizhanhui](https://github.com/lizhanhui).
diff --git a/docs/content/development/extensions-core/kafka-eight-firehose.md b/docs/content/development/extensions-core/kafka-eight-firehose.md
deleted file mode 100644
index 740e5fa65f8..00000000000
--- a/docs/content/development/extensions-core/kafka-eight-firehose.md
+++ /dev/null
@@ -1,54 +0,0 @@
----
-layout: doc_page
-title: "Apache Kafka Eight Firehose"
----
-
-
-
-# Kafka Eight Firehose
-
-To use this Apache Druid (incubating) extension, make sure to [include](../../operations/including-extensions.html) `druid-kafka-eight` as an extension.
-
-This firehose acts as a Kafka 0.8.x consumer and ingests data from Kafka.
-
-Sample spec:
-
-```json
-"firehose": {
- "type": "kafka-0.8",
- "consumerProps": {
- "zookeeper.connect": "localhost:2181",
- "zookeeper.connection.timeout.ms" : "15000",
- "zookeeper.session.timeout.ms" : "15000",
- "zookeeper.sync.time.ms" : "5000",
- "group.id": "druid-example",
- "fetch.message.max.bytes" : "1048586",
- "auto.offset.reset": "largest",
- "auto.commit.enable": "false"
- },
- "feed": "wikipedia"
-}
-```
-
-|property|description|required?|
-|--------|-----------|---------|
-|type|This should be "kafka-0.8"|yes|
-|consumerProps|The full list of consumer configs can be found [here](https://kafka.apache.org/08/configuration.html).|yes|
-|feed|Kafka maintains feeds of messages in categories called topics. This is the topic name.|yes|
diff --git a/docs/content/development/extensions.md b/docs/content/development/extensions.md
index 41a7aa1766d..7b51b7ab056 100644
--- a/docs/content/development/extensions.md
+++ b/docs/content/development/extensions.md
@@ -48,7 +48,6 @@ Core extensions are maintained by Druid committers.
|druid-datasketches|Support for approximate counts and set operations with [DataSketches](https://datasketches.github.io/).|[link](../development/extensions-core/datasketches-extension.html)|
|druid-hdfs-storage|HDFS deep storage.|[link](../development/extensions-core/hdfs.html)|
|druid-histogram|Approximate histograms and quantiles aggregator. Deprecated, please use the [DataSketches quantiles aggregator](../development/extensions-core/datasketches-quantiles.html) from the `druid-datasketches` extension instead.|[link](../development/extensions-core/approximate-histograms.html)|
-|druid-kafka-eight|Kafka ingest firehose (high level consumer) for realtime nodes(deprecated).|[link](../development/extensions-core/kafka-eight-firehose.html)|
|druid-kafka-extraction-namespace|Kafka-based namespaced lookup. Requires namespace lookup extension.|[link](../development/extensions-core/kafka-extraction-namespace.html)|
|druid-kafka-indexing-service|Supervised exactly-once Kafka ingestion for the indexing service.|[link](../development/extensions-core/kafka-ingestion.html)|
|druid-kinesis-indexing-service|Supervised exactly-once Kinesis ingestion for the indexing service.|[link](../development/extensions-core/kinesis-ingestion.html)|
@@ -83,10 +82,7 @@ All of these community extensions can be downloaded using [pull-deps](../operati
|druid-cassandra-storage|Apache Cassandra deep storage.|[link](../development/extensions-contrib/cassandra.html)|
|druid-cloudfiles-extensions|Rackspace Cloudfiles deep storage and firehose.|[link](../development/extensions-contrib/cloudfiles.html)|
|druid-distinctcount|DistinctCount aggregator|[link](../development/extensions-contrib/distinctcount.html)|
-|druid-kafka-eight-simpleConsumer|Kafka ingest firehose (low level consumer)(deprecated).|[link](../development/extensions-contrib/kafka-simple.html)|
-|druid-rabbitmq|RabbitMQ firehose.|[link](../development/extensions-contrib/rabbitmq.html)|
|druid-redis-cache|A cache implementation for Druid based on Redis.|[link](../development/extensions-contrib/redis-cache.html)|
-|druid-rocketmq|RocketMQ firehose.|[link](../development/extensions-contrib/rocketmq.html)|
|druid-time-min-max|Min/Max aggregator for timestamp.|[link](../development/extensions-contrib/time-min-max.html)|
|druid-google-extensions|Google Cloud Storage deep storage.|[link](../development/extensions-contrib/google.html)|
|sqlserver-metadata-storage|Microsoft SqlServer deep storage.|[link](../development/extensions-contrib/sqlserver.html)|
diff --git a/docs/content/ingestion/standalone-realtime.md b/docs/content/ingestion/standalone-realtime.md
index 81ce89d6f09..27065c5e500 100644
--- a/docs/content/ingestion/standalone-realtime.md
+++ b/docs/content/ingestion/standalone-realtime.md
@@ -39,5 +39,43 @@ removed completely in Druid 0.16.0. Operationally, realtime nodes were difficult
each node required a unique configuration. The design of the stream pull ingestion system for realtime nodes also
suffered from limitations which made it not possible to achieve exactly once ingestion.
+The extensions `druid-kafka-eight`, `druid-kafka-eight-simpleConsumer`, `druid-rabbitmq`, and `druid-rocketmq` were also
+removed at this time, since they were built to operate on the realtime nodes.
+
Please consider using the [Kafka Indexing Service](../development/extensions-core/kafka-ingestion.html) or
[Kinesis Indexing Service](../development/extensions-core/kinesis-ingestion.md) for stream pull ingestion instead.
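+
+For example, a minimal Kafka indexing service supervisor spec might look like the
+following sketch (the topic, broker address, and schema are illustrative values;
+see the Kafka ingestion documentation linked above for the full reference):
+
+```json
+{
+  "type": "kafka",
+  "dataSchema": {
+    "dataSource": "wikipedia",
+    "parser": {
+      "type": "string",
+      "parseSpec": {
+        "format": "json",
+        "timestampSpec": {"column": "timestamp", "format": "auto"},
+        "dimensionsSpec": {"dimensions": []}
+      }
+    },
+    "metricsSpec": [{"type": "count", "name": "count"}],
+    "granularitySpec": {
+      "type": "uniform",
+      "segmentGranularity": "HOUR",
+      "queryGranularity": "NONE"
+    }
+  },
+  "ioConfig": {
+    "topic": "wikipedia",
+    "consumerProperties": {"bootstrap.servers": "localhost:9092"},
+    "taskCount": 1,
+    "replicas": 1,
+    "taskDuration": "PT1H"
+  },
+  "tuningConfig": {"type": "kafka"}
+}
+```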
diff --git a/docs/content/operations/pull-deps.md b/docs/content/operations/pull-deps.md
index 2af9a7d93d6..63cd5504418 100644
--- a/docs/content/operations/pull-deps.md
+++ b/docs/content/operations/pull-deps.md
@@ -92,10 +92,10 @@ To run `pull-deps`, you should
Example:
-Suppose you want to download ```druid-rabbitmq```, ```mysql-metadata-storage``` and ```hadoop-client```(both 2.3.0 and 2.4.0) with a specific version, you can run `pull-deps` command with `-c org.apache.druid.extensions:druid-examples:#{DRUIDVERSION}`, `-c org.apache.druid.extensions:mysql-metadata-storage:#{DRUIDVERSION}`, `-h org.apache.hadoop:hadoop-client:2.3.0` and `-h org.apache.hadoop:hadoop-client:2.4.0`, an example command would be:
+Suppose you want to download ```mysql-metadata-storage``` and ```hadoop-client``` (both 2.3.0 and 2.4.0) with specific versions. You can run the `pull-deps` command with `-c org.apache.druid.extensions:mysql-metadata-storage:#{DRUIDVERSION}`, `-h org.apache.hadoop:hadoop-client:2.3.0` and `-h org.apache.hadoop:hadoop-client:2.4.0`; an example command would be:
```
-java -classpath "/my/druid/lib/*" org.apache.druid.cli.Main tools pull-deps --clean -c org.apache.druid.extensions:mysql-metadata-storage:#{DRUIDVERSION} -c org.apache.druid.extensions.contrib:druid-rabbitmq:#{DRUIDVERSION} -h org.apache.hadoop:hadoop-client:2.3.0 -h org.apache.hadoop:hadoop-client:2.4.0
+java -classpath "/my/druid/lib/*" org.apache.druid.cli.Main tools pull-deps --clean -c org.apache.druid.extensions:mysql-metadata-storage:#{DRUIDVERSION} -h org.apache.hadoop:hadoop-client:2.3.0 -h org.apache.hadoop:hadoop-client:2.4.0
```
Because `--clean` is supplied, this command will first remove the directories specified at `druid.extensions.directory` and `druid.extensions.hadoopDependenciesDir`, then recreate them and start downloading the extensions there. After finishing downloading, if you go to the extension directories you specified, you will see
@@ -103,15 +103,6 @@ Because `--clean` is supplied, this command will first remove the directories sp
```
tree extensions
extensions
-├── druid-examples
-│ ├── commons-beanutils-1.8.3.jar
-│ ├── commons-digester-1.8.jar
-│ ├── commons-logging-1.1.1.jar
-│ ├── commons-validator-1.4.0.jar
-│ ├── druid-examples-#{DRUIDVERSION}.jar
-│ ├── twitter4j-async-3.0.3.jar
-│ ├── twitter4j-core-3.0.3.jar
-│ └── twitter4j-stream-3.0.3.jar
└── mysql-metadata-storage
└── mysql-metadata-storage-#{DRUIDVERSION}.jar
```
@@ -138,10 +129,10 @@ hadoop-dependencies/
..... lots of jars
```
-Note that if you specify `--defaultVersion`, you don't have to put version information in the coordinate. For example, if you want both `druid-rabbitmq` and `mysql-metadata-storage` to use version `#{DRUIDVERSION}`, you can change the command above to
+Note that if you specify `--defaultVersion`, you don't have to put version information in the coordinate. For example, if you want `mysql-metadata-storage` to use version `#{DRUIDVERSION}`, you can change the command above to
```
-java -classpath "/my/druid/lib/*" org.apache.druid.cli.Main tools pull-deps --defaultVersion #{DRUIDVERSION} --clean -c org.apache.druid.extensions:mysql-metadata-storage -c org.apache.druid.extensions.contrib:druid-rabbitmq -h org.apache.hadoop:hadoop-client:2.3.0 -h org.apache.hadoop:hadoop-client:2.4.0
+java -classpath "/my/druid/lib/*" org.apache.druid.cli.Main tools pull-deps --defaultVersion #{DRUIDVERSION} --clean -c org.apache.druid.extensions:mysql-metadata-storage -h org.apache.hadoop:hadoop-client:2.3.0 -h org.apache.hadoop:hadoop-client:2.4.0
```
diff --git a/examples/pom.xml b/examples/pom.xml
deleted file mode 100644
index 37aded9b8a0..00000000000
--- a/examples/pom.xml
+++ /dev/null
@@ -1,134 +0,0 @@
-<?xml version="1.0" encoding="UTF-8"?>
-<!--
-  ~ Licensed to the Apache Software Foundation (ASF) under one
-  ~ or more contributor license agreements.  See the NOTICE file
-  ~ distributed with this work for additional information
-  ~ regarding copyright ownership.  The ASF licenses this file
-  ~ to you under the Apache License, Version 2.0 (the
-  ~ "License"); you may not use this file except in compliance
-  ~ with the License.  You may obtain a copy of the License at
-  ~
-  ~   http://www.apache.org/licenses/LICENSE-2.0
-  ~
-  ~ Unless required by applicable law or agreed to in writing,
-  ~ software distributed under the License is distributed on an
-  ~ "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-  ~ KIND, either express or implied.  See the License for the
-  ~ specific language governing permissions and limitations
-  ~ under the License.
-  -->
-
-<project xmlns="http://maven.apache.org/POM/4.0.0"
-         xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
-         xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd">
-    <modelVersion>4.0.0</modelVersion>
-
-    <groupId>org.apache.druid.extensions</groupId>
-    <artifactId>druid-examples</artifactId>
-    <name>druid-examples</name>
-    <description>druid-examples</description>
-
-    <parent>
-        <groupId>org.apache.druid</groupId>
-        <artifactId>druid</artifactId>
-        <version>0.16.0-incubating-SNAPSHOT</version>
-    </parent>
-
-    <dependencies>
-        <dependency>
-            <groupId>org.apache.druid</groupId>
-            <artifactId>druid-server</artifactId>
-            <version>${project.parent.version}</version>
-            <scope>provided</scope>
-        </dependency>
-        <dependency>
-            <groupId>org.apache.druid</groupId>
-            <artifactId>druid-core</artifactId>
-            <version>${project.parent.version}</version>
-            <scope>provided</scope>
-        </dependency>
-
-        <dependency>
-            <groupId>org.twitter4j</groupId>
-            <artifactId>twitter4j-core</artifactId>
-            <version>3.0.3</version>
-        </dependency>
-        <dependency>
-            <groupId>org.twitter4j</groupId>
-            <artifactId>twitter4j-async</artifactId>
-            <version>3.0.3</version>
-        </dependency>
-        <dependency>
-            <groupId>org.twitter4j</groupId>
-            <artifactId>twitter4j-stream</artifactId>
-            <version>3.0.3</version>
-        </dependency>
-        <dependency>
-            <groupId>commons-validator</groupId>
-            <artifactId>commons-validator</artifactId>
-            <version>1.5.1</version>
-        </dependency>
-        <dependency>
-            <groupId>com.ircclouds.irc</groupId>
-            <artifactId>irc-api</artifactId>
-        </dependency>
-        <dependency>
-            <groupId>com.maxmind.geoip2</groupId>
-            <artifactId>geoip2</artifactId>
-        </dependency>
-        <dependency>
-            <groupId>com.google.guava</groupId>
-            <artifactId>guava</artifactId>
-        </dependency>
-
-        <!-- Tests -->
-        <dependency>
-            <groupId>junit</groupId>
-            <artifactId>junit</artifactId>
-            <scope>test</scope>
-        </dependency>
-    </dependencies>
-
-    <build>
-        <plugins>
-            <plugin>
-                <groupId>org.apache.maven.plugins</groupId>
-                <artifactId>maven-shade-plugin</artifactId>
-                <executions>
-                    <execution>
-                        <phase>package</phase>
-                        <goals>
-                            <goal>shade</goal>
-                        </goals>
-                        <configuration>
-                            <outputFile>${project.build.directory}/${project.artifactId}-${project.version}-selfcontained.jar</outputFile>
-                            <filters>
-                                <filter>
-                                    <artifact>*:*</artifact>
-                                    <excludes>
-                                        <exclude>META-INF/*.SF</exclude>
-                                        <exclude>META-INF/*.DSA</exclude>
-                                        <exclude>META-INF/*.RSA</exclude>
-                                    </excludes>
-                                </filter>
-                            </filters>
-                        </configuration>
-                    </execution>
-                </executions>
-            </plugin>
-            <plugin>
-                <groupId>org.apache.maven.plugins</groupId>
-                <artifactId>maven-jar-plugin</artifactId>
-                <executions>
-                    <execution>
-                        <goals>
-                            <goal>test-jar</goal>
-                        </goals>
-                    </execution>
-                </executions>
-            </plugin>
-        </plugins>
-    </build>
-</project>
diff --git a/examples/src/main/java/org/apache/druid/examples/ExamplesDruidModule.java b/examples/src/main/java/org/apache/druid/examples/ExamplesDruidModule.java
deleted file mode 100644
index 8bf13d0cf9c..00000000000
--- a/examples/src/main/java/org/apache/druid/examples/ExamplesDruidModule.java
+++ /dev/null
@@ -1,56 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied. See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-
-package org.apache.druid.examples;
-
-import com.fasterxml.jackson.databind.Module;
-import com.fasterxml.jackson.databind.jsontype.NamedType;
-import com.fasterxml.jackson.databind.module.SimpleModule;
-import com.google.inject.Binder;
-import org.apache.druid.examples.twitter.TwitterSpritzerFirehoseFactory;
-import org.apache.druid.examples.wikipedia.IrcFirehoseFactory;
-import org.apache.druid.examples.wikipedia.IrcInputRowParser;
-import org.apache.druid.initialization.DruidModule;
-
-import java.util.Collections;
-import java.util.List;
-
-/**
- */
-public class ExamplesDruidModule implements DruidModule
-{
- @Override
- public List<? extends Module> getJacksonModules()
- {
- return Collections.singletonList(
- new SimpleModule("ExamplesModule")
- .registerSubtypes(
- new NamedType(TwitterSpritzerFirehoseFactory.class, "twitzer"),
- new NamedType(IrcFirehoseFactory.class, "irc"),
- new NamedType(IrcInputRowParser.class, "irc")
- )
- );
- }
-
- @Override
- public void configure(Binder binder)
- {
-
- }
-}
diff --git a/examples/src/main/java/org/apache/druid/examples/twitter/TwitterSpritzerFirehoseFactory.java b/examples/src/main/java/org/apache/druid/examples/twitter/TwitterSpritzerFirehoseFactory.java
deleted file mode 100644
index 300db0d8313..00000000000
--- a/examples/src/main/java/org/apache/druid/examples/twitter/TwitterSpritzerFirehoseFactory.java
+++ /dev/null
@@ -1,387 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied. See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-
-package org.apache.druid.examples.twitter;
-
-import com.fasterxml.jackson.annotation.JsonCreator;
-import com.fasterxml.jackson.annotation.JsonProperty;
-import com.fasterxml.jackson.annotation.JsonTypeName;
-import com.google.common.base.Function;
-import com.google.common.collect.ImmutableList;
-import com.google.common.collect.Lists;
-import org.apache.druid.data.input.Firehose;
-import org.apache.druid.data.input.FirehoseFactory;
-import org.apache.druid.data.input.InputRow;
-import org.apache.druid.data.input.MapBasedInputRow;
-import org.apache.druid.data.input.impl.InputRowParser;
-import org.apache.druid.java.util.common.StringUtils;
-import org.apache.druid.java.util.common.logger.Logger;
-import twitter4j.ConnectionLifeCycleListener;
-import twitter4j.GeoLocation;
-import twitter4j.HashtagEntity;
-import twitter4j.StallWarning;
-import twitter4j.Status;
-import twitter4j.StatusDeletionNotice;
-import twitter4j.StatusListener;
-import twitter4j.TwitterStream;
-import twitter4j.TwitterStreamFactory;
-import twitter4j.User;
-
-import javax.annotation.Nullable;
-import java.io.File;
-import java.util.ArrayList;
-import java.util.Arrays;
-import java.util.List;
-import java.util.Map;
-import java.util.TreeMap;
-import java.util.concurrent.ArrayBlockingQueue;
-import java.util.concurrent.BlockingQueue;
-import java.util.concurrent.TimeUnit;
-import java.util.regex.Matcher;
-import java.util.regex.Pattern;
-
-/**
- * Twitter "spritzer" Firehose Factory named "twitzer".
- * Builds a Firehose that emits a stream of
- * ??
- * with timestamps along with ??.
- * The generated tuples have the form (timestamp, ????)
- * where the timestamp is from the twitter event.
- *
- * Example spec file:
- *
- * Example query using POST to /druid/v2/?w (where w is an arbitrary parameter and the date and time
- * is UTC):
- *
- * Notes on twitter.com HTTP (REST) API: v1.0 will be disabled around 2013-03 so v1.1 should be used;
- * twitter4j 3.0 (not yet released) will support the v1.1 api.
- * Specifically, we should be using https://stream.twitter.com/1.1/statuses/sample.json
- * See: http://jira.twitter4j.org/browse/TFJ-186
- *
- * Notes on JSON parsing: as of twitter4j 2.2.x, the json parser has some bugs (ex: Status.toString()
- * can have number format exceptions), so it might be necessary to extract raw json and process it
- * separately. If so, set twitter4.jsonStoreEnabled=true and look at DataObjectFactory#getRawJSON();
- * com.fasterxml.jackson.databind.ObjectMapper should be used to parse.
- */
-@JsonTypeName("twitzer")
-public class TwitterSpritzerFirehoseFactory implements FirehoseFactory<InputRowParser>
-{
- private static final Logger log = new Logger(TwitterSpritzerFirehoseFactory.class);
- private static final Pattern sourcePattern = Pattern.compile("<a[^>]*>(.*?)</a>", Pattern.CASE_INSENSITIVE);
- private static final int DEFAULT_QUEUE_SIZE = 2000;
-
- /**
- * max events to receive, -1 is infinite, 0 means nothing is delivered; use this to prevent
- * infinite space consumption or to prevent getting throttled at an inconvenient time
- * or to see what happens when a Firehose stops delivering
- * values, or to have hasMore() return false. The Twitter Spritzer can deliver about
- * 1000 events per minute.
- */
- private final int maxEventCount;
- /**
- * maximum number of minutes to fetch Twitter events. Use this to prevent getting
- * throttled at an inconvenient time. If zero or less, no time limit for run.
- */
- private final int maxRunMinutes;
-
- @JsonCreator
- public TwitterSpritzerFirehoseFactory(
- @JsonProperty("maxEventCount") Integer maxEventCount,
- @JsonProperty("maxRunMinutes") Integer maxRunMinutes
- )
- {
- this.maxEventCount = maxEventCount;
- this.maxRunMinutes = maxRunMinutes;
- log.info("maxEventCount=" + ((maxEventCount <= 0) ? "no limit" : maxEventCount));
- log.info("maxRunMinutes=" + ((maxRunMinutes <= 0) ? "no limit" : maxRunMinutes));
- }
-
- @Override
- public Firehose connect(InputRowParser parser, File temporaryDirectory)
- {
- final ConnectionLifeCycleListener connectionLifeCycleListener = new ConnectionLifeCycleListener()
- {
- @Override
- public void onConnect()
- {
- log.info("Connected_to_Twitter");
- }
-
- @Override
- public void onDisconnect()
- {
- log.info("Disconnect_from_Twitter");
- }
-
- /**
- * called before thread gets cleaned up
- */
- @Override
- public void onCleanUp()
- {
- log.info("Cleanup_twitter_stream");
- }
- }; // ConnectionLifeCycleListener
-
- final TwitterStream twitterStream;
- final StatusListener statusListener;
- /** This queue is used to move twitter events from the twitter4j thread to the druid ingest thread. */
- final BlockingQueue<Status> queue = new ArrayBlockingQueue<Status>(DEFAULT_QUEUE_SIZE);
- final long startMsec = System.currentTimeMillis();
-
- //
- // set up Twitter Spritzer
- //
- twitterStream = new TwitterStreamFactory().getInstance();
- twitterStream.addConnectionLifeCycleListener(connectionLifeCycleListener);
- statusListener = new StatusListener()
- { // This is what really gets called to deliver stuff from twitter4j
- @Override
- public void onStatus(Status status)
- {
- // time to stop?
- if (Thread.currentThread().isInterrupted()) {
- throw new RuntimeException("Interrupted, time to stop");
- }
- try {
- boolean success = queue.offer(status, 15L, TimeUnit.SECONDS);
- if (!success) {
- log.warn("queue too slow!");
- }
- }
- catch (InterruptedException e) {
- throw new RuntimeException("InterruptedException", e);
- }
- }
-
- @Override
- public void onDeletionNotice(StatusDeletionNotice statusDeletionNotice)
- {
- //log.info("Got a status deletion notice id:" + statusDeletionNotice.getStatusId());
- }
-
- @Override
- public void onTrackLimitationNotice(int numberOfLimitedStatuses)
- {
- // This notice will be sent each time a limited stream becomes unlimited.
- // If this number is high and or rapidly increasing, it is an indication that your predicate is too broad, and you should consider a predicate with higher selectivity.
- log.warn("Got track limitation notice:" + numberOfLimitedStatuses);
- }
-
- @Override
- public void onScrubGeo(long userId, long upToStatusId)
- {
- //log.info("Got scrub_geo event userId:" + userId + " upToStatusId:" + upToStatusId);
- }
-
- @Override
- public void onException(Exception ex)
- {
- log.error(ex, "Got exception");
- }
-
- @Override
- public void onStallWarning(StallWarning warning)
- {
- log.warn("Got stall warning: %s", warning);
- }
- };
-
- twitterStream.addListener(statusListener);
- twitterStream.sample(); // creates a generic StatusStream
- log.info("returned from sample()");
-
- return new Firehose()
- {
-
- private final Runnable doNothingRunnable = new Runnable()
- {
- @Override
- public void run()
- {
- }
- };
-
- private long rowCount = 0L;
- private boolean waitIfmax = (getMaxEventCount() < 0L);
- private final Map<String, Object> theMap = new TreeMap<>();
- // DIY json parsing // private final ObjectMapper omapper = new ObjectMapper();
-
- private boolean maxTimeReached()
- {
- if (getMaxRunMinutes() <= 0) {
- return false;
- } else {
- return (System.currentTimeMillis() - startMsec) / 60000L >= getMaxRunMinutes();
- }
- }
-
- private boolean maxCountReached()
- {
- return getMaxEventCount() >= 0 && rowCount >= getMaxEventCount();
- }
-
- @Override
- public boolean hasMore()
- {
- if (maxCountReached() || maxTimeReached()) {
- return waitIfmax;
- } else {
- return true;
- }
- }
-
- @Nullable
- @Override
- public InputRow nextRow()
- {
- // Interrupted to stop?
- if (Thread.currentThread().isInterrupted()) {
- throw new RuntimeException("Interrupted, time to stop");
- }
-
- // all done?
- if (maxCountReached() || maxTimeReached()) {
- if (waitIfmax) {
- // sleep a long time instead of terminating
- try {
- log.info("reached limit, sleeping a long time...");
- Thread.sleep(2000000000L);
- }
- catch (InterruptedException e) {
- throw new RuntimeException("InterruptedException", e);
- }
- } else {
- // allow this event through, and the next hasMore() call will be false
- }
- }
- if (++rowCount % 1000 == 0) {
- log.info("nextRow() has returned %,d InputRows", rowCount);
- }
-
- Status status;
- try {
- status = queue.take();
- }
- catch (InterruptedException e) {
- throw new RuntimeException("InterruptedException", e);
- }
-
- theMap.clear();
-
- HashtagEntity[] hts = status.getHashtagEntities();
- String text = status.getText();
- theMap.put("text", (null == text) ? "" : text);
- theMap.put(
- "htags", (hts.length > 0) ? Lists.transform(
- Arrays.asList(hts), new Function<HashtagEntity, String>()
- {
- @Nullable
- @Override
- public String apply(HashtagEntity input)
- {
- return input.getText();
- }
- }
- ) : ImmutableList.of()
- );
-
- long[] lcontrobutors = status.getContributors();
- List<String> contributors = new ArrayList<>();
- for (long contrib : lcontrobutors) {
- contributors.add(StringUtils.format("%d", contrib));
- }
- theMap.put("contributors", contributors);
-
- GeoLocation geoLocation = status.getGeoLocation();
- if (null != geoLocation) {
- double lat = status.getGeoLocation().getLatitude();
- double lon = status.getGeoLocation().getLongitude();
- theMap.put("lat", lat);
- theMap.put("lon", lon);
- } else {
- theMap.put("lat", null);
- theMap.put("lon", null);
- }
-
- if (status.getSource() != null) {
- Matcher m = sourcePattern.matcher(status.getSource());
- theMap.put("source", m.find() ? m.group(1) : status.getSource());
- }
-
- theMap.put("retweet", status.isRetweet());
-
- if (status.isRetweet()) {
- Status original = status.getRetweetedStatus();
- theMap.put("retweet_count", original.getRetweetCount());
-
- User originator = original.getUser();
- theMap.put("originator_screen_name", originator != null ? originator.getScreenName() : "");
- theMap.put("originator_follower_count", originator != null ? originator.getFollowersCount() : "");
- theMap.put("originator_friends_count", originator != null ? originator.getFriendsCount() : "");
- theMap.put("originator_verified", originator != null ? originator.isVerified() : "");
- }
-
- User user = status.getUser();
- final boolean hasUser = (null != user);
- theMap.put("follower_count", hasUser ? user.getFollowersCount() : 0);
- theMap.put("friends_count", hasUser ? user.getFriendsCount() : 0);
- theMap.put("lang", hasUser ? user.getLang() : "");
- theMap.put("utc_offset", hasUser ? user.getUtcOffset() : -1); // resolution in seconds, -1 if not available?
- theMap.put("statuses_count", hasUser ? user.getStatusesCount() : 0);
- theMap.put("user_id", hasUser ? StringUtils.format("%d", user.getId()) : "");
- theMap.put("screen_name", hasUser ? user.getScreenName() : "");
- theMap.put("location", hasUser ? user.getLocation() : "");
- theMap.put("verified", hasUser ? user.isVerified() : "");
-
- theMap.put("ts", status.getCreatedAt().getTime());
-
- List<String> dimensions = Lists.newArrayList(theMap.keySet());
-
- return new MapBasedInputRow(status.getCreatedAt().getTime(), dimensions, theMap);
- }
-
- @Override
- public Runnable commit()
- {
- // ephemera in, ephemera out.
- return doNothingRunnable; // reuse the same object each time
- }
-
- @Override
- public void close()
- {
- log.info("CLOSE twitterstream");
- twitterStream.shutdown(); // invokes twitterStream.cleanUp()
- }
- };
- }
-
- @JsonProperty
- public int getMaxEventCount()
- {
- return maxEventCount;
- }
-
- @JsonProperty
- public int getMaxRunMinutes()
- {
- return maxRunMinutes;
- }
-}
diff --git a/examples/src/main/java/org/apache/druid/examples/wikipedia/IrcDecoder.java b/examples/src/main/java/org/apache/druid/examples/wikipedia/IrcDecoder.java
deleted file mode 100644
index e4adb1c1f42..00000000000
--- a/examples/src/main/java/org/apache/druid/examples/wikipedia/IrcDecoder.java
+++ /dev/null
@@ -1,34 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied. See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-
-package org.apache.druid.examples.wikipedia;
-
-import com.fasterxml.jackson.annotation.JsonSubTypes;
-import com.fasterxml.jackson.annotation.JsonTypeInfo;
-import org.apache.druid.data.input.InputRow;
-import org.joda.time.DateTime;
-
-@JsonTypeInfo(use = JsonTypeInfo.Id.NAME, property = "type")
-@JsonSubTypes({
- @JsonSubTypes.Type(name = "wikipedia", value = WikipediaIrcDecoder.class)
-})
-public interface IrcDecoder
-{
- InputRow decodeMessage(DateTime timestamp, String channel, String msg);
-}
diff --git a/examples/src/main/java/org/apache/druid/examples/wikipedia/IrcFirehoseFactory.java b/examples/src/main/java/org/apache/druid/examples/wikipedia/IrcFirehoseFactory.java
deleted file mode 100644
index ab3f8069a46..00000000000
--- a/examples/src/main/java/org/apache/druid/examples/wikipedia/IrcFirehoseFactory.java
+++ /dev/null
@@ -1,256 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied. See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-
-package org.apache.druid.examples.wikipedia;
-
-import com.fasterxml.jackson.annotation.JsonCreator;
-import com.fasterxml.jackson.annotation.JsonProperty;
-import com.google.common.collect.Lists;
-import com.ircclouds.irc.api.Callback;
-import com.ircclouds.irc.api.IRCApi;
-import com.ircclouds.irc.api.IRCApiImpl;
-import com.ircclouds.irc.api.IServerParameters;
-import com.ircclouds.irc.api.domain.IRCServer;
-import com.ircclouds.irc.api.domain.messages.ChannelPrivMsg;
-import com.ircclouds.irc.api.listeners.VariousMessageListenerAdapter;
-import com.ircclouds.irc.api.state.IIRCState;
-import org.apache.druid.data.input.Firehose;
-import org.apache.druid.data.input.FirehoseFactory;
-import org.apache.druid.data.input.InputRow;
-import org.apache.druid.data.input.impl.InputRowParser;
-import org.apache.druid.java.util.common.DateTimes;
-import org.apache.druid.java.util.common.Pair;
-import org.apache.druid.java.util.common.logger.Logger;
-import org.joda.time.DateTime;
-
-import javax.annotation.Nullable;
-import java.io.File;
-import java.util.Collections;
-import java.util.Iterator;
-import java.util.List;
-import java.util.UUID;
-import java.util.concurrent.LinkedBlockingQueue;
-import java.util.concurrent.TimeUnit;
-
-/**
- *