mirror of https://github.com/apache/druid.git

Merge branch 'master' into select

Conflicts:
	processing/src/main/java/io/druid/query/Query.java
	server/src/main/java/io/druid/guice/QueryRunnerFactoryModule.java
	server/src/main/java/io/druid/guice/QueryToolChestModule.java
	server/src/main/java/io/druid/segment/realtime/plumber/RealtimePlumberSchool.java

commit 2aabc6c4e6

build.sh
@@ -30,4 +30,4 @@ echo "For examples, see: "
 echo " "
 ls -1 examples/*/*sh
 echo " "
-echo "See also http://druid.io/docs/0.6.26"
+echo "See also http://druid.io/docs/0.6.46"

@@ -28,7 +28,7 @@
     <parent>
         <groupId>io.druid</groupId>
         <artifactId>druid</artifactId>
-        <version>0.6.27-SNAPSHOT</version>
+        <version>0.6.48-SNAPSHOT</version>
     </parent>

     <dependencies>
@@ -53,4 +53,20 @@
             <scope>test</scope>
         </dependency>
     </dependencies>
+
+    <build>
+        <plugins>
+            <plugin>
+                <artifactId>maven-jar-plugin</artifactId>
+                <configuration>
+                    <archive>
+                        <manifest>
+                            <addDefaultImplementationEntries>true</addDefaultImplementationEntries>
+                            <addDefaultSpecificationEntries>true</addDefaultSpecificationEntries>
+                        </manifest>
+                    </archive>
+                </configuration>
+            </plugin>
+        </plugins>
+    </build>
+
 </project>

@@ -28,7 +28,7 @@
     <parent>
         <groupId>io.druid</groupId>
         <artifactId>druid</artifactId>
-        <version>0.6.27-SNAPSHOT</version>
+        <version>0.6.48-SNAPSHOT</version>
     </parent>

     <dependencies>
@@ -59,14 +59,6 @@
             <groupId>org.skife.config</groupId>
             <artifactId>config-magic</artifactId>
         </dependency>
-        <dependency>
-            <groupId>org.apache.curator</groupId>
-            <artifactId>curator-recipes</artifactId>
-        </dependency>
-        <dependency>
-            <groupId>org.apache.curator</groupId>
-            <artifactId>curator-x-discovery</artifactId>
-        </dependency>
         <dependency>
             <groupId>org.hibernate</groupId>
             <artifactId>hibernate-validator</artifactId>
@@ -75,10 +67,6 @@
             <groupId>javax.validation</groupId>
             <artifactId>validation-api</artifactId>
         </dependency>
-        <dependency>
-            <groupId>it.uniroma3.mat</groupId>
-            <artifactId>extendedset</artifactId>
-        </dependency>
         <dependency>
             <groupId>com.google.guava</groupId>
             <artifactId>guava</artifactId>
@@ -127,16 +115,6 @@
             <groupId>log4j</groupId>
             <artifactId>log4j</artifactId>
         </dependency>
-        <dependency>
-            <groupId>mysql</groupId>
-            <artifactId>mysql-connector-java</artifactId>
-            <version>5.1.18</version>
-        </dependency>
-        <dependency>
-            <groupId>org.mozilla</groupId>
-            <artifactId>rhino</artifactId>
-            <version>1.7R4</version>
-        </dependency>

         <!-- Tests -->
         <dependency>
@@ -168,7 +146,15 @@
                     </goals>
                 </execution>
             </executions>
-            </plugin>
+            <configuration>
+                <archive>
+                    <manifest>
+                        <addDefaultImplementationEntries>true</addDefaultImplementationEntries>
+                        <addDefaultSpecificationEntries>true</addDefaultSpecificationEntries>
+                    </manifest>
+                </archive>
+            </configuration>
+            </plugin>
         </plugins>
     </build>
 </project>

@@ -3,7 +3,7 @@ layout: doc_page
 ---
 # Booting a Single Node Cluster #

-[Loading Your Data](Tutorial%3A-Loading-Your-Data-Part-2.html) and [All About Queries](Tutorial%3A-All-About-Queries.html) contain recipes to boot a small druid cluster on localhost. Here we will boot a small cluster on EC2. You can checkout the code, or download a tarball from [here](http://static.druid.io/artifacts/druid-services-0.6.26-bin.tar.gz).
+[Loading Your Data](Tutorial%3A-Loading-Your-Data-Part-2.html) and [All About Queries](Tutorial%3A-All-About-Queries.html) contain recipes to boot a small druid cluster on localhost. Here we will boot a small cluster on EC2. You can checkout the code, or download a tarball from [here](http://static.druid.io/artifacts/druid-services-0.6.46-bin.tar.gz).

 The [ec2 run script](https://github.com/metamx/druid/blob/master/examples/bin/run_ec2.sh), run_ec2.sh, is located at 'examples/bin' if you have checked out the code, or at the root of the project if you've downloaded a tarball. The scripts rely on the [Amazon EC2 API Tools](http://aws.amazon.com/developertools/351), and you will need to set three environment variables:

@@ -1,6 +1,9 @@
+---
+layout: doc_page
+---
 # Setting Up a Druid Cluster

 A Druid cluster consists of various node types that need to be set up depending on your use case. See our [Design](Design.html) docs for a description of the different node types.

 Minimum Physical Layout: Absolute Minimum

@@ -74,7 +77,7 @@ Local disk ("ephemeral" on AWS EC2) for caching is recommended over network moun
 Setup
 -----

-Setting up a cluster is essentially just firing up all of the nodes you want with the proper [[configuration]]. One thing to be aware of is that there are a few properties in the configuration that potentially need to be set individually for each process:
+Setting up a cluster is essentially just firing up all of the nodes you want with the proper [configuration](Configuration.html). One thing to be aware of is that there are a few properties in the configuration that potentially need to be set individually for each process:

 ```
 druid.server.type=historical|realtime

@@ -1,25 +1,28 @@
+---
+layout: doc_page
+---
 # Configuring Druid

 This describes the basic server configuration that is loaded by all the server processes; the same file is loaded by all. See also the json "specFile" descriptions in [Realtime](Realtime.html) and [Batch-ingestion](Batch-ingestion.html).

-JVM Configuration Best Practices
-================================
+## JVM Configuration Best Practices

 There are three JVM parameters that we set on all of our processes:

-1. `-Duser.timezone=UTC` This sets the default timezone of the JVM to UTC. We always set this and do not test with other default timezones, so local timezones might work, but they also might uncover weird and interesting bugs
-2. `-Dfile.encoding=UTF-8` This is similar to timezone, we test assuming UTF-8. Local encodings might work, but they also might result in weird and interesting bugs
-3. `-Djava.io.tmpdir=<a path>` Various parts of the system that interact with the file system do it via temporary files, these files can get somewhat large. Many production systems are setup to have small (but fast) `/tmp` directories, these can be problematic with Druid so we recommend pointing the JVM’s tmp directory to something with a little more meat.
+1. `-Duser.timezone=UTC` This sets the default timezone of the JVM to UTC. We always set this and do not test with other default timezones, so local timezones might work, but they also might uncover weird and interesting bugs.
+2. `-Dfile.encoding=UTF-8` This is similar to timezone, we test assuming UTF-8. Local encodings might work, but they also might result in weird and interesting bugs.
+3. `-Djava.io.tmpdir=<a path>` Various parts of the system that interact with the file system do it via temporary files, and these files can get somewhat large. Many production systems are set up to have small (but fast) `/tmp` directories, which can be problematic with Druid so we recommend pointing the JVM’s tmp directory to something with a little more meat.

-Modules
-=======
+## Modules

-As of Druid v0.6, most core Druid functionality has been compartmentalized into modules. There are a set of default modules that may apply to any node type, and there are specific modules for the different node types. Default modules are __lazily instantiated__. Each module has its own set of configuration. This page will describe the configuration of the default modules.
+As of Druid v0.6, most core Druid functionality has been compartmentalized into modules. There are a set of default modules that may apply to any node type, and there are specific modules for the different node types. Default modules are __lazily instantiated__. Each module has its own set of configuration.
+
+This page describes the configuration of the default modules. Node-specific configuration is discussed on each node's respective page. In addition, you can add custom modules to [extend Druid](Modules.html).

 Configuration of the various modules is done via Java properties. These can either be provided as `-D` system properties on the java command line or they can be passed in via a file called `runtime.properties` that exists on the classpath.

-Note: as a future item, we’d like to consolidate all of the various configuration into a yaml/JSON based configuration files.
+Note: as a future item, we’d like to consolidate all of the various configuration into a yaml/JSON based configuration file.

 ### Emitter Module

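To make that precedence concrete, here is a minimal, self-contained Java sketch — not Druid's actual bootstrap code — of loading `runtime.properties` from the classpath and letting `-D` system properties win over file entries; the property name printed at the end is purely illustrative.

```java
import java.io.InputStream;
import java.util.Properties;

public class RuntimePropertiesSketch
{
  public static void main(String[] args) throws Exception
  {
    // Load runtime.properties from the classpath, mirroring how a process
    // can pick up module configuration without -D flags.
    Properties runtime = new Properties();
    try (InputStream in = RuntimePropertiesSketch.class.getClassLoader()
        .getResourceAsStream("runtime.properties")) {
      if (in != null) {
        runtime.load(in);
      }
    }
    // -D system properties take precedence; file entries fill in the gaps.
    for (String name : runtime.stringPropertyNames()) {
      if (System.getProperty(name) == null) {
        System.setProperty(name, runtime.getProperty(name));
      }
    }
    System.out.println(System.getProperty("druid.indexer.logs.type", "file"));
  }
}
```
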
@@ -147,7 +150,7 @@ Druid storage nodes maintain information about segments they have already downlo

 |Property|Description|Default|
 |--------|-----------|-------|
-|`druid.segmentCache.locations`|Segments assigned to a historical node are first stored on the local file system and then served by the historical node. These locations defines where that local cache resides|none|
+|`druid.segmentCache.locations`|Segments assigned to a historical node are first stored on the local file system and then served by the historical node. These locations define where that local cache resides|none|
 |`druid.segmentCache.deleteOnRemove`|Delete segment files from cache once a node is no longer serving a segment.|true|
 |`druid.segmentCache.infoDir`|Historical nodes keep track of the segments they are serving so that when the process is restarted they can reload the same segments without waiting for the coordinator to reassign. This path defines where this metadata is kept. Directory will be created if needed.|${first_location}/info_dir|

@@ -282,8 +285,10 @@ This deep storage is used to interface with Amazon's S3.
 |Property|Description|Default|
 |--------|-----------|-------|
 |`druid.storage.bucket`|S3 bucket name.|none|
-|`druid.storage.basekey`|S3 base key.|none|
+|`druid.storage.baseKey`|S3 object key prefix for storage.|none|
 |`druid.storage.disableAcl`|Boolean flag for ACL.|false|
+|`druid.storage.archiveBucket`|S3 bucket name for archiving when running the indexing-service *archive task*.|none|
+|`druid.storage.archiveBaseKey`|S3 object key prefix for archiving.|none|

 #### HDFS Deep Storage

@@ -308,21 +313,29 @@ This module is used to configure the [Indexing Service](Indexing-Service.html) t

 |Property|Description|Default|
 |--------|-----------|-------|
-|`druid.indexer.logs.type`|Choices:noop, S3. Where to store task logs|noop|
+|`druid.indexer.logs.type`|Choices:noop, s3, file. Where to store task logs|file|

-#### Noop Task Logs
+#### File Task Logs

-No task logs are actually stored.
+Store task logs in the local filesystem.
+
+|Property|Description|Default|
+|--------|-----------|-------|
+|`druid.indexer.logs.directory`|Local filesystem path.|log|

 #### S3 Task Logs

-Store Task Logs in S3.
+Store task logs in S3.

 |Property|Description|Default|
 |--------|-----------|-------|
 |`druid.indexer.logs.s3Bucket`|S3 bucket name.|none|
 |`druid.indexer.logs.s3Prefix`|S3 key prefix.|none|

+#### Noop Task Logs
+
+No task logs are actually stored.
+
 ### Firehose Module

 The Firehose module lists all available firehoses. There are no configurations.

@@ -19,13 +19,13 @@ Clone Druid and build it:
 git clone https://github.com/metamx/druid.git druid
 cd druid
 git fetch --tags
-git checkout druid-0.6.26
+git checkout druid-0.6.46
 ./build.sh
 ```

 ### Downloading the DSK (Druid Standalone Kit)

-[Download](http://static.druid.io/artifacts/releases/druid-services-0.6.26-bin.tar.gz) a stand-alone tarball and run it:
+[Download](http://static.druid.io/artifacts/releases/druid-services-0.6.46-bin.tar.gz) a stand-alone tarball and run it:

 ``` bash
 tar -xzf druid-services-0.X.X-bin.tar.gz

@@ -56,6 +56,7 @@ With the following JVM configuration:
 -Ddruid.db.connector.password=diurd

+-Ddruid.selectors.indexing.serviceName=overlord
 -Ddruid.indexer.queue.startDelay=PT0M
 -Ddruid.indexer.runner.javaOpts="-server -Xmx1g"
 -Ddruid.indexer.runner.startPort=8081
 -Ddruid.indexer.fork.property.druid.computation.buffer.size=268435456

@@ -110,12 +111,17 @@ If autoscaling is enabled, new middle managers may be added when a task has been

 #### JVM Configuration

-In addition to the configuration of some of the default modules in [Configuration](Configuration.html), the overlord module requires the following basic configs to run in remote mode:
+In addition to the configuration of some of the default modules in [Configuration](Configuration.html), the overlord has the following basic configs:

 |Property|Description|Default|
 |--------|-----------|-------|
 |`druid.indexer.runner.type`|Choices "local" or "remote". Indicates whether tasks should be run locally or in a distributed environment.|local|
-|`druid.indexer.storage.type`|Choices are "local" or "db". Indicates whether incoming tasks should be stored locally (in heap) or in a database. Storing incoming tasks in a database allows for tasks to be bootstrapped if the overlord should fail.|local|
+|`druid.indexer.storage.type`|Choices are "local" or "db". Indicates whether incoming tasks should be stored locally (in heap) or in a database. Storing incoming tasks in a database allows for tasks to be resumed if the overlord should fail.|local|
+|`druid.indexer.storage.recentlyFinishedThreshold`|A duration of time to store task results.|PT24H|
+|`druid.indexer.queue.maxSize`|Maximum number of active tasks at one time.|Integer.MAX_VALUE|
+|`druid.indexer.queue.startDelay`|Sleep this long before starting overlord queue management. This can be useful to give a cluster time to re-orient itself after e.g. a widespread network issue.|PT1M|
+|`druid.indexer.queue.restartDelay`|Sleep this long when overlord queue management throws an exception before trying again.|PT30S|
+|`druid.indexer.queue.storageSyncRate`|Sync overlord state this often with an underlying task persistence mechanism.|PT1M|

 The following configs only apply if the overlord is running in remote mode:

@@ -1,6 +1,9 @@
+---
+layout: doc_page
+---
 # Extending Druid With Custom Modules

 Druid version 0.6 introduces a new module system that allows for the addition of extensions at runtime.

 ## Specifying extensions

@@ -164,4 +167,4 @@ Adding new Jersey resources to a module requires calling the following code to b

 ```java
 Jerseys.addResource(binder, NewResource.class);
 ```

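For context, here is a hedged sketch of a complete module around that call. The `io.druid.guice` package for the `Jerseys` helper is an assumption, and `NewResource` is a placeholder stub standing in for your own JAX-RS-annotated resource class.

```java
import com.google.inject.Binder;
import com.google.inject.Module;
import io.druid.guice.Jerseys; // assumed package for the Jerseys helper shown above

public class MyExtensionModule implements Module
{
  // Placeholder resource class; a real one would carry JAX-RS annotations
  // such as @Path and @GET.
  public static class NewResource
  {
  }

  @Override
  public void configure(Binder binder)
  {
    // Registers the resource so the embedded Jersey container can serve it.
    Jerseys.addResource(binder, NewResource.class);
  }
}
```
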
@@ -27,7 +27,7 @@ druid.host=localhost
 druid.service=realtime
 druid.port=8083

-druid.extensions.coordinates=["io.druid.extensions:druid-kafka-seven:0.6.26"]
+druid.extensions.coordinates=["io.druid.extensions:druid-kafka-seven:0.6.46"]


 druid.zk.service.host=localhost

@@ -0,0 +1,45 @@
+---
+layout: doc_page
+---
+TopNMetricSpec
+==================
+
+The topN metric spec specifies how topN values should be sorted.
+
+## Numeric TopNMetricSpec
+
+The simplest metric specification is a String value indicating the metric to sort topN results by. They are included in a topN query with:
+
+```json
+"metric": <metric_value_string>
+```
+
+The metric field can also be given as a JSON object. The grammar for dimension values sorted by numeric value is shown below:
+
+```json
+"metric": {
+    "type": "numeric",
+    "metric": "<metric_value>"
+}
+```
+
+|property|description|required?|
+|--------|-----------|---------|
+|type|this indicates a numeric sort|yes|
+|metric|the actual metric field in which results will be sorted by|yes|
+
+## Lexicographic TopNMetricSpec
+
+The grammar for dimension values sorted lexicographically is as follows:
+
+```json
+"metric": {
+    "type": "lexicographic",
+    "previousStop": "<previousStop_value>"
+}
+```
+
+|property|description|required?|
+|--------|-----------|---------|
+|type|this indicates a lexicographic sort|yes|
+|previousStop|the starting point of the lexicographic sort. For example, if a previousStop value is 'b', all values before 'b' are discarded. This field can be used to paginate through all the dimension values.|no|

@@ -0,0 +1,119 @@
+---
+layout: doc_page
+---
+TopN queries
+==================
+
+TopN queries return a sorted set of results for the values in a given dimension according to some criteria. Conceptually, they can be thought of as an approximate [GroupByQuery](GroupByQuery.html) over a single dimension with an [Ordering](Ordering.html) spec. TopNs are much faster and resource efficient than GroupBys for this use case. These types of queries take a topN query object and return an array of JSON objects where each object represents a value asked for by the topN query.
+
+A topN query object looks like:
+
+```json
+{
+  "queryType": "topN",
+  "dataSource": "sample_data",
+  "dimension": "sample_dim",
+  "threshold": 5,
+  "metric": "count",
+  "granularity": "all",
+  "filter": {
+    "type": "and",
+    "fields": [
+      {
+        "type": "selector",
+        "dimension": "dim1",
+        "value": "some_value"
+      },
+      {
+        "type": "selector",
+        "dimension": "dim2",
+        "value": "some_other_val"
+      }
+    ]
+  },
+  "aggregations": [
+    {
+      "type": "longSum",
+      "name": "count",
+      "fieldName": "count"
+    },
+    {
+      "type": "doubleSum",
+      "name": "some_metric",
+      "fieldName": "some_metric"
+    }
+  ],
+  "postAggregations": [
+    {
+      "type": "arithmetic",
+      "name": "sample_divide",
+      "fn": "/",
+      "fields": [
+        {
+          "type": "fieldAccess",
+          "name": "some_metric",
+          "fieldName": "some_metric"
+        },
+        {
+          "type": "fieldAccess",
+          "name": "count",
+          "fieldName": "count"
+        }
+      ]
+    }
+  ],
+  "intervals": [
+    "2013-08-31T00:00:00.000/2013-09-03T00:00:00.000"
+  ]
+}
+```
+
+There are 10 parts to a topN query, but 7 of them are shared with [TimeseriesQuery](TimeseriesQuery.html). Please review [TimeseriesQuery](TimeseriesQuery.html) for meanings of fields not defined below.
+
+|property|description|required?|
+|--------|-----------|---------|
+|dimension|A JSON object defining the dimension that you want the top taken for. For more info, see [DimensionSpecs](DimensionSpecs.html)|yes|
+|threshold|An integer defining the N in the topN (i.e. how many you want in the top list)|yes|
+|metric|A JSON object specifying the metric to sort by for the top list. For more info, see [TopNMetricSpec](TopNMetricSpec.html).|yes|
+
+Please note the context JSON object is also available for topN queries and should be used with the same caution as the timeseries case.
+The format of the results would look like so:
+
+```json
+[
+  {
+    "timestamp": "2013-08-31T00:00:00.000Z",
+    "result": [
+      {
+        "dim1": "dim1_val",
+        "count": 111,
+        "some_metrics": 10669,
+        "average": 96.11711711711712
+      },
+      {
+        "dim1": "another_dim1_val",
+        "count": 88,
+        "some_metrics": 28344,
+        "average": 322.09090909090907
+      },
+      {
+        "dim1": "dim1_val3",
+        "count": 70,
+        "some_metrics": 871,
+        "average": 12.442857142857143
+      },
+      {
+        "dim1": "dim1_val4",
+        "count": 62,
+        "some_metrics": 815,
+        "average": 13.14516129032258
+      },
+      {
+        "dim1": "dim1_val5",
+        "count": 60,
+        "some_metrics": 2787,
+        "average": 46.45
+      }
+    ]
+  }
+]
```

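As a usage illustration, this self-contained Java sketch POSTs a cut-down topN query to the `druid/v2` endpoint used throughout the tutorials. The `localhost:8083` address and the `sample_data` datasource are assumptions carried over from the examples above, not values this commit prescribes.

```java
import java.io.OutputStream;
import java.net.HttpURLConnection;
import java.net.URL;
import java.nio.charset.StandardCharsets;
import java.util.Scanner;

public class TopNQueryExample
{
  public static void main(String[] args) throws Exception
  {
    // A compact topN query against the hypothetical sample_data datasource.
    String query = "{\"queryType\":\"topN\",\"dataSource\":\"sample_data\","
        + "\"dimension\":\"sample_dim\",\"threshold\":5,\"metric\":\"count\","
        + "\"granularity\":\"all\","
        + "\"aggregations\":[{\"type\":\"longSum\",\"name\":\"count\",\"fieldName\":\"count\"}],"
        + "\"intervals\":[\"2013-08-31T00:00:00.000/2013-09-03T00:00:00.000\"]}";

    HttpURLConnection conn = (HttpURLConnection)
        new URL("http://localhost:8083/druid/v2/?pretty").openConnection();
    conn.setRequestMethod("POST");
    conn.setRequestProperty("content-type", "application/json");
    conn.setDoOutput(true);
    try (OutputStream out = conn.getOutputStream()) {
      out.write(query.getBytes(StandardCharsets.UTF_8));
    }
    // Read the JSON array of per-timestamp results back.
    try (Scanner scanner = new Scanner(conn.getInputStream(), "UTF-8").useDelimiter("\\A")) {
      System.out.println(scanner.hasNext() ? scanner.next() : "");
    }
  }
}
```
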
@@ -49,7 +49,7 @@ There are two ways to setup Druid: download a tarball, or [Build From Source](Bu

 ### Download a Tarball

-We've built a tarball that contains everything you'll need. You'll find it [here](http://static.druid.io/artifacts/releases/druid-services-0.6.26-bin.tar.gz). Download this file to a directory of your choosing.
+We've built a tarball that contains everything you'll need. You'll find it [here](http://static.druid.io/artifacts/releases/druid-services-0.6.46-bin.tar.gz). Download this file to a directory of your choosing.

 You can extract the awesomeness within by issuing:

@@ -60,7 +60,7 @@ tar -zxvf druid-services-*-bin.tar.gz
 Not too lost so far right? That's great! If you cd into the directory:

 ```
-cd druid-services-0.6.26
+cd druid-services-0.6.46
 ```

 You should see a bunch of files:

@@ -205,7 +205,7 @@ You are probably wondering, what are these [Granularities](Granularities.html) a
 To issue the query and get some results, run the following in your command line:

 ```
-curl -X POST 'http://localhost:8083/druid/v2/?pretty' -H 'content-type: application/json' -d ````timeseries_query.body
+curl -X POST 'http://localhost:8083/druid/v2/?pretty' -H 'content-type: application/json' -d @timeseries_query.body
 ```

 Once again, you should get a JSON blob of text back with your results, that looks something like this:

@@ -94,6 +94,7 @@ druid.db.connector.user=druid
 druid.db.connector.password=diurd

+druid.selectors.indexing.serviceName=overlord
 druid.indexer.queue.startDelay=PT0M
 druid.indexer.runner.javaOpts="-server -Xmx1g"
 druid.indexer.runner.startPort=8088
 druid.indexer.fork.property.druid.computation.buffer.size=268435456

@@ -246,6 +247,23 @@ Issuing a [TimeBoundaryQuery](TimeBoundaryQuery.html) should yield:
 } ]
 ```

+Console
+--------
+
+The indexing service overlord has a console located at:
+
+```bash
+localhost:8087/console.html
+```
+
+On this console, you can look at statuses and logs of recently submitted and completed tasks.
+
+If you decide to reuse the local firehose to ingest your own data and if you run into problems, you can use the console to read the individual task logs.
+
+Task logs can be stored locally or uploaded to [Deep Storage](Deep-Storage.html). More information about how to configure this is [here](Configuration.html).
+
+Most common data ingestion problems are around timestamp formats and other malformed data issues.
+
 Next Steps
 ----------

@@ -44,7 +44,7 @@ With real-world data, we recommend having a message bus such as [Apache Kafka](h

 #### Setting up Kafka

-[KafkaFirehoseFactory](https://github.com/metamx/druid/blob/druid-0.6.26/realtime/src/main/java/com/metamx/druid/realtime/firehose/KafkaFirehoseFactory.java) is how druid communicates with Kafka. Using this [Firehose](Firehose.html) with the right configuration, we can import data into Druid in real-time without writing any code. To load data to a real-time node via Kafka, we'll first need to initialize Zookeeper and Kafka, and then configure and initialize a [Realtime](Realtime.html) node.
+[KafkaFirehoseFactory](https://github.com/metamx/druid/blob/druid-0.6.46/realtime/src/main/java/com/metamx/druid/realtime/firehose/KafkaFirehoseFactory.java) is how druid communicates with Kafka. Using this [Firehose](Firehose.html) with the right configuration, we can import data into Druid in real-time without writing any code. To load data to a real-time node via Kafka, we'll first need to initialize Zookeeper and Kafka, and then configure and initialize a [Realtime](Realtime.html) node.

 Instructions for booting a Zookeeper and then Kafka cluster are available [here](http://kafka.apache.org/07/quickstart.html).

@@ -13,7 +13,7 @@ In this tutorial, we will set up other types of Druid nodes as well as and exter

 If you followed the first tutorial, you should already have Druid downloaded. If not, let's go back and do that first.

-You can download the latest version of druid [here](http://static.druid.io/artifacts/releases/druid-services-0.6.26-bin.tar.gz)
+You can download the latest version of druid [here](http://static.druid.io/artifacts/releases/druid-services-0.6.46-bin.tar.gz)

 and untar the contents within by issuing:

@@ -149,7 +149,7 @@ druid.port=8081

 druid.zk.service.host=localhost

-druid.extensions.coordinates=["io.druid.extensions:druid-s3-extensions:0.6.26"]
+druid.extensions.coordinates=["io.druid.extensions:druid-s3-extensions:0.6.46"]

 # Dummy read only AWS account (used to download example data)
 druid.s3.secretKey=QyyfVZ7llSiRg6Qcrql1eEUG7buFpAK6T6engr1b

@@ -238,7 +238,7 @@ druid.port=8083

 druid.zk.service.host=localhost

-druid.extensions.coordinates=["io.druid.extensions:druid-examples:0.6.26","io.druid.extensions:druid-kafka-seven:0.6.26"]
+druid.extensions.coordinates=["io.druid.extensions:druid-examples:0.6.46","io.druid.extensions:druid-kafka-seven:0.6.46"]

 # Change this config to db to hand off to the rest of the Druid cluster
 druid.publish.type=noop

@@ -253,5 +253,5 @@ druid.processing.buffer.sizeBytes=10000000

 Next Steps
 ----------
-If you are intested in how data flows through the different Druid components, check out the [Druid data flow architecture](Design.html). Now that you have an understanding of what the Druid cluster looks like, why not load some of your own data?
+If you are interested in how data flows through the different Druid components, check out the [Druid data flow architecture](Design.html). Now that you have an understanding of what the Druid cluster looks like, why not load some of your own data?
 Check out the next [tutorial](Tutorial%3A-Loading-Your-Data-Part-1.html) section for more info!

@@ -37,7 +37,7 @@ There are two ways to setup Druid: download a tarball, or [Build From Source](Bu

 h3. Download a Tarball

-We've built a tarball that contains everything you'll need. You'll find it [here](http://static.druid.io/artifacts/releases/druid-services-0.6.26-bin.tar.gz)
+We've built a tarball that contains everything you'll need. You'll find it [here](http://static.druid.io/artifacts/releases/druid-services-0.6.46-bin.tar.gz)
 Download this file to a directory of your choosing.
 You can extract the awesomeness within by issuing:

@@ -48,7 +48,7 @@ tar zxvf druid-services-*-bin.tar.gz
 Not too lost so far right? That's great! If you cd into the directory:

 ```
-cd druid-services-0.6.26
+cd druid-services-0.6.46
 ```

 You should see a bunch of files:

@@ -9,7 +9,7 @@ There are two ways to setup Druid: download a tarball, or build it from source.

 h3. Download a Tarball

-We've built a tarball that contains everything you'll need. You'll find it "here":http://static.druid.io/artifacts/releases/druid-services-0.6.26-bin.tar.gz.
+We've built a tarball that contains everything you'll need. You'll find it "here":http://static.druid.io/artifacts/releases/druid-services-0.6.46-bin.tar.gz.
 Download this bad boy to a directory of your choosing.

 You can extract the awesomeness within by issuing:

@@ -3,7 +3,7 @@

 <link rel="stylesheet" href="css/toc.css">

-h1. Introduction
+h2. Introduction
 * "About Druid":./
 * "Concepts and Terminology":./Concepts-and-Terminology.html

@@ -14,16 +14,12 @@ h2. Getting Started
 * "Tutorial: Loading Your Data Part 2":./Tutorial:-Loading-Your-Data-Part-2.html
 * "Tutorial: All About Queries":./Tutorial:-All-About-Queries.html

-h2. Evaluate Druid
+h2. Operations
+* "Configuration":Configuration.html
+* "Extending Druid":./Modules.html
 * "Cluster Setup":./Cluster-setup.html
 * "Booting a Production Cluster":./Booting-a-production-cluster.html

-h2. Configuration
-* "Configuration":Configuration.html
-
-h2. Extend Druid
-* "Modules":./Modules.html
-
 h2. Data Ingestion
 * "Realtime":./Realtime.html
 * "Batch":./Batch-ingestion.html

@@ -46,6 +42,8 @@ h2. Querying
 ** "SegmentMetadataQuery":./SegmentMetadataQuery.html
 ** "TimeBoundaryQuery":./TimeBoundaryQuery.html
 ** "TimeseriesQuery":./TimeseriesQuery.html
+** "TopNQuery":./TopNQuery.html
+*** "TopNMetricSpec":./TopNMetricSpec.html

 h2. Architecture
 * "Design":./Design.html

@@ -4,7 +4,7 @@ druid.port=8081

 druid.zk.service.host=localhost

-druid.extensions.coordinates=["io.druid.extensions:druid-s3-extensions:0.6.26"]
+druid.extensions.coordinates=["io.druid.extensions:druid-s3-extensions:0.6.46"]

 # Dummy read only AWS account (used to download example data)
 druid.s3.secretKey=QyyfVZ7llSiRg6Qcrql1eEUG7buFpAK6T6engr1b

@@ -9,6 +9,7 @@ druid.db.connector.user=druid
 druid.db.connector.password=diurd

+druid.selectors.indexing.serviceName=overlord
 druid.indexer.queue.startDelay=PT0M
 druid.indexer.runner.javaOpts="-server -Xmx1g"
 druid.indexer.runner.startPort=8088
 druid.indexer.fork.property.druid.computation.buffer.size=268435456

@@ -4,7 +4,7 @@ druid.port=8083

 druid.zk.service.host=localhost

-druid.extensions.coordinates=["io.druid.extensions:druid-examples:0.6.26","io.druid.extensions:druid-kafka-seven:0.6.26","io.druid.extensions:druid-rabbitmq:0.6.26"]
+druid.extensions.coordinates=["io.druid.extensions:druid-examples:0.6.46","io.druid.extensions:druid-kafka-seven:0.6.46","io.druid.extensions:druid-rabbitmq:0.6.46"]

 # Change this config to db to hand off to the rest of the Druid cluster
 druid.publish.type=noop

@@ -28,7 +28,7 @@
     <parent>
         <groupId>io.druid</groupId>
         <artifactId>druid</artifactId>
-        <version>0.6.27-SNAPSHOT</version>
+        <version>0.6.48-SNAPSHOT</version>
     </parent>

     <dependencies>
@@ -104,6 +104,14 @@
                     </goals>
                 </execution>
             </executions>
+            <configuration>
+                <archive>
+                    <manifest>
+                        <addDefaultImplementationEntries>true</addDefaultImplementationEntries>
+                        <addDefaultSpecificationEntries>true</addDefaultSpecificationEntries>
+                    </manifest>
+                </archive>
+            </configuration>
         </plugin>
         </plugins>
     </build>

@@ -28,7 +28,7 @@
     <parent>
         <groupId>io.druid</groupId>
         <artifactId>druid</artifactId>
-        <version>0.6.27-SNAPSHOT</version>
+        <version>0.6.48-SNAPSHOT</version>
     </parent>

     <dependencies>
@@ -71,4 +71,20 @@
             <scope>test</scope>
         </dependency>
     </dependencies>
+
+    <build>
+        <plugins>
+            <plugin>
+                <artifactId>maven-jar-plugin</artifactId>
+                <configuration>
+                    <archive>
+                        <manifest>
+                            <addDefaultImplementationEntries>true</addDefaultImplementationEntries>
+                            <addDefaultSpecificationEntries>true</addDefaultSpecificationEntries>
+                        </manifest>
+                    </archive>
+                </configuration>
+            </plugin>
+        </plugins>
+    </build>
+
 </project>

@@ -28,7 +28,7 @@
     <parent>
         <groupId>io.druid</groupId>
         <artifactId>druid</artifactId>
-        <version>0.6.27-SNAPSHOT</version>
+        <version>0.6.48-SNAPSHOT</version>
     </parent>

     <dependencies>
@@ -101,6 +101,17 @@

     <build>
         <plugins>
+            <plugin>
+                <artifactId>maven-jar-plugin</artifactId>
+                <configuration>
+                    <archive>
+                        <manifest>
+                            <addDefaultImplementationEntries>true</addDefaultImplementationEntries>
+                            <addDefaultSpecificationEntries>true</addDefaultSpecificationEntries>
+                        </manifest>
+                    </archive>
+                </configuration>
+            </plugin>
             <plugin>
                 <artifactId>maven-shade-plugin</artifactId>
                 <executions>

@@ -62,7 +62,7 @@ public abstract class HadoopDruidIndexerMapper<KEYOUT, VALUEOUT> extends Mapper<
     try {
       inputRow = parser.parse(value.toString());
     }
-    catch (IllegalArgumentException e) {
+    catch (Exception e) {
       if (config.isIgnoreInvalidRows()) {
         context.getCounter(HadoopDruidIndexerConfig.IndexJobCounters.INVALID_ROW_COUNTER).increment(1);
         return; // we're ignoring this invalid row

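The change above widens the catch from `IllegalArgumentException` to `Exception`, so any parse failure can be counted and skipped when invalid rows are ignored. The following self-contained Java sketch illustrates the same pattern outside Hadoop; `parseRow`, the flag, and the counter are illustrative stand-ins, not Druid APIs.

```java
public final class InvalidRowSketch
{
  private static final boolean IGNORE_INVALID_ROWS = true;
  private static long invalidRowCounter = 0;

  // Stand-in parser that can fail with exceptions other than
  // IllegalArgumentException, which is exactly why the catch was widened.
  static Object parseRow(String line)
  {
    if (line.isEmpty()) {
      throw new IllegalStateException("empty row");
    }
    return line;
  }

  public static void main(String[] args) throws Exception
  {
    for (String line : new String[]{"good", "", "also good"}) {
      try {
        parseRow(line);
      }
      catch (Exception e) { // catching Exception, as in the change above
        if (IGNORE_INVALID_ROWS) {
          invalidRowCounter++; // count and skip, mirroring INVALID_ROW_COUNTER
          continue;
        }
        throw e;
      }
    }
    System.out.println("invalid rows skipped: " + invalidRowCounter);
  }
}
```
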
@@ -28,7 +28,7 @@
     <parent>
         <groupId>io.druid</groupId>
         <artifactId>druid</artifactId>
-        <version>0.6.27-SNAPSHOT</version>
+        <version>0.6.48-SNAPSHOT</version>
     </parent>

     <dependencies>
@@ -47,95 +47,10 @@
             <artifactId>druid-indexing-hadoop</artifactId>
             <version>${project.parent.version}</version>
         </dependency>

-        <dependency>
-            <groupId>com.metamx</groupId>
-            <artifactId>emitter</artifactId>
-        </dependency>
-        <dependency>
-            <groupId>com.metamx</groupId>
-            <artifactId>http-client</artifactId>
-        </dependency>
-        <dependency>
-            <groupId>com.metamx</groupId>
-            <artifactId>java-util</artifactId>
-        </dependency>
-        <dependency>
-            <groupId>com.metamx</groupId>
-            <artifactId>server-metrics</artifactId>
-        </dependency>
-
-        <dependency>
-            <groupId>commons-codec</groupId>
-            <artifactId>commons-codec</artifactId>
-        </dependency>
-        <dependency>
-            <groupId>commons-io</groupId>
-            <artifactId>commons-io</artifactId>
-        </dependency>
-        <dependency>
-            <groupId>org.skife.config</groupId>
-            <artifactId>config-magic</artifactId>
-        </dependency>
-        <dependency>
-            <groupId>org.apache.curator</groupId>
-            <artifactId>curator-framework</artifactId>
-        </dependency>
-        <dependency>
-            <groupId>org.apache.curator</groupId>
-            <artifactId>curator-recipes</artifactId>
-        </dependency>
-        <dependency>
-            <groupId>com.google.guava</groupId>
-            <artifactId>guava</artifactId>
-        </dependency>
-        <dependency>
-            <groupId>com.google.inject</groupId>
-            <artifactId>guice</artifactId>
-        </dependency>
-        <dependency>
-            <groupId>com.google.inject.extensions</groupId>
-            <artifactId>guice-servlet</artifactId>
-        </dependency>
-        <dependency>
-            <groupId>com.fasterxml.jackson.core</groupId>
-            <artifactId>jackson-core</artifactId>
-        </dependency>
-        <dependency>
-            <groupId>com.fasterxml.jackson.jaxrs</groupId>
-            <artifactId>jackson-jaxrs-json-provider</artifactId>
-        </dependency>
-        <dependency>
-            <groupId>com.fasterxml.jackson.core</groupId>
-            <artifactId>jackson-databind</artifactId>
-        </dependency>
-        <dependency>
-            <groupId>javax.inject</groupId>
-            <artifactId>javax.inject</artifactId>
-        </dependency>
-        <dependency>
-            <groupId>org.jdbi</groupId>
-            <artifactId>jdbi</artifactId>
-        </dependency>
-        <dependency>
-            <groupId>com.sun.jersey</groupId>
-            <artifactId>jersey-core</artifactId>
-        </dependency>
-        <dependency>
-            <groupId>com.sun.jersey.contribs</groupId>
-            <artifactId>jersey-guice</artifactId>
-        </dependency>
-        <dependency>
-            <groupId>org.eclipse.jetty</groupId>
-            <artifactId>jetty-server</artifactId>
-        </dependency>
-        <dependency>
-            <groupId>joda-time</groupId>
-            <artifactId>joda-time</artifactId>
-        </dependency>
-        <dependency>
-            <groupId>com.google.code.findbugs</groupId>
-            <artifactId>jsr305</artifactId>
-        </dependency>
         <dependency>
+            <groupId>mysql</groupId>
+            <artifactId>mysql-connector-java</artifactId>
+            <version>5.1.18</version>
         </dependency>

         <!-- Tests -->
@@ -160,4 +75,20 @@
             <scope>test</scope>
         </dependency>
     </dependencies>
+
+    <build>
+        <plugins>
+            <plugin>
+                <artifactId>maven-jar-plugin</artifactId>
+                <configuration>
+                    <archive>
+                        <manifest>
+                            <addDefaultImplementationEntries>true</addDefaultImplementationEntries>
+                            <addDefaultSpecificationEntries>true</addDefaultSpecificationEntries>
+                        </manifest>
+                    </archive>
+                </configuration>
+            </plugin>
+        </plugins>
+    </build>
+
 </project>

@@ -24,7 +24,6 @@ import com.fasterxml.jackson.databind.jsontype.NamedType;
 import com.fasterxml.jackson.databind.module.SimpleModule;
 import com.google.common.collect.ImmutableList;
 import com.google.inject.Binder;
-import io.druid.indexing.common.config.EventReceiverFirehoseFactoryConfig;
 import io.druid.indexing.common.index.EventReceiverFirehoseFactory;
 import io.druid.initialization.DruidModule;

@@ -46,7 +45,5 @@ public class IndexingServiceFirehoseModule implements DruidModule
   @Override
   public void configure(Binder binder)
   {
-    // backwards compatibility
-    ConfigProvider.bind(binder, EventReceiverFirehoseFactoryConfig.class);
   }
 }

@@ -23,22 +23,27 @@ import com.google.inject.Binder;
 import com.google.inject.Key;
 import com.google.inject.Module;
 import com.google.inject.multibindings.MapBinder;
+import io.druid.indexing.common.config.FileTaskLogsConfig;
+import io.druid.indexing.common.tasklogs.FileTaskLogs;
 import io.druid.tasklogs.NoopTaskLogs;
 import io.druid.tasklogs.TaskLogPusher;
 import io.druid.tasklogs.TaskLogs;

 /**
  */
-public class TaskLogsModule implements Module
+public class IndexingServiceTaskLogsModule implements Module
 {
   @Override
   public void configure(Binder binder)
   {
-    PolyBind.createChoice(binder, "druid.indexer.logs.type", Key.get(TaskLogs.class), Key.get(NoopTaskLogs.class));
+    PolyBind.createChoice(binder, "druid.indexer.logs.type", Key.get(TaskLogs.class), Key.get(FileTaskLogs.class));

     final MapBinder<String, TaskLogs> taskLogBinder = Binders.taskLogsBinder(binder);
     taskLogBinder.addBinding("noop").to(NoopTaskLogs.class).in(LazySingleton.class);
+    taskLogBinder.addBinding("file").to(FileTaskLogs.class).in(LazySingleton.class);
     binder.bind(NoopTaskLogs.class).in(LazySingleton.class);
+    binder.bind(FileTaskLogs.class).in(LazySingleton.class);
+    JsonConfigProvider.bind(binder, "druid.indexer.logs", FileTaskLogsConfig.class);

     binder.bind(TaskLogPusher.class).to(TaskLogs.class);
   }

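The module above switches the default task-log store from noop to file by changing the PolyBind default and registering a `file` binding. The following plain-Java sketch (deliberately avoiding Guice) illustrates only the selection mechanics that `druid.indexer.logs.type` drives; the map stands in for the real `MapBinder`, and `TaskLogs` here is a toy interface, not the Druid one.

```java
import java.util.HashMap;
import java.util.Map;

public class TaskLogsChoiceSketch
{
  interface TaskLogs { String describe(); }

  public static void main(String[] args)
  {
    // Each registered type name maps to an implementation, mirroring
    // taskLogBinder.addBinding("noop"/"file") above.
    Map<String, TaskLogs> bindings = new HashMap<>();
    bindings.put("noop", () -> "no task logs are stored");
    bindings.put("file", () -> "task logs stored on the local filesystem");

    // druid.indexer.logs.type plays the role of the PolyBind property key,
    // with "file" as the new default chosen by this commit.
    String type = System.getProperty("druid.indexer.logs.type", "file");
    TaskLogs chosen = bindings.getOrDefault(type, bindings.get("file"));
    System.out.println(chosen.describe());
  }
}
```
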
@@ -20,25 +20,36 @@
 package io.druid.indexing.common;

 import com.fasterxml.jackson.databind.ObjectMapper;
+import com.google.common.base.Function;
+import com.google.common.collect.ImmutableSet;
 import com.google.common.collect.Maps;
+import com.google.common.collect.Multimap;
+import com.google.common.collect.Multimaps;
 import com.metamx.emitter.service.ServiceEmitter;
 import com.metamx.metrics.MonitorScheduler;
 import io.druid.client.ServerView;
+import io.druid.indexing.common.actions.SegmentInsertAction;
 import io.druid.indexing.common.actions.TaskActionClient;
 import io.druid.indexing.common.actions.TaskActionClientFactory;
 import io.druid.indexing.common.config.TaskConfig;
 import io.druid.indexing.common.task.Task;
 import io.druid.query.QueryRunnerFactoryConglomerate;
+import io.druid.segment.loading.DataSegmentArchiver;
 import io.druid.segment.loading.DataSegmentKiller;
+import io.druid.segment.loading.DataSegmentMover;
 import io.druid.segment.loading.DataSegmentPusher;
 import io.druid.segment.loading.SegmentLoader;
 import io.druid.segment.loading.SegmentLoadingException;
 import io.druid.server.coordination.DataSegmentAnnouncer;
 import io.druid.timeline.DataSegment;
+import org.joda.time.Interval;

 import java.io.File;
+import java.io.IOException;
+import java.util.Collection;
 import java.util.List;
 import java.util.Map;
+import java.util.Set;
 import java.util.concurrent.ExecutorService;

 /**
@@ -52,6 +63,8 @@ public class TaskToolbox
   private final ServiceEmitter emitter;
   private final DataSegmentPusher segmentPusher;
   private final DataSegmentKiller dataSegmentKiller;
+  private final DataSegmentArchiver dataSegmentArchiver;
+  private final DataSegmentMover dataSegmentMover;
   private final DataSegmentAnnouncer segmentAnnouncer;
   private final ServerView newSegmentServerView;
   private final QueryRunnerFactoryConglomerate queryRunnerFactoryConglomerate;
@@ -68,6 +81,8 @@ public class TaskToolbox
       ServiceEmitter emitter,
       DataSegmentPusher segmentPusher,
       DataSegmentKiller dataSegmentKiller,
+      DataSegmentMover dataSegmentMover,
+      DataSegmentArchiver dataSegmentArchiver,
       DataSegmentAnnouncer segmentAnnouncer,
       ServerView newSegmentServerView,
       QueryRunnerFactoryConglomerate queryRunnerFactoryConglomerate,
@@ -84,6 +99,8 @@ public class TaskToolbox
     this.emitter = emitter;
     this.segmentPusher = segmentPusher;
     this.dataSegmentKiller = dataSegmentKiller;
+    this.dataSegmentMover = dataSegmentMover;
+    this.dataSegmentArchiver = dataSegmentArchiver;
     this.segmentAnnouncer = segmentAnnouncer;
     this.newSegmentServerView = newSegmentServerView;
     this.queryRunnerFactoryConglomerate = queryRunnerFactoryConglomerate;
@@ -119,6 +136,16 @@ public class TaskToolbox
     return dataSegmentKiller;
   }

+  public DataSegmentMover getDataSegmentMover()
+  {
+    return dataSegmentMover;
+  }
+
+  public DataSegmentArchiver getDataSegmentArchiver()
+  {
+    return dataSegmentArchiver;
+  }
+
   public DataSegmentAnnouncer getSegmentAnnouncer()
   {
     return segmentAnnouncer;
@@ -149,7 +176,7 @@ public class TaskToolbox
     return objectMapper;
   }

-  public Map<DataSegment, File> getSegments(List<DataSegment> segments)
+  public Map<DataSegment, File> fetchSegments(List<DataSegment> segments)
       throws SegmentLoadingException
   {
     Map<DataSegment, File> retVal = Maps.newLinkedHashMap();
@@ -160,6 +187,25 @@ public class TaskToolbox
     return retVal;
   }

+  public void pushSegments(Iterable<DataSegment> segments) throws IOException {
+    // Request segment pushes for each set
+    final Multimap<Interval, DataSegment> segmentMultimap = Multimaps.index(
+        segments,
+        new Function<DataSegment, Interval>()
+        {
+          @Override
+          public Interval apply(DataSegment segment)
+          {
+            return segment.getInterval();
+          }
+        }
+    );
+    for (final Collection<DataSegment> segmentCollection : segmentMultimap.asMap().values()) {
+      getTaskActionClient().submit(new SegmentInsertAction(ImmutableSet.copyOf(segmentCollection)));
+    }
+
+  }
+
   public File getTaskWorkDir()
   {
     return taskWorkDir;

@@ -24,13 +24,14 @@ import com.google.inject.Inject;
 import com.metamx.emitter.service.ServiceEmitter;
 import com.metamx.metrics.MonitorScheduler;
 import io.druid.client.ServerView;
 import io.druid.guice.annotations.Json;
 import io.druid.guice.annotations.Processing;
 import io.druid.indexing.common.actions.TaskActionClientFactory;
 import io.druid.indexing.common.config.TaskConfig;
 import io.druid.indexing.common.task.Task;
 import io.druid.query.QueryRunnerFactoryConglomerate;
 import io.druid.segment.loading.DataSegmentArchiver;
 import io.druid.segment.loading.DataSegmentKiller;
+import io.druid.segment.loading.DataSegmentMover;
 import io.druid.segment.loading.DataSegmentPusher;
 import io.druid.server.coordination.DataSegmentAnnouncer;

@@ -47,6 +48,8 @@ public class TaskToolboxFactory
   private final ServiceEmitter emitter;
   private final DataSegmentPusher segmentPusher;
   private final DataSegmentKiller dataSegmentKiller;
+  private final DataSegmentMover dataSegmentMover;
+  private final DataSegmentArchiver dataSegmentArchiver;
   private final DataSegmentAnnouncer segmentAnnouncer;
   private final ServerView newSegmentServerView;
   private final QueryRunnerFactoryConglomerate queryRunnerFactoryConglomerate;
@@ -62,6 +65,8 @@ public class TaskToolboxFactory
       ServiceEmitter emitter,
       DataSegmentPusher segmentPusher,
       DataSegmentKiller dataSegmentKiller,
+      DataSegmentMover dataSegmentMover,
+      DataSegmentArchiver dataSegmentArchiver,
       DataSegmentAnnouncer segmentAnnouncer,
       ServerView newSegmentServerView,
       QueryRunnerFactoryConglomerate queryRunnerFactoryConglomerate,
@@ -76,6 +81,8 @@ public class TaskToolboxFactory
     this.emitter = emitter;
     this.segmentPusher = segmentPusher;
     this.dataSegmentKiller = dataSegmentKiller;
+    this.dataSegmentMover = dataSegmentMover;
+    this.dataSegmentArchiver = dataSegmentArchiver;
     this.segmentAnnouncer = segmentAnnouncer;
     this.newSegmentServerView = newSegmentServerView;
     this.queryRunnerFactoryConglomerate = queryRunnerFactoryConglomerate;
@@ -96,6 +103,8 @@ public class TaskToolboxFactory
         emitter,
         segmentPusher,
         dataSegmentKiller,
+        dataSegmentMover,
+        dataSegmentArchiver,
         segmentAnnouncer,
         newSegmentServerView,
         queryRunnerFactoryConglomerate,

@@ -19,6 +19,7 @@

 package io.druid.indexing.common.actions;

+import com.metamx.common.ISE;
 import com.metamx.emitter.EmittingLogger;
 import io.druid.indexing.common.task.Task;
 import io.druid.indexing.overlord.TaskStorage;
@@ -45,21 +46,21 @@ public class LocalTaskActionClient implements TaskActionClient
   {
     log.info("Performing action for task[%s]: %s", task.getId(), taskAction);

-    final RetType ret = taskAction.perform(task, toolbox);
-
     if (taskAction.isAudited()) {
       // Add audit log
       try {
         storage.addAuditLog(task, taskAction);
       }
       catch (Exception e) {
+        final String actionClass = taskAction.getClass().getName();
         log.makeAlert(e, "Failed to record action in audit log")
            .addData("task", task.getId())
-           .addData("actionClass", taskAction.getClass().getName())
+           .addData("actionClass", actionClass)
            .emit();
+        throw new ISE(e, "Failed to record action [%s] in audit log", actionClass);
       }
     }

-    return ret;
+    return taskAction.perform(task, toolbox);
   }
 }

@@ -68,7 +68,7 @@ public class LockAcquireAction implements TaskAction<TaskLock>
   @Override
   public boolean isAudited()
   {
-    return true;
+    return false;
   }

   @Override

@@ -60,7 +60,7 @@ public class LockReleaseAction implements TaskAction<Void>
   @Override
   public boolean isAudited()
   {
-    return true;
+    return false;
   }

   @Override

@@ -23,56 +23,54 @@ import com.fasterxml.jackson.annotation.JsonCreator;
 import com.fasterxml.jackson.annotation.JsonIgnore;
 import com.fasterxml.jackson.annotation.JsonProperty;
 import com.fasterxml.jackson.core.type.TypeReference;
-import com.google.common.collect.ImmutableList;
+import com.google.common.base.Optional;
 import io.druid.indexing.common.TaskLock;
 import io.druid.indexing.common.task.Task;
 import org.joda.time.Interval;

-import java.util.List;
-
-public class SpawnTasksAction implements TaskAction<Void>
+public class LockTryAcquireAction implements TaskAction<Optional<TaskLock>>
 {
   @JsonIgnore
-  private final List<Task> newTasks;
+  private final Interval interval;

   @JsonCreator
-  public SpawnTasksAction(
-      @JsonProperty("newTasks") List<Task> newTasks
+  public LockTryAcquireAction(
+      @JsonProperty("interval") Interval interval
   )
   {
-    this.newTasks = ImmutableList.copyOf(newTasks);
+    this.interval = interval;
   }

   @JsonProperty
-  public List<Task> getNewTasks()
+  public Interval getInterval()
   {
-    return newTasks;
+    return interval;
   }

-  public TypeReference<Void> getReturnTypeReference()
+  public TypeReference<Optional<TaskLock>> getReturnTypeReference()
   {
-    return new TypeReference<Void>() {};
+    return new TypeReference<Optional<TaskLock>>()
+    {
+    };
   }

   @Override
-  public Void perform(Task task, TaskActionToolbox toolbox)
+  public Optional<TaskLock> perform(Task task, TaskActionToolbox toolbox)
   {
-    for(final Task newTask : newTasks) {
-      toolbox.getTaskQueue().add(newTask);
-    }
-
-    return null;
+    return toolbox.getTaskLockbox().tryLock(task, interval);
   }

   @Override
   public boolean isAudited()
   {
-    return true;
+    return false;
   }

   @Override
   public String toString()
   {
-    return "SpawnTasksAction{" +
-           "newTasks=" + newTasks +
+    return "LockTryAcquireAction{" +
+           "interval=" + interval +
           '}';
   }
 }

@@ -24,7 +24,6 @@ import com.fasterxml.jackson.annotation.JsonIgnore;
 import com.fasterxml.jackson.annotation.JsonProperty;
 import com.fasterxml.jackson.core.type.TypeReference;
 import com.google.common.collect.ImmutableSet;
-import com.metamx.common.ISE;
 import com.metamx.emitter.service.ServiceMetricEvent;
 import io.druid.indexing.common.task.Task;
 import io.druid.timeline.DataSegment;
@@ -80,9 +79,7 @@ public class SegmentInsertAction implements TaskAction<Set<DataSegment>>
   @Override
   public Set<DataSegment> perform(Task task, TaskActionToolbox toolbox) throws IOException
   {
-    if(!toolbox.taskLockCoversSegments(task, segments, allowOlderVersions)) {
-      throw new ISE("Segments not covered by locks for task[%s]: %s", task.getId(), segments);
-    }
+    toolbox.verifyTaskLocksAndSinglePartitionSettitude(task, segments, true);

     final Set<DataSegment> retVal = toolbox.getIndexerDBCoordinator().announceHistoricalSegments(segments);

@@ -0,0 +1,73 @@
+package io.druid.indexing.common.actions;
+
+import com.fasterxml.jackson.annotation.JsonCreator;
+import com.fasterxml.jackson.annotation.JsonIgnore;
+import com.fasterxml.jackson.annotation.JsonProperty;
+import com.fasterxml.jackson.core.type.TypeReference;
+import com.google.common.collect.ImmutableSet;
+import com.metamx.emitter.service.ServiceMetricEvent;
+import io.druid.indexing.common.task.Task;
+import io.druid.timeline.DataSegment;
+
+import java.io.IOException;
+import java.util.Set;
+
+public class SegmentMetadataUpdateAction implements TaskAction<Void>
+{
+  @JsonIgnore
+  private final Set<DataSegment> segments;
+
+  @JsonCreator
+  public SegmentMetadataUpdateAction(
+      @JsonProperty("segments") Set<DataSegment> segments
+  )
+  {
+    this.segments = ImmutableSet.copyOf(segments);
+  }
+
+  @JsonProperty
+  public Set<DataSegment> getSegments()
+  {
+    return segments;
+  }
+
+  public TypeReference<Void> getReturnTypeReference()
+  {
+    return new TypeReference<Void>() {};
+  }
+
+  @Override
+  public Void perform(
+      Task task, TaskActionToolbox toolbox
+  ) throws IOException
+  {
+    toolbox.verifyTaskLocksAndSinglePartitionSettitude(task, segments, true);
+    toolbox.getIndexerDBCoordinator().updateSegmentMetadata(segments);
+
+    // Emit metrics
+    final ServiceMetricEvent.Builder metricBuilder = new ServiceMetricEvent.Builder()
+        .setUser2(task.getDataSource())
+        .setUser4(task.getType());
+
+    for (DataSegment segment : segments) {
+      metricBuilder.setUser5(segment.getInterval().toString());
+      toolbox.getEmitter().emit(metricBuilder.build("indexer/segmentMoved/bytes", segment.getSize()));
+    }
+
+    return null;
+  }
+
+  @Override
+  public boolean isAudited()
+  {
+    return true;
+  }
+
+  @Override
+  public String toString()
+  {
+    return "SegmentMetadataUpdateAction{" +
+           "segments=" + segments +
+           '}';
+  }
+}

@@ -24,7 +24,6 @@ import com.fasterxml.jackson.annotation.JsonIgnore;
 import com.fasterxml.jackson.annotation.JsonProperty;
 import com.fasterxml.jackson.core.type.TypeReference;
 import com.google.common.collect.ImmutableSet;
-import com.metamx.common.ISE;
 import com.metamx.emitter.service.ServiceMetricEvent;
 import io.druid.indexing.common.task.Task;
 import io.druid.timeline.DataSegment;
@@ -59,10 +58,7 @@ public class SegmentNukeAction implements TaskAction<Void>
   @Override
   public Void perform(Task task, TaskActionToolbox toolbox) throws IOException
   {
-    if(!toolbox.taskLockCoversSegments(task, segments, true)) {
-      throw new ISE("Segments not covered by locks for task: %s", task.getId());
-    }
-
+    toolbox.verifyTaskLocksAndSinglePartitionSettitude(task, segments, true);
     toolbox.getIndexerDBCoordinator().deleteSegments(segments);

     // Emit metrics

@@ -29,13 +29,14 @@ import java.io.IOException;
@JsonTypeInfo(use = JsonTypeInfo.Id.NAME, property = "type")
@JsonSubTypes(value = {
    @JsonSubTypes.Type(name = "lockAcquire", value = LockAcquireAction.class),
    @JsonSubTypes.Type(name = "lockTryAcquire", value = LockTryAcquireAction.class),
    @JsonSubTypes.Type(name = "lockList", value = LockListAction.class),
    @JsonSubTypes.Type(name = "lockRelease", value = LockReleaseAction.class),
    @JsonSubTypes.Type(name = "segmentInsertion", value = SegmentInsertAction.class),
    @JsonSubTypes.Type(name = "segmentListUsed", value = SegmentListUsedAction.class),
    @JsonSubTypes.Type(name = "segmentListUnused", value = SegmentListUnusedAction.class),
    @JsonSubTypes.Type(name = "segmentNuke", value = SegmentNukeAction.class),
    @JsonSubTypes.Type(name = "spawnTasks", value = SpawnTasksAction.class),
    @JsonSubTypes.Type(name = "segmentMetadataUpdate", value = SegmentMetadataUpdateAction.class)
})
public interface TaskAction<RetType>
{
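Because TaskAction carries @JsonTypeInfo on the "type" property, Jackson selects the concrete subclass from that field during deserialization. A minimal sketch, assuming an ObjectMapper with the relevant Druid Jackson modules registered:

  import com.fasterxml.jackson.databind.ObjectMapper;
  import io.druid.indexing.common.actions.TaskAction;

  // The "type" field picks the @JsonSubTypes entry; "lockList" maps to LockListAction.
  ObjectMapper mapper = new ObjectMapper(); // assumes Druid's modules are registered
  TaskAction<?> action = mapper.readValue("{\"type\":\"lockList\"}", TaskAction.class);
  // `action` is now a LockListAction; readValue throws IOException on malformed input.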
@@ -19,15 +19,16 @@

package io.druid.indexing.common.actions;

import com.google.api.client.repackaged.com.google.common.base.Preconditions;
import com.google.common.base.Predicate;
import com.google.common.collect.Iterables;
import com.google.inject.Inject;
import com.metamx.common.ISE;
import com.metamx.emitter.service.ServiceEmitter;
import io.druid.indexing.common.TaskLock;
import io.druid.indexing.common.task.Task;
import io.druid.indexing.overlord.IndexerDBCoordinator;
import io.druid.indexing.overlord.TaskLockbox;
import io.druid.indexing.overlord.TaskQueue;
import io.druid.timeline.DataSegment;

import java.util.List;

@@ -35,30 +36,22 @@ import java.util.Set;

public class TaskActionToolbox
{
  private final TaskQueue taskQueue;
  private final TaskLockbox taskLockbox;
  private final IndexerDBCoordinator indexerDBCoordinator;
  private final ServiceEmitter emitter;

  @Inject
  public TaskActionToolbox(
      TaskQueue taskQueue,
      TaskLockbox taskLockbox,
      IndexerDBCoordinator indexerDBCoordinator,
      ServiceEmitter emitter
  )
  {
    this.taskQueue = taskQueue;
    this.taskLockbox = taskLockbox;
    this.indexerDBCoordinator = indexerDBCoordinator;
    this.emitter = emitter;
  }

  public TaskQueue getTaskQueue()
  {
    return taskQueue;
  }

  public TaskLockbox getTaskLockbox()
  {
    return taskLockbox;

@@ -74,6 +67,38 @@ public class TaskActionToolbox
    return emitter;
  }

  public boolean segmentsAreFromSamePartitionSet(
      final Set<DataSegment> segments
  )
  {
    // Verify that these segments are all in the same partition set

    Preconditions.checkArgument(!segments.isEmpty(), "segments nonempty");
    final DataSegment firstSegment = segments.iterator().next();
    for (final DataSegment segment : segments) {
      if (!segment.getDataSource().equals(firstSegment.getDataSource())
          || !segment.getInterval().equals(firstSegment.getInterval())
          || !segment.getVersion().equals(firstSegment.getVersion())) {
        return false;
      }
    }
    return true;
  }

  public void verifyTaskLocksAndSinglePartitionSettitude(
      final Task task,
      final Set<DataSegment> segments,
      final boolean allowOlderVersions
  )
  {
    if (!taskLockCoversSegments(task, segments, allowOlderVersions)) {
      throw new ISE("Segments not covered by locks for task: %s", task.getId());
    }
    if (!segmentsAreFromSamePartitionSet(segments)) {
      throw new ISE("Segments are not in the same partition set: %s", segments);
    }
  }

  public boolean taskLockCoversSegments(
      final Task task,
      final Set<DataSegment> segments,
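For intuition, segmentsAreFromSamePartitionSet keys the partition set on (dataSource, interval, version) only. A small sketch with hypothetical values, using the DataSegment builder pattern that appears in DeleteTask later in this diff:

  // Segments agreeing on dataSource, interval, and version form one partition set.
  final Interval interval = new Interval(new DateTime("2013-01-01"), new DateTime("2013-01-02"));
  final DataSegment a = DataSegment.builder()
      .dataSource("wikipedia").interval(interval).version("v1")
      .shardSpec(new NoneShardSpec()).build();
  final DataSegment b = DataSegment.builder()
      .dataSource("wikipedia").interval(interval).version("v2") // different version
      .shardSpec(new NoneShardSpec()).build();

  toolbox.segmentsAreFromSamePartitionSet(ImmutableSet.of(a));    // true
  toolbox.segmentsAreFromSamePartitionSet(ImmutableSet.of(a, b)); // false: versions differ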
@@ -19,15 +19,19 @@

package io.druid.indexing.common.config;

import io.druid.server.initialization.ZkPathsConfig;
import org.skife.config.Config;
import org.skife.config.Default;
import com.fasterxml.jackson.annotation.JsonProperty;

/**
 */
public abstract class IndexerZkConfig extends ZkPathsConfig
import javax.validation.constraints.NotNull;
import java.io.File;

public class FileTaskLogsConfig
{
  @Config("druid.zk.maxNumBytes")
  @Default("512000")
  public abstract long getMaxNumBytes();
  @JsonProperty
  @NotNull
  private File directory = new File("log");

  public File getDirectory()
  {
    return directory;
  }
}
@@ -0,0 +1,19 @@
package io.druid.indexing.common.config;

import com.fasterxml.jackson.annotation.JsonProperty;
import org.joda.time.Duration;
import org.joda.time.Period;

import javax.validation.constraints.NotNull;

public class TaskStorageConfig
{
  @JsonProperty
  @NotNull
  public Duration recentlyFinishedThreshold = new Period("PT24H").toStandardDuration();

  public Duration getRecentlyFinishedThreshold()
  {
    return recentlyFinishedThreshold;
  }
}
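The default threshold is a fixed 24-hour Joda Duration; a small sketch of what that default expression evaluates to (standard Joda-Time behavior):

  // "PT24H" is an ISO-8601 period; toStandardDuration() pins it to an exact
  // millisecond length (24 * 60 * 60 * 1000 ms), independent of time zone or DST.
  Duration threshold = new Period("PT24H").toStandardDuration();
  threshold.getStandardHours(); // 24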
@@ -33,7 +33,6 @@ import io.druid.data.input.Firehose;
import io.druid.data.input.FirehoseFactory;
import io.druid.data.input.InputRow;
import io.druid.data.input.impl.MapInputRowParser;
import io.druid.indexing.common.config.EventReceiverFirehoseFactoryConfig;

import javax.ws.rs.POST;
import javax.ws.rs.Path;
@@ -63,31 +62,15 @@ public class EventReceiverFirehoseFactory implements FirehoseFactory
  private final MapInputRowParser parser;
  private final Optional<ChatHandlerProvider> chatHandlerProvider;

  @Deprecated
  private final EventReceiverFirehoseFactoryConfig config;

  @JsonCreator
  public EventReceiverFirehoseFactory(
      @JsonProperty("serviceName") String serviceName,
      @JsonProperty("firehoseId") String firehoseId,
      @JsonProperty("bufferSize") Integer bufferSize,
      @JsonProperty("parser") MapInputRowParser parser,
      @JacksonInject ChatHandlerProvider chatHandlerProvider,
      @JacksonInject EventReceiverFirehoseFactoryConfig config
      @JacksonInject ChatHandlerProvider chatHandlerProvider
  )
  {
    // This code is here for backwards compatibility
    if (serviceName == null) {
      this.serviceName = String.format(
          "%s:%s",
          config.getFirehoseIdPrefix(),
          Preconditions.checkNotNull(firehoseId, "firehoseId")
      );
    } else {
      this.serviceName = serviceName;
    }
    this.config = config;

    this.serviceName = Preconditions.checkNotNull(serviceName, "serviceName");
    this.bufferSize = bufferSize == null || bufferSize <= 0 ? DEFAULT_BUFFER_SIZE : bufferSize;
    this.parser = Preconditions.checkNotNull(parser, "parser");
    this.chatHandlerProvider = Optional.fromNullable(chatHandlerProvider);

@@ -117,13 +100,6 @@ public class EventReceiverFirehoseFactory implements FirehoseFactory
    return serviceName;
  }

  @Deprecated
  @JsonProperty("firehoseId")
  public String getFirehoseId()
  {
    return serviceName.replaceFirst(String.format("%s:", config.getFirehoseIdPrefix()), "");
  }

  @JsonProperty
  public int getBufferSize()
  {
@@ -0,0 +1,77 @@
/*
 * Druid - a distributed column store.
 * Copyright (C) 2012, 2013 Metamarkets Group Inc.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version 2
 * of the License, or (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
 */

package io.druid.indexing.common.task;

import com.fasterxml.jackson.annotation.JsonIgnore;
import com.fasterxml.jackson.annotation.JsonProperty;
import com.google.common.base.Preconditions;
import io.druid.indexing.common.actions.LockTryAcquireAction;
import io.druid.indexing.common.actions.TaskActionClient;
import org.joda.time.Interval;

public abstract class AbstractFixedIntervalTask extends AbstractTask
{
  @JsonIgnore
  private final Interval interval;

  protected AbstractFixedIntervalTask(
      String id,
      String dataSource,
      Interval interval
  )
  {
    this(id, id, new TaskResource(id, 1), dataSource, interval);
  }

  protected AbstractFixedIntervalTask(
      String id,
      String groupId,
      String dataSource,
      Interval interval
  )
  {
    this(id, groupId, new TaskResource(id, 1), dataSource, interval);
  }

  protected AbstractFixedIntervalTask(
      String id,
      String groupId,
      TaskResource taskResource,
      String dataSource,
      Interval interval
  )
  {
    super(id, groupId, taskResource, dataSource);
    this.interval = Preconditions.checkNotNull(interval, "interval");
    Preconditions.checkArgument(interval.toDurationMillis() > 0, "interval empty");
  }

  @Override
  public boolean isReady(TaskActionClient taskActionClient) throws Exception
  {
    return taskActionClient.submit(new LockTryAcquireAction(interval)).isPresent();
  }

  @JsonProperty
  public Interval getInterval()
  {
    return interval;
  }
}
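A concrete subclass only has to provide a type name and a run() body; interval locking is handled by isReady() above. A minimal hypothetical subclass for illustration (this class and its type name are not part of the commit):

  // Hypothetical no-op task showing the minimal AbstractFixedIntervalTask surface.
  public class NoopFixedIntervalTask extends AbstractFixedIntervalTask
  {
    public NoopFixedIntervalTask(String id, String dataSource, Interval interval)
    {
      super(id, dataSource, interval);
    }

    @Override
    public String getType()
    {
      return "noop_fixed"; // hypothetical type name
    }

    @Override
    public TaskStatus run(TaskToolbox toolbox) throws Exception
    {
      // By the time run() is called, the lock on getInterval() was acquired via isReady().
      return TaskStatus.success(getId());
    }
  }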
@@ -23,21 +23,15 @@ import com.fasterxml.jackson.annotation.JsonIgnore;
import com.fasterxml.jackson.annotation.JsonProperty;
import com.google.common.base.Joiner;
import com.google.common.base.Objects;
import com.google.common.base.Optional;
import com.google.common.base.Preconditions;
import io.druid.indexing.common.TaskLock;
import io.druid.indexing.common.TaskStatus;
import io.druid.indexing.common.TaskToolbox;
import io.druid.indexing.common.actions.LockAcquireAction;
import io.druid.indexing.common.actions.LockListAction;
import io.druid.indexing.common.actions.SegmentListUsedAction;
import io.druid.indexing.common.actions.TaskActionClient;
import io.druid.query.Query;
import io.druid.query.QueryRunner;
import org.joda.time.Interval;

import java.io.IOException;
import java.util.List;

public abstract class AbstractTask implements Task
{

@@ -55,26 +49,22 @@ public abstract class AbstractTask implements Task
  @JsonIgnore
  private final String dataSource;

  @JsonIgnore
  private final Optional<Interval> interval;

  protected AbstractTask(String id, String dataSource, Interval interval)
  protected AbstractTask(String id, String dataSource)
  {
    this(id, id, new TaskResource(id, 1), dataSource, interval);
    this(id, id, new TaskResource(id, 1), dataSource);
  }

  protected AbstractTask(String id, String groupId, String dataSource, Interval interval)
  protected AbstractTask(String id, String groupId, String dataSource)
  {
    this(id, groupId, new TaskResource(id, 1), dataSource, interval);
    this(id, groupId, new TaskResource(id, 1), dataSource);
  }

  protected AbstractTask(String id, String groupId, TaskResource taskResource, String dataSource, Interval interval)
  protected AbstractTask(String id, String groupId, TaskResource taskResource, String dataSource)
  {
    this.id = Preconditions.checkNotNull(id, "id");
    this.groupId = Preconditions.checkNotNull(groupId, "groupId");
    this.taskResource = Preconditions.checkNotNull(taskResource, "resource");
    this.dataSource = Preconditions.checkNotNull(dataSource, "dataSource");
    this.interval = Optional.fromNullable(interval);
  }

  @JsonProperty

@@ -111,25 +101,12 @@ public abstract class AbstractTask implements Task
    return dataSource;
  }

  @JsonProperty("interval")
  @Override
  public Optional<Interval> getImplicitLockInterval()
  {
    return interval;
  }

  @Override
  public <T> QueryRunner<T> getQueryRunner(Query<T> query)
  {
    return null;
  }

  @Override
  public TaskStatus preflight(TaskActionClient taskActionClient) throws Exception
  {
    return TaskStatus.running(id);
  }

  @Override
  public String toString()
  {

@@ -137,7 +114,6 @@ public abstract class AbstractTask implements Task
        .add("id", id)
        .add("type", getType())
        .add("dataSource", dataSource)
        .add("interval", getImplicitLockInterval())
        .toString();
  }

@@ -149,11 +125,6 @@ public abstract class AbstractTask implements Task
    return ID_JOINER.join(objects);
  }

  public SegmentListUsedAction defaultListUsedAction()
  {
    return new SegmentListUsedAction(getDataSource(), getImplicitLockInterval().get());
  }

  public TaskStatus success()
  {
    return TaskStatus.success(getId());

@@ -186,14 +157,6 @@ public abstract class AbstractTask implements Task

  protected Iterable<TaskLock> getTaskLocks(TaskToolbox toolbox) throws IOException
  {
    final List<TaskLock> locks = toolbox.getTaskActionClient().submit(new LockListAction());

    if (locks.isEmpty() && getImplicitLockInterval().isPresent()) {
      // In the Peon's local mode, the implicit lock interval is not pre-acquired, so we need to try it here.
      toolbox.getTaskActionClient().submit(new LockAcquireAction(getImplicitLockInterval().get()));
      return toolbox.getTaskActionClient().submit(new LockListAction());
    } else {
      return locks;
    }
    return toolbox.getTaskActionClient().submit(new LockListAction());
  }
}
@@ -0,0 +1,110 @@
/*
 * Druid - a distributed column store.
 * Copyright (C) 2012, 2013 Metamarkets Group Inc.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version 2
 * of the License, or (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
 */

package io.druid.indexing.common.task;

import com.fasterxml.jackson.annotation.JsonProperty;
import com.google.common.collect.ImmutableSet;
import com.google.common.collect.Iterables;
import com.google.common.collect.Lists;
import com.metamx.common.ISE;
import com.metamx.common.logger.Logger;
import io.druid.indexing.common.TaskLock;
import io.druid.indexing.common.TaskStatus;
import io.druid.indexing.common.TaskToolbox;
import io.druid.indexing.common.actions.SegmentListUnusedAction;
import io.druid.indexing.common.actions.SegmentMetadataUpdateAction;
import io.druid.timeline.DataSegment;
import org.joda.time.Interval;

import java.util.List;

public class ArchiveTask extends AbstractFixedIntervalTask
{
  private static final Logger log = new Logger(ArchiveTask.class);

  public ArchiveTask(
      @JsonProperty("id") String id,
      @JsonProperty("dataSource") String dataSource,
      @JsonProperty("interval") Interval interval
  )
  {
    super(
        TaskUtils.makeId(id, "archive", dataSource, interval),
        dataSource,
        interval
    );
  }

  @Override
  public String getType()
  {
    return "archive";
  }

  @Override
  public TaskStatus run(TaskToolbox toolbox) throws Exception
  {
    // Confirm we have a lock (will throw if there isn't exactly one element)
    final TaskLock myLock = Iterables.getOnlyElement(getTaskLocks(toolbox));

    if (!myLock.getDataSource().equals(getDataSource())) {
      throw new ISE("WTF?! Lock dataSource[%s] != task dataSource[%s]", myLock.getDataSource(), getDataSource());
    }

    if (!myLock.getInterval().equals(getInterval())) {
      throw new ISE("WTF?! Lock interval[%s] != task interval[%s]", myLock.getInterval(), getInterval());
    }

    // List unused segments
    final List<DataSegment> unusedSegments = toolbox
        .getTaskActionClient()
        .submit(new SegmentListUnusedAction(myLock.getDataSource(), myLock.getInterval()));

    // Verify none of these segments have versions > lock version
    for (final DataSegment unusedSegment : unusedSegments) {
      if (unusedSegment.getVersion().compareTo(myLock.getVersion()) > 0) {
        throw new ISE(
            "WTF?! Unused segment[%s] has version[%s] > task version[%s]",
            unusedSegment.getIdentifier(),
            unusedSegment.getVersion(),
            myLock.getVersion()
        );
      }

      log.info("OK to archive segment: %s", unusedSegment.getIdentifier());
    }

    List<DataSegment> archivedSegments = Lists.newLinkedList();

    // Move segments
    for (DataSegment segment : unusedSegments) {
      archivedSegments.add(toolbox.getDataSegmentArchiver().archive(segment));
    }

    // Update metadata for moved segments
    toolbox.getTaskActionClient().submit(
        new SegmentMetadataUpdateAction(
            ImmutableSet.copyOf(archivedSegments)
        )
    );

    return TaskStatus.success(getId());
  }
}
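For illustration, the task can also be built directly; passing a null id lets TaskUtils.makeId derive one from the type, dataSource, and interval (values here are hypothetical):

  ArchiveTask task = new ArchiveTask(
      null,         // id: null -> TaskUtils.makeId(...) generates one
      "wikipedia",  // hypothetical dataSource
      new Interval(new DateTime("2013-01-01"), new DateTime("2013-02-01"))
  );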
@@ -22,6 +22,7 @@ package io.druid.indexing.common.task;
import com.fasterxml.jackson.annotation.JsonCreator;
import com.fasterxml.jackson.annotation.JsonProperty;
import com.google.common.base.Preconditions;
import com.google.common.collect.ImmutableList;
import com.google.common.collect.ImmutableSet;
import com.google.common.collect.Iterables;
import com.google.common.collect.Lists;

@@ -30,7 +31,6 @@ import io.druid.granularity.QueryGranularity;
import io.druid.indexing.common.TaskLock;
import io.druid.indexing.common.TaskStatus;
import io.druid.indexing.common.TaskToolbox;
import io.druid.indexing.common.actions.LockListAction;
import io.druid.indexing.common.actions.SegmentInsertAction;
import io.druid.query.aggregation.AggregatorFactory;
import io.druid.segment.IndexMerger;

@@ -44,7 +44,7 @@ import org.joda.time.Interval;

import java.io.File;

public class DeleteTask extends AbstractTask
public class DeleteTask extends AbstractFixedIntervalTask
{
  private static final Logger log = new Logger(DeleteTask.class);

@@ -78,16 +78,15 @@ public class DeleteTask extends AbstractTask
  public TaskStatus run(TaskToolbox toolbox) throws Exception
  {
    // Strategy: Create an empty segment covering the interval to be deleted
    final TaskLock myLock = Iterables.getOnlyElement(getTaskLocks(toolbox));
    final Interval interval = this.getImplicitLockInterval().get();
    final TaskLock myLock = Iterables.getOnlyElement(getTaskLocks(toolbox));
    final IncrementalIndex empty = new IncrementalIndex(0, QueryGranularity.NONE, new AggregatorFactory[0]);
    final IndexableAdapter emptyAdapter = new IncrementalIndexAdapter(interval, empty);
    final IndexableAdapter emptyAdapter = new IncrementalIndexAdapter(getInterval(), empty);

    // Create DataSegment
    final DataSegment segment =
        DataSegment.builder()
                   .dataSource(this.getDataSource())
                   .interval(interval)
                   .interval(getInterval())
                   .version(myLock.getVersion())
                   .shardSpec(new NoneShardSpec())
                   .build();

@@ -105,7 +104,7 @@ public class DeleteTask extends AbstractTask
        segment.getVersion()
    );

    toolbox.getTaskActionClient().submit(new SegmentInsertAction(ImmutableSet.of(uploadedSegment)));
    toolbox.pushSegments(ImmutableList.of(uploadedSegment));

    return TaskStatus.success(getId());
  }
@@ -24,10 +24,14 @@ import com.fasterxml.jackson.annotation.JsonIgnore;
import com.fasterxml.jackson.annotation.JsonProperty;
import com.fasterxml.jackson.core.type.TypeReference;
import com.google.api.client.util.Lists;
import com.google.common.base.Function;
import com.google.common.base.Joiner;
import com.google.common.base.Preconditions;
import com.google.common.collect.ImmutableSet;
import com.google.common.collect.Iterables;
import com.google.common.collect.Maps;
import com.google.common.collect.Multimap;
import com.google.common.collect.Multimaps;
import com.metamx.common.logger.Logger;
import io.druid.common.utils.JodaUtils;
import io.druid.indexer.HadoopDruidIndexerConfig;

@@ -37,21 +41,27 @@ import io.druid.indexer.HadoopDruidIndexerSchema;
import io.druid.indexing.common.TaskLock;
import io.druid.indexing.common.TaskStatus;
import io.druid.indexing.common.TaskToolbox;
import io.druid.indexing.common.actions.LockTryAcquireAction;
import io.druid.indexing.common.actions.SegmentInsertAction;
import io.druid.indexing.common.actions.TaskActionClient;
import io.druid.initialization.Initialization;
import io.druid.server.initialization.ExtensionsConfig;
import io.druid.timeline.DataSegment;
import io.tesla.aether.internal.DefaultTeslaAether;
import org.joda.time.DateTime;
import org.joda.time.Interval;

import javax.annotation.Nullable;
import java.io.File;
import java.lang.reflect.Method;
import java.net.URL;
import java.net.URLClassLoader;
import java.util.Arrays;
import java.util.Collection;
import java.util.List;
import java.util.Map;

public class HadoopIndexTask extends AbstractTask
public class HadoopIndexTask extends AbstractFixedIntervalTask
{
  private static final Logger log = new Logger(HadoopIndexTask.class);
  private static String defaultHadoopCoordinates = "org.apache.hadoop:hadoop-core:1.0.3";

@@ -88,10 +98,14 @@ public class HadoopIndexTask extends AbstractTask
    super(
        id != null ? id : String.format("index_hadoop_%s_%s", schema.getDataSource(), new DateTime()),
        schema.getDataSource(),
        JodaUtils.umbrellaInterval(JodaUtils.condenseIntervals(schema.getGranularitySpec().bucketIntervals()))
        JodaUtils.umbrellaInterval(
            JodaUtils.condenseIntervals(
                schema.getGranularitySpec()
                      .bucketIntervals()
            )
        )
    );

    // Some HadoopDruidIndexerSchema stuff doesn't make sense in the context of the indexing service
    Preconditions.checkArgument(schema.getSegmentOutputPath() == null, "segmentOutputPath must be absent");
    Preconditions.checkArgument(schema.getWorkingPath() == null, "workingPath must be absent");

@@ -107,7 +121,6 @@ public class HadoopIndexTask extends AbstractTask
    return "index_hadoop";
  }

  @JsonProperty("config")
  public HadoopDruidIndexerSchema getSchema()
  {

@@ -174,14 +187,10 @@ public class HadoopIndexTask extends AbstractTask

    if (segments != null) {
      List<DataSegment> publishedSegments = toolbox.getObjectMapper().readValue(
          segments, new TypeReference<List<DataSegment>>()
          {
          }
          segments,
          new TypeReference<List<DataSegment>>() {}
      );
      // Request segment pushes
      toolbox.getTaskActionClient().submit(new SegmentInsertAction(ImmutableSet.copyOf(publishedSegments)));

      // Done
      toolbox.pushSegments(publishedSegments);
      return TaskStatus.success(getId());
    } else {
      return TaskStatus.failure(getId());
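The task's fixed interval is the umbrella of all bucket intervals in the schema's granularity spec. A small sketch of what condenseIntervals plus umbrellaInterval produce, with hypothetical intervals:

  // condenseIntervals merges overlapping/abutting intervals; umbrellaInterval then
  // spans earliest start to latest end, covering any gaps in between.
  List<Interval> buckets = Arrays.asList(
      new Interval(new DateTime("2013-01-01"), new DateTime("2013-01-02")),
      new Interval(new DateTime("2013-01-05"), new DateTime("2013-01-06"))
  );
  Interval umbrella = JodaUtils.umbrellaInterval(JodaUtils.condenseIntervals(buckets));
  // umbrella == 2013-01-01/2013-01-06, including the uncovered 01-02/01-05 gap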
@@ -1,299 +0,0 @@
/*
 * Druid - a distributed column store.
 * Copyright (C) 2012, 2013 Metamarkets Group Inc.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version 2
 * of the License, or (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
 */

package io.druid.indexing.common.task;

import com.fasterxml.jackson.annotation.JsonCreator;
import com.fasterxml.jackson.annotation.JsonIgnore;
import com.fasterxml.jackson.annotation.JsonProperty;
import com.google.common.base.Function;
import com.google.common.base.Preconditions;
import com.google.common.collect.Lists;
import com.google.common.collect.Maps;
import com.google.common.collect.Ordering;
import com.google.common.collect.Sets;
import com.google.common.collect.TreeMultiset;
import com.google.common.primitives.Ints;
import com.metamx.common.logger.Logger;
import io.druid.data.input.Firehose;
import io.druid.data.input.FirehoseFactory;
import io.druid.data.input.InputRow;
import io.druid.indexing.common.TaskStatus;
import io.druid.indexing.common.TaskToolbox;
import io.druid.indexing.common.actions.SpawnTasksAction;
import io.druid.segment.realtime.Schema;
import io.druid.timeline.partition.NoneShardSpec;
import io.druid.timeline.partition.ShardSpec;
import io.druid.timeline.partition.SingleDimensionShardSpec;
import org.joda.time.DateTime;
import org.joda.time.Interval;

import java.util.List;
import java.util.Map;
import java.util.Set;

public class IndexDeterminePartitionsTask extends AbstractTask
{
  private static String makeTaskId(String groupId, DateTime start, DateTime end)
  {
    return String.format(
        "%s_partitions_%s_%s",
        groupId,
        start,
        end
    );
  }

  @JsonIgnore
  private final FirehoseFactory firehoseFactory;

  @JsonIgnore
  private final Schema schema;

  @JsonIgnore
  private final long targetPartitionSize;

  @JsonIgnore
  private final int rowFlushBoundary;

  private static final Logger log = new Logger(IndexTask.class);

  @JsonCreator
  public IndexDeterminePartitionsTask(
      @JsonProperty("id") String id,
      @JsonProperty("groupId") String groupId,
      @JsonProperty("interval") Interval interval,
      @JsonProperty("firehose") FirehoseFactory firehoseFactory,
      @JsonProperty("schema") Schema schema,
      @JsonProperty("targetPartitionSize") long targetPartitionSize,
      @JsonProperty("rowFlushBoundary") int rowFlushBoundary
  )
  {
    super(
        id != null ? id : makeTaskId(groupId, interval.getStart(), interval.getEnd()),
        groupId,
        schema.getDataSource(),
        Preconditions.checkNotNull(interval, "interval")
    );

    this.firehoseFactory = firehoseFactory;
    this.schema = schema;
    this.targetPartitionSize = targetPartitionSize;
    this.rowFlushBoundary = rowFlushBoundary;
  }

  @Override
  public String getType()
  {
    return "index_partitions";
  }

  @Override
  public TaskStatus run(TaskToolbox toolbox) throws Exception
  {
    log.info("Running with targetPartitionSize[%d]", targetPartitionSize);

    // The implementation of this determine partitions stuff is less than optimal. Should be done better.

    // We know this exists
    final Interval interval = getImplicitLockInterval().get();

    // Blacklist dimensions that have multiple values per row
    final Set<String> unusableDimensions = Sets.newHashSet();

    // Track values of all non-blacklisted dimensions
    final Map<String, TreeMultiset<String>> dimensionValueMultisets = Maps.newHashMap();

    // Load data
    final Firehose firehose = firehoseFactory.connect();

    try {
      while (firehose.hasMore()) {

        final InputRow inputRow = firehose.nextRow();

        if (interval.contains(inputRow.getTimestampFromEpoch())) {

          // Extract dimensions from event
          for (final String dim : inputRow.getDimensions()) {
            final List<String> dimValues = inputRow.getDimension(dim);

            if (!unusableDimensions.contains(dim)) {

              if (dimValues.size() == 1) {

                // Track this value
                TreeMultiset<String> dimensionValueMultiset = dimensionValueMultisets.get(dim);

                if (dimensionValueMultiset == null) {
                  dimensionValueMultiset = TreeMultiset.create();
                  dimensionValueMultisets.put(dim, dimensionValueMultiset);
                }

                dimensionValueMultiset.add(dimValues.get(0));

              } else {

                // Only single-valued dimensions can be used for partitions
                unusableDimensions.add(dim);
                dimensionValueMultisets.remove(dim);

              }

            }
          }

        }

      }
    }
    finally {
      firehose.close();
    }

    // ShardSpecs for index generator tasks
    final List<ShardSpec> shardSpecs = Lists.newArrayList();

    // Select highest-cardinality dimension
    Ordering<Map.Entry<String, TreeMultiset<String>>> byCardinalityOrdering = new Ordering<Map.Entry<String, TreeMultiset<String>>>()
    {
      @Override
      public int compare(
          Map.Entry<String, TreeMultiset<String>> left,
          Map.Entry<String, TreeMultiset<String>> right
      )
      {
        return Ints.compare(left.getValue().elementSet().size(), right.getValue().elementSet().size());
      }
    };

    if (dimensionValueMultisets.isEmpty()) {
      // No suitable partition dimension. We'll make one big segment and hope for the best.
      log.info("No suitable partition dimension found");
      shardSpecs.add(new NoneShardSpec());
    } else {
      // Find best partition dimension (heuristic: highest cardinality).
      final Map.Entry<String, TreeMultiset<String>> partitionEntry =
          byCardinalityOrdering.max(dimensionValueMultisets.entrySet());

      final String partitionDim = partitionEntry.getKey();
      final TreeMultiset<String> partitionDimValues = partitionEntry.getValue();

      log.info(
          "Partitioning on dimension[%s] with cardinality[%d] over rows[%d]",
          partitionDim,
          partitionDimValues.elementSet().size(),
          partitionDimValues.size()
      );

      // Iterate over unique partition dimension values in sorted order
      String currentPartitionStart = null;
      int currentPartitionSize = 0;
      for (final String partitionDimValue : partitionDimValues.elementSet()) {
        currentPartitionSize += partitionDimValues.count(partitionDimValue);
        if (currentPartitionSize >= targetPartitionSize) {
          final ShardSpec shardSpec = new SingleDimensionShardSpec(
              partitionDim,
              currentPartitionStart,
              partitionDimValue,
              shardSpecs.size()
          );

          log.info("Adding shard: %s", shardSpec);
          shardSpecs.add(shardSpec);

          currentPartitionSize = partitionDimValues.count(partitionDimValue);
          currentPartitionStart = partitionDimValue;
        }
      }

      if (currentPartitionSize > 0) {
        // One last shard to go
        final ShardSpec shardSpec;

        if (shardSpecs.isEmpty()) {
          shardSpec = new NoneShardSpec();
        } else {
          shardSpec = new SingleDimensionShardSpec(
              partitionDim,
              currentPartitionStart,
              null,
              shardSpecs.size()
          );
        }

        log.info("Adding shard: %s", shardSpec);
        shardSpecs.add(shardSpec);
      }
    }

    List<Task> nextTasks = Lists.transform(
        shardSpecs,
        new Function<ShardSpec, Task>()
        {
          @Override
          public Task apply(ShardSpec shardSpec)
          {
            return new IndexGeneratorTask(
                null,
                getGroupId(),
                getImplicitLockInterval().get(),
                firehoseFactory,
                new Schema(
                    schema.getDataSource(),
                    schema.getSpatialDimensions(),
                    schema.getAggregators(),
                    schema.getIndexGranularity(),
                    shardSpec
                ),
                rowFlushBoundary
            );
          }
        }
    );

    toolbox.getTaskActionClient().submit(new SpawnTasksAction(nextTasks));

    return TaskStatus.success(getId());
  }

  @JsonProperty
  public FirehoseFactory getFirehoseFactory()
  {
    return firehoseFactory;
  }

  @JsonProperty
  public Schema getSchema()
  {
    return schema;
  }

  @JsonProperty
  public long getTargetPartitionSize()
  {
    return targetPartitionSize;
  }

  @JsonProperty
  public int getRowFlushBoundary()
  {
    return rowFlushBoundary;
  }
}
@@ -1,240 +0,0 @@
/*
 * Druid - a distributed column store.
 * Copyright (C) 2012, 2013 Metamarkets Group Inc.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version 2
 * of the License, or (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
 */

package io.druid.indexing.common.task;

import com.fasterxml.jackson.annotation.JsonCreator;
import com.fasterxml.jackson.annotation.JsonIgnore;
import com.fasterxml.jackson.annotation.JsonProperty;
import com.google.common.base.Preconditions;
import com.google.common.collect.ImmutableSet;
import com.google.common.collect.Iterables;
import com.metamx.common.logger.Logger;
import io.druid.data.input.Firehose;
import io.druid.data.input.FirehoseFactory;
import io.druid.data.input.InputRow;
import io.druid.indexing.common.TaskLock;
import io.druid.indexing.common.TaskStatus;
import io.druid.indexing.common.TaskToolbox;
import io.druid.indexing.common.actions.SegmentInsertAction;
import io.druid.indexing.common.index.YeOldePlumberSchool;
import io.druid.segment.loading.DataSegmentPusher;
import io.druid.segment.realtime.FireDepartmentMetrics;
import io.druid.segment.realtime.Schema;
import io.druid.segment.realtime.plumber.Plumber;
import io.druid.segment.realtime.plumber.Sink;
import io.druid.timeline.DataSegment;
import org.joda.time.DateTime;
import org.joda.time.Interval;

import java.io.File;
import java.io.IOException;
import java.util.List;
import java.util.concurrent.CopyOnWriteArrayList;

public class IndexGeneratorTask extends AbstractTask
{
  @JsonIgnore
  private final FirehoseFactory firehoseFactory;

  @JsonIgnore
  private final Schema schema;

  @JsonIgnore
  private final int rowFlushBoundary;

  private static final Logger log = new Logger(IndexTask.class);

  @JsonCreator
  public IndexGeneratorTask(
      @JsonProperty("id") String id,
      @JsonProperty("groupId") String groupId,
      @JsonProperty("interval") Interval interval,
      @JsonProperty("firehose") FirehoseFactory firehoseFactory,
      @JsonProperty("schema") Schema schema,
      @JsonProperty("rowFlushBoundary") int rowFlushBoundary
  )
  {
    super(
        id != null
        ? id
        : String.format(
            "%s_generator_%s_%s_%s",
            groupId,
            interval.getStart(),
            interval.getEnd(),
            schema.getShardSpec().getPartitionNum()
        ),
        groupId,
        schema.getDataSource(),
        Preconditions.checkNotNull(interval, "interval")
    );

    this.firehoseFactory = firehoseFactory;
    this.schema = schema;
    this.rowFlushBoundary = rowFlushBoundary;
  }

  @Override
  public String getType()
  {
    return "index_generator";
  }

  @Override
  public TaskStatus run(final TaskToolbox toolbox) throws Exception
  {
    // We should have a lock from before we started running
    final TaskLock myLock = Iterables.getOnlyElement(getTaskLocks(toolbox));

    // We know this exists
    final Interval interval = getImplicitLockInterval().get();

    // Set up temporary directory for indexing
    final File tmpDir = new File(
        toolbox.getTaskWorkDir(),
        String.format(
            "%s_%s_%s_%s_%s",
            this.getDataSource(),
            interval.getStart(),
            interval.getEnd(),
            myLock.getVersion(),
            schema.getShardSpec().getPartitionNum()
        )
    );

    // We need to track published segments.
    final List<DataSegment> pushedSegments = new CopyOnWriteArrayList<DataSegment>();
    final DataSegmentPusher wrappedDataSegmentPusher = new DataSegmentPusher()
    {
      @Override
      public String getPathForHadoop(String dataSource)
      {
        return toolbox.getSegmentPusher().getPathForHadoop(dataSource);
      }

      @Override
      public DataSegment push(File file, DataSegment segment) throws IOException
      {
        final DataSegment pushedSegment = toolbox.getSegmentPusher().push(file, segment);
        pushedSegments.add(pushedSegment);
        return pushedSegment;
      }
    };

    // Create firehose + plumber
    final FireDepartmentMetrics metrics = new FireDepartmentMetrics();
    final Firehose firehose = firehoseFactory.connect();
    final Plumber plumber = new YeOldePlumberSchool(
        interval,
        myLock.getVersion(),
        wrappedDataSegmentPusher,
        tmpDir
    ).findPlumber(schema, metrics);

    // rowFlushBoundary for this job
    final int myRowFlushBoundary = this.rowFlushBoundary > 0
                                   ? rowFlushBoundary
                                   : toolbox.getConfig().getDefaultRowFlushBoundary();

    try {
      while (firehose.hasMore()) {
        final InputRow inputRow = firehose.nextRow();

        if (shouldIndex(inputRow)) {
          final Sink sink = plumber.getSink(inputRow.getTimestampFromEpoch());
          if (sink == null) {
            throw new NullPointerException(
                String.format(
                    "Was expecting non-null sink for timestamp[%s]",
                    new DateTime(inputRow.getTimestampFromEpoch())
                )
            );
          }

          int numRows = sink.add(inputRow);
          metrics.incrementProcessed();

          if (numRows >= myRowFlushBoundary) {
            plumber.persist(firehose.commit());
          }
        } else {
          metrics.incrementThrownAway();
        }
      }
    }
    finally {
      firehose.close();
    }

    plumber.persist(firehose.commit());
    plumber.finishJob();

    // Output metrics
    log.info(
        "Task[%s] took in %,d rows (%,d processed, %,d unparseable, %,d thrown away) and output %,d rows",
        getId(),
        metrics.processed() + metrics.unparseable() + metrics.thrownAway(),
        metrics.processed(),
        metrics.unparseable(),
        metrics.thrownAway(),
        metrics.rowOutput()
    );

    // Request segment pushes
    toolbox.getTaskActionClient().submit(new SegmentInsertAction(ImmutableSet.copyOf(pushedSegments)));

    // Done
    return TaskStatus.success(getId());
  }

  /**
   * Should we index this inputRow? Decision is based on our interval and shardSpec.
   *
   * @param inputRow the row to check
   *
   * @return true or false
   */
  private boolean shouldIndex(InputRow inputRow)
  {
    if (getImplicitLockInterval().get().contains(inputRow.getTimestampFromEpoch())) {
      return schema.getShardSpec().isInChunk(inputRow);
    } else {
      return false;
    }
  }

  @JsonProperty("firehose")
  public FirehoseFactory getFirehoseFactory()
  {
    return firehoseFactory;
  }

  @JsonProperty
  public Schema getSchema()
  {
    return schema;
  }

  @JsonProperty
  public int getRowFlushBoundary()
  {
    return rowFlushBoundary;
  }
}
@@ -22,26 +22,48 @@ package io.druid.indexing.common.task;
import com.fasterxml.jackson.annotation.JsonCreator;
import com.fasterxml.jackson.annotation.JsonIgnore;
import com.fasterxml.jackson.annotation.JsonProperty;
import com.google.api.client.util.Sets;
import com.google.common.base.Preconditions;
import com.google.common.collect.ImmutableList;
import com.google.common.collect.Iterables;
import com.google.common.collect.Lists;
import com.google.common.collect.Maps;
import com.google.common.collect.Ordering;
import com.google.common.collect.TreeMultiset;
import com.google.common.primitives.Ints;
import com.metamx.common.logger.Logger;
import io.druid.data.input.Firehose;
import io.druid.data.input.FirehoseFactory;
import io.druid.data.input.InputRow;
import io.druid.data.input.impl.SpatialDimensionSchema;
import io.druid.granularity.QueryGranularity;
import io.druid.indexer.granularity.GranularitySpec;
import io.druid.indexing.common.TaskLock;
import io.druid.indexing.common.TaskStatus;
import io.druid.indexing.common.TaskToolbox;
import io.druid.indexing.common.actions.SpawnTasksAction;
import io.druid.indexing.common.actions.TaskActionClient;
import io.druid.indexing.common.actions.SegmentInsertAction;
import io.druid.indexing.common.index.YeOldePlumberSchool;
import io.druid.query.aggregation.AggregatorFactory;
import io.druid.segment.loading.DataSegmentPusher;
import io.druid.segment.realtime.FireDepartmentMetrics;
import io.druid.segment.realtime.Schema;
import io.druid.segment.realtime.plumber.Plumber;
import io.druid.segment.realtime.plumber.Sink;
import io.druid.timeline.DataSegment;
import io.druid.timeline.partition.NoneShardSpec;
import io.druid.timeline.partition.ShardSpec;
import io.druid.timeline.partition.SingleDimensionShardSpec;
import org.joda.time.DateTime;
import org.joda.time.Interval;

import java.io.File;
import java.io.IOException;
import java.util.List;
import java.util.Map;
import java.util.Set;
import java.util.concurrent.CopyOnWriteArrayList;

public class IndexTask extends AbstractTask
public class IndexTask extends AbstractFixedIntervalTask
{
  private static final Logger log = new Logger(IndexTask.class);

@@ -58,7 +80,7 @@ public class IndexTask extends AbstractTask
  private final QueryGranularity indexGranularity;

  @JsonIgnore
  private final long targetPartitionSize;
  private final int targetPartitionSize;

  @JsonIgnore
  private final FirehoseFactory firehoseFactory;

@@ -74,7 +96,7 @@ public class IndexTask extends AbstractTask
      @JsonProperty("spatialDimensions") List<SpatialDimensionSchema> spatialDimensions,
      @JsonProperty("aggregators") AggregatorFactory[] aggregators,
      @JsonProperty("indexGranularity") QueryGranularity indexGranularity,
      @JsonProperty("targetPartitionSize") long targetPartitionSize,
      @JsonProperty("targetPartitionSize") int targetPartitionSize,
      @JsonProperty("firehose") FirehoseFactory firehoseFactory,
      @JsonProperty("rowFlushBoundary") int rowFlushBoundary
  )

@@ -96,58 +118,10 @@ public class IndexTask extends AbstractTask
    this.aggregators = aggregators;
    this.indexGranularity = (indexGranularity == null) ? QueryGranularity.NONE : indexGranularity;
    this.targetPartitionSize = targetPartitionSize;
    this.firehoseFactory = firehoseFactory;
    this.firehoseFactory = Preconditions.checkNotNull(firehoseFactory, "firehoseFactory");
    this.rowFlushBoundary = rowFlushBoundary;
  }

  public List<Task> toSubtasks()
  {
    final List<Task> retVal = Lists.newArrayList();

    for (final Interval interval : granularitySpec.bucketIntervals()) {
      if (targetPartitionSize > 0) {
        // Need to do one pass over the data before indexing in order to determine good partitions
        retVal.add(
            new IndexDeterminePartitionsTask(
                null,
                getGroupId(),
                interval,
                firehoseFactory,
                new Schema(
                    getDataSource(),
                    spatialDimensions,
                    aggregators,
                    indexGranularity,
                    new NoneShardSpec()
                ),
                targetPartitionSize,
                rowFlushBoundary
            )
        );
      } else {
        // Jump straight into indexing
        retVal.add(
            new IndexGeneratorTask(
                null,
                getGroupId(),
                interval,
                firehoseFactory,
                new Schema(
                    getDataSource(),
                    spatialDimensions,
                    aggregators,
                    indexGranularity,
                    new NoneShardSpec()
                ),
                rowFlushBoundary
            )
        );
      }
    }

    return retVal;
  }

  @Override
  public String getType()
  {

@@ -155,16 +129,278 @@ public class IndexTask extends AbstractTask
  }

  @Override
  public TaskStatus preflight(TaskActionClient taskActionClient) throws Exception
  public TaskStatus run(TaskToolbox toolbox) throws Exception
  {
    taskActionClient.submit(new SpawnTasksAction(toSubtasks()));
    final TaskLock myLock = Iterables.getOnlyElement(getTaskLocks(toolbox));
    final Set<DataSegment> segments = Sets.newHashSet();
    for (final Interval bucket : granularitySpec.bucketIntervals()) {
      final List<ShardSpec> shardSpecs;
      if (targetPartitionSize > 0) {
        shardSpecs = determinePartitions(bucket, targetPartitionSize);
      } else {
        shardSpecs = ImmutableList.<ShardSpec>of(new NoneShardSpec());
      }
      for (final ShardSpec shardSpec : shardSpecs) {
        final DataSegment segment = generateSegment(
            toolbox,
            new Schema(
                getDataSource(),
                spatialDimensions,
                aggregators,
                indexGranularity,
                shardSpec
            ),
            bucket,
            myLock.getVersion()
        );
        segments.add(segment);
      }
    }
    toolbox.pushSegments(segments);
    return TaskStatus.success(getId());
  }

  @Override
  public TaskStatus run(TaskToolbox toolbox) throws Exception
  private List<ShardSpec> determinePartitions(
      final Interval interval,
      final int targetPartitionSize
  ) throws IOException
  {
    throw new IllegalStateException("IndexTasks should not be run!");
    log.info("Determining partitions for interval[%s] with targetPartitionSize[%d]", interval, targetPartitionSize);

    // The implementation of this determine partitions stuff is less than optimal. Should be done better.

    // Blacklist dimensions that have multiple values per row
    final Set<String> unusableDimensions = com.google.common.collect.Sets.newHashSet();
    // Track values of all non-blacklisted dimensions
    final Map<String, TreeMultiset<String>> dimensionValueMultisets = Maps.newHashMap();

    // Load data
    try (Firehose firehose = firehoseFactory.connect()) {
      while (firehose.hasMore()) {
        final InputRow inputRow = firehose.nextRow();
        if (interval.contains(inputRow.getTimestampFromEpoch())) {
          // Extract dimensions from event
          for (final String dim : inputRow.getDimensions()) {
            final List<String> dimValues = inputRow.getDimension(dim);
            if (!unusableDimensions.contains(dim)) {
              if (dimValues.size() == 1) {
                // Track this value
                TreeMultiset<String> dimensionValueMultiset = dimensionValueMultisets.get(dim);
                if (dimensionValueMultiset == null) {
                  dimensionValueMultiset = TreeMultiset.create();
                  dimensionValueMultisets.put(dim, dimensionValueMultiset);
                }
                dimensionValueMultiset.add(dimValues.get(0));
              } else {
                // Only single-valued dimensions can be used for partitions
                unusableDimensions.add(dim);
                dimensionValueMultisets.remove(dim);
              }
            }
          }
        }
      }
    }

    // ShardSpecs we will return
    final List<ShardSpec> shardSpecs = Lists.newArrayList();

    // Select highest-cardinality dimension
    Ordering<Map.Entry<String, TreeMultiset<String>>> byCardinalityOrdering = new Ordering<Map.Entry<String, TreeMultiset<String>>>()
    {
      @Override
      public int compare(
          Map.Entry<String, TreeMultiset<String>> left,
          Map.Entry<String, TreeMultiset<String>> right
      )
      {
        return Ints.compare(left.getValue().elementSet().size(), right.getValue().elementSet().size());
      }
    };

    if (dimensionValueMultisets.isEmpty()) {
      // No suitable partition dimension. We'll make one big segment and hope for the best.
      log.info("No suitable partition dimension found");
      shardSpecs.add(new NoneShardSpec());
    } else {
      // Find best partition dimension (heuristic: highest cardinality).
      final Map.Entry<String, TreeMultiset<String>> partitionEntry =
          byCardinalityOrdering.max(dimensionValueMultisets.entrySet());

      final String partitionDim = partitionEntry.getKey();
      final TreeMultiset<String> partitionDimValues = partitionEntry.getValue();

      log.info(
          "Partitioning on dimension[%s] with cardinality[%d] over rows[%d]",
          partitionDim,
          partitionDimValues.elementSet().size(),
          partitionDimValues.size()
      );

      // Iterate over unique partition dimension values in sorted order
      String currentPartitionStart = null;
      int currentPartitionSize = 0;
      for (final String partitionDimValue : partitionDimValues.elementSet()) {
        currentPartitionSize += partitionDimValues.count(partitionDimValue);
        if (currentPartitionSize >= targetPartitionSize) {
          final ShardSpec shardSpec = new SingleDimensionShardSpec(
              partitionDim,
              currentPartitionStart,
              partitionDimValue,
              shardSpecs.size()
          );

          log.info("Adding shard: %s", shardSpec);
          shardSpecs.add(shardSpec);

          currentPartitionSize = partitionDimValues.count(partitionDimValue);
          currentPartitionStart = partitionDimValue;
        }
      }

      if (currentPartitionSize > 0) {
        // One last shard to go
        final ShardSpec shardSpec;

        if (shardSpecs.isEmpty()) {
          shardSpec = new NoneShardSpec();
        } else {
          shardSpec = new SingleDimensionShardSpec(
              partitionDim,
              currentPartitionStart,
              null,
              shardSpecs.size()
          );
        }

        log.info("Adding shard: %s", shardSpec);
        shardSpecs.add(shardSpec);
      }
    }

    return shardSpecs;
  }

  private DataSegment generateSegment(
      final TaskToolbox toolbox,
      final Schema schema,
      final Interval interval,
      final String version
  ) throws IOException
  {
    // Set up temporary directory.
    final File tmpDir = new File(
        toolbox.getTaskWorkDir(),
        String.format(
            "%s_%s_%s_%s_%s",
            this.getDataSource(),
            interval.getStart(),
            interval.getEnd(),
            version,
            schema.getShardSpec().getPartitionNum()
        )
    );

    // We need to track published segments.
    final List<DataSegment> pushedSegments = new CopyOnWriteArrayList<DataSegment>();
    final DataSegmentPusher wrappedDataSegmentPusher = new DataSegmentPusher()
    {
      @Override
      public String getPathForHadoop(String dataSource)
      {
        return toolbox.getSegmentPusher().getPathForHadoop(dataSource);
      }

      @Override
      public DataSegment push(File file, DataSegment segment) throws IOException
      {
        final DataSegment pushedSegment = toolbox.getSegmentPusher().push(file, segment);
        pushedSegments.add(pushedSegment);
        return pushedSegment;
      }
    };

    // Create firehose + plumber
    final FireDepartmentMetrics metrics = new FireDepartmentMetrics();
    final Firehose firehose = firehoseFactory.connect();
    final Plumber plumber = new YeOldePlumberSchool(
        interval,
        version,
        wrappedDataSegmentPusher,
        tmpDir
    ).findPlumber(schema, metrics);

    // rowFlushBoundary for this job
    final int myRowFlushBoundary = this.rowFlushBoundary > 0
                                   ? rowFlushBoundary
                                   : toolbox.getConfig().getDefaultRowFlushBoundary();

    try {
      plumber.startJob();

      while (firehose.hasMore()) {
        final InputRow inputRow = firehose.nextRow();

        if (shouldIndex(schema, interval, inputRow)) {
          final Sink sink = plumber.getSink(inputRow.getTimestampFromEpoch());
          if (sink == null) {
            throw new NullPointerException(
                String.format(
                    "Was expecting non-null sink for timestamp[%s]",
                    new DateTime(inputRow.getTimestampFromEpoch())
                )
            );
          }

          int numRows = sink.add(inputRow);
          metrics.incrementProcessed();

          if (numRows >= myRowFlushBoundary) {
            plumber.persist(firehose.commit());
          }
        } else {
          metrics.incrementThrownAway();
        }
      }
    }
    finally {
      firehose.close();
    }

    plumber.persist(firehose.commit());

    try {
      plumber.finishJob();
    }
    finally {
      log.info(
          "Task[%s] interval[%s] partition[%d] took in %,d rows (%,d processed, %,d unparseable, %,d thrown away)"
          + " and output %,d rows",
          getId(),
          interval,
          schema.getShardSpec().getPartitionNum(),
          metrics.processed() + metrics.unparseable() + metrics.thrownAway(),
          metrics.processed(),
          metrics.unparseable(),
          metrics.thrownAway(),
          metrics.rowOutput()
      );
    }

    // We expect a single segment to have been created.
    return Iterables.getOnlyElement(pushedSegments);
  }

  /**
   * Should we index this inputRow? Decision is based on our interval and shardSpec.
   *
   * @param inputRow the row to check
   *
   * @return true or false
   */
  private boolean shouldIndex(final Schema schema, final Interval interval, final InputRow inputRow)
  {
    return interval.contains(inputRow.getTimestampFromEpoch()) && schema.getShardSpec().isInChunk(inputRow);
  }

  @JsonProperty

@@ -191,7 +427,7 @@ public class IndexTask extends AbstractTask
    return targetPartitionSize;
  }

  @JsonProperty
  @JsonProperty("firehose")
  public FirehoseFactory getFirehoseFactory()
  {
    return firehoseFactory;

@@ -202,4 +438,10 @@ public class IndexTask extends AbstractTask
  {
    return rowFlushBoundary;
  }

  @JsonProperty
  public List<SpatialDimensionSchema> getSpatialDimensions()
  {
    return spatialDimensions;
  }
}
|
@@ -28,7 +28,6 @@ import com.metamx.common.logger.Logger;
import io.druid.indexing.common.TaskLock;
import io.druid.indexing.common.TaskStatus;
import io.druid.indexing.common.TaskToolbox;
import io.druid.indexing.common.actions.LockListAction;
import io.druid.indexing.common.actions.SegmentListUnusedAction;
import io.druid.indexing.common.actions.SegmentNukeAction;
import io.druid.timeline.DataSegment;

@@ -38,7 +37,7 @@ import java.util.List;

/**
 */
public class KillTask extends AbstractTask
public class KillTask extends AbstractFixedIntervalTask
{
  private static final Logger log = new Logger(KillTask.class);

@@ -68,12 +67,12 @@ public class KillTask extends AbstractTask
    // Confirm we have a lock (will throw if there isn't exactly one element)
    final TaskLock myLock = Iterables.getOnlyElement(getTaskLocks(toolbox));

    if(!myLock.getDataSource().equals(getDataSource())) {
    if (!myLock.getDataSource().equals(getDataSource())) {
      throw new ISE("WTF?! Lock dataSource[%s] != task dataSource[%s]", myLock.getDataSource(), getDataSource());
    }

    if(!myLock.getInterval().equals(getImplicitLockInterval().get())) {
      throw new ISE("WTF?! Lock interval[%s] != task interval[%s]", myLock.getInterval(), getImplicitLockInterval().get());
    if (!myLock.getInterval().equals(getInterval())) {
      throw new ISE("WTF?! Lock interval[%s] != task interval[%s]", myLock.getInterval(), getInterval());
    }

    // List unused segments

@@ -82,8 +81,8 @@ public class KillTask extends AbstractTask
        .submit(new SegmentListUnusedAction(myLock.getDataSource(), myLock.getInterval()));

    // Verify none of these segments have versions > lock version
    for(final DataSegment unusedSegment : unusedSegments) {
      if(unusedSegment.getVersion().compareTo(myLock.getVersion()) > 0) {
    for (final DataSegment unusedSegment : unusedSegments) {
      if (unusedSegment.getVersion().compareTo(myLock.getVersion()) > 0) {
        throw new ISE(
            "WTF?! Unused segment[%s] has version[%s] > task version[%s]",
            unusedSegment.getIdentifier(),

@@ -98,11 +97,9 @@ public class KillTask extends AbstractTask
    // Kill segments
    for (DataSegment segment : unusedSegments) {
      toolbox.getDataSegmentKiller().kill(segment);
      toolbox.getTaskActionClient().submit(new SegmentNukeAction(ImmutableSet.of(segment)));
    }

    // Remove metadata for these segments
    toolbox.getTaskActionClient().submit(new SegmentNukeAction(ImmutableSet.copyOf(unusedSegments)));

    return TaskStatus.success(getId());
  }
}

@@ -27,7 +27,7 @@ import com.google.common.base.Joiner;
import com.google.common.base.Objects;
import com.google.common.base.Preconditions;
import com.google.common.base.Predicate;
import com.google.common.base.Throwables;
import com.google.common.collect.ImmutableList;
import com.google.common.collect.ImmutableSet;
import com.google.common.collect.Iterables;
import com.google.common.collect.Lists;

@@ -41,9 +41,8 @@ import com.metamx.emitter.service.ServiceMetricEvent;
import io.druid.indexing.common.TaskLock;
import io.druid.indexing.common.TaskStatus;
import io.druid.indexing.common.TaskToolbox;
import io.druid.indexing.common.actions.LockAcquireAction;
import io.druid.indexing.common.actions.LockListAction;
import io.druid.indexing.common.actions.SegmentInsertAction;
import io.druid.indexing.common.actions.SegmentListUsedAction;
import io.druid.indexing.common.actions.TaskActionClient;
import io.druid.segment.IndexIO;
import io.druid.timeline.DataSegment;

@@ -53,14 +52,13 @@ import org.joda.time.Interval;

import javax.annotation.Nullable;
import java.io.File;
import java.io.IOException;
import java.util.List;
import java.util.Map;
import java.util.Set;

/**
 */
public abstract class MergeTaskBase extends AbstractTask
public abstract class MergeTaskBase extends AbstractFixedIntervalTask
{
  @JsonIgnore
  private final List<DataSegment> segments;

@@ -145,7 +143,7 @@ public abstract class MergeTaskBase extends AbstractTask
    );

    // download segments to merge
    final Map<DataSegment, File> gettedSegments = toolbox.getSegments(segments);
    final Map<DataSegment, File> gettedSegments = toolbox.fetchSegments(segments);

    // merge files together
    final File fileToUpload = merge(gettedSegments, new File(taskDir, "merged"));

@@ -168,7 +166,7 @@ public abstract class MergeTaskBase extends AbstractTask
    emitter.emit(builder.build("merger/uploadTime", System.currentTimeMillis() - uploadStart));
    emitter.emit(builder.build("merger/mergeSize", uploadedSegment.getSize()));

    toolbox.getTaskActionClient().submit(new SegmentInsertAction(ImmutableSet.of(uploadedSegment)));
    toolbox.pushSegments(ImmutableList.of(uploadedSegment));

    return TaskStatus.success(getId());
  }

@@ -186,9 +184,12 @@ public abstract class MergeTaskBase extends AbstractTask
   * we are operating on every segment that overlaps the chosen interval.
   */
  @Override
  public TaskStatus preflight(TaskActionClient taskActionClient)
  public boolean isReady(TaskActionClient taskActionClient) throws Exception
  {
    try {
      // Try to acquire lock
      if (!super.isReady(taskActionClient)) {
        return false;
      } else {
        final Function<DataSegment, String> toIdentifier = new Function<DataSegment, String>()
        {
          @Override

@@ -199,7 +200,10 @@ public abstract class MergeTaskBase extends AbstractTask
        };

        final Set<String> current = ImmutableSet.copyOf(
            Iterables.transform(taskActionClient.submit(defaultListUsedAction()), toIdentifier)
            Iterables.transform(
                taskActionClient.submit(new SegmentListUsedAction(getDataSource(), getInterval())),
                toIdentifier
            )
        );
        final Set<String> requested = ImmutableSet.copyOf(Iterables.transform(segments, toIdentifier));

@@ -219,10 +223,7 @@ public abstract class MergeTaskBase extends AbstractTask
          );
        }

        return TaskStatus.running(getId());
      }
      catch (IOException e) {
        throw Throwables.propagate(e);
        return true;
      }
    }

@@ -241,7 +242,7 @@ public abstract class MergeTaskBase extends AbstractTask
    return Objects.toStringHelper(this)
                  .add("id", getId())
                  .add("dataSource", getDataSource())
                  .add("interval", getImplicitLockInterval())
                  .add("interval", getInterval())
                  .add("segments", segments)
                  .toString();
  }

@@ -0,0 +1,115 @@
/*
 * Druid - a distributed column store.
 * Copyright (C) 2012, 2013  Metamarkets Group Inc.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version 2
 * of the License, or (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
 */

package io.druid.indexing.common.task;

import com.fasterxml.jackson.annotation.JsonCreator;
import com.fasterxml.jackson.annotation.JsonProperty;
import com.google.common.collect.ImmutableSet;
import com.google.common.collect.Iterables;
import com.google.common.collect.Lists;
import com.metamx.common.ISE;
import com.metamx.common.logger.Logger;
import io.druid.indexing.common.TaskLock;
import io.druid.indexing.common.TaskStatus;
import io.druid.indexing.common.TaskToolbox;
import io.druid.indexing.common.actions.SegmentListUnusedAction;
import io.druid.indexing.common.actions.SegmentMetadataUpdateAction;
import io.druid.timeline.DataSegment;
import org.joda.time.Interval;

import java.util.List;
import java.util.Map;

public class MoveTask extends AbstractFixedIntervalTask
{
  private static final Logger log = new Logger(MoveTask.class);

  private final Map<String, Object> targetLoadSpec;

  @JsonCreator
  public MoveTask(
      @JsonProperty("id") String id,
      @JsonProperty("dataSource") String dataSource,
      @JsonProperty("interval") Interval interval,
      @JsonProperty("target") Map<String, Object> targetLoadSpec
  )
  {
    super(
        TaskUtils.makeId(id, "move", dataSource, interval),
        dataSource,
        interval
    );
    this.targetLoadSpec = targetLoadSpec;
  }

  @Override
  public String getType()
  {
    return "move";
  }

  @Override
  public TaskStatus run(TaskToolbox toolbox) throws Exception
  {
    // Confirm we have a lock (will throw if there isn't exactly one element)
    final TaskLock myLock = Iterables.getOnlyElement(getTaskLocks(toolbox));

    if(!myLock.getDataSource().equals(getDataSource())) {
      throw new ISE("WTF?! Lock dataSource[%s] != task dataSource[%s]", myLock.getDataSource(), getDataSource());
    }

    if(!myLock.getInterval().equals(getInterval())) {
      throw new ISE("WTF?! Lock interval[%s] != task interval[%s]", myLock.getInterval(), getInterval());
    }

    // List unused segments
    final List<DataSegment> unusedSegments = toolbox
        .getTaskActionClient()
        .submit(new SegmentListUnusedAction(myLock.getDataSource(), myLock.getInterval()));

    // Verify none of these segments have versions > lock version
    for(final DataSegment unusedSegment : unusedSegments) {
      if(unusedSegment.getVersion().compareTo(myLock.getVersion()) > 0) {
        throw new ISE(
            "WTF?! Unused segment[%s] has version[%s] > task version[%s]",
            unusedSegment.getIdentifier(),
            unusedSegment.getVersion(),
            myLock.getVersion()
        );
      }

      log.info("OK to move segment: %s", unusedSegment.getIdentifier());
    }

    // Move segments
    for (DataSegment segment : unusedSegments) {
      final DataSegment movedSegment = toolbox.getDataSegmentMover().move(segment, targetLoadSpec);
      toolbox.getTaskActionClient().submit(new SegmentMetadataUpdateAction(ImmutableSet.of(movedSegment)));
    }

    return TaskStatus.success(getId());
  }

  @JsonProperty
  public Map<String, Object> getTargetLoadSpec()
  {
    return targetLoadSpec;
  }
}

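A usage note, not part of the diff: since the constructor above is @JsonCreator-annotated, a move task can also be built directly in Java. The sketch below uses made-up values; the "target" map contents ("type", "bucket") are assumptions about what the configured DataSegmentMover understands, not something this diff confirms. ImmutableMap is com.google.common.collect.ImmutableMap.

// Hedged sketch: constructing a MoveTask with example values.
final MoveTask moveTask = new MoveTask(
    null,                                  // null id lets TaskUtils.makeId generate one
    "example_datasource",                  // hypothetical dataSource
    new Interval("2013-01-01/2013-02-01"), // interval whose unused segments should move
    ImmutableMap.<String, Object>of("type", "s3_zip", "bucket", "example-archive")
);
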
@@ -20,41 +20,63 @@
package io.druid.indexing.common.task;

import com.fasterxml.jackson.annotation.JsonCreator;
import com.fasterxml.jackson.annotation.JsonIgnore;
import com.fasterxml.jackson.annotation.JsonProperty;
import com.metamx.common.ISE;
import com.metamx.common.logger.Logger;
import io.druid.data.input.FirehoseFactory;
import io.druid.indexing.common.TaskStatus;
import io.druid.indexing.common.TaskToolbox;
import io.druid.indexing.common.actions.TaskActionClient;
import org.joda.time.DateTime;
import org.joda.time.Interval;
import org.joda.time.Period;

/**
 */
public class NoopTask extends AbstractTask
{
  private static final Logger log = new Logger(NoopTask.class);
  private static int defaultRunTime = 2500;
  private static final int defaultRunTime = 2500;
  private static final int defaultIsReadyTime = 0;
  private static final IsReadyResult defaultIsReadyResult = IsReadyResult.YES;

  private final int runTime;
  enum IsReadyResult
  {
    YES,
    NO,
    EXCEPTION
  }

  @JsonIgnore
  private final long runTime;

  @JsonIgnore
  private final long isReadyTime;

  @JsonIgnore
  private final IsReadyResult isReadyResult;

  @JsonIgnore
  private final FirehoseFactory firehoseFactory;

  @JsonCreator
  public NoopTask(
      @JsonProperty("id") String id,
      @JsonProperty("interval") Interval interval,
      @JsonProperty("runTime") int runTime,
      @JsonProperty("runTime") long runTime,
      @JsonProperty("isReadyTime") long isReadyTime,
      @JsonProperty("isReadyResult") String isReadyResult,
      @JsonProperty("firehose") FirehoseFactory firehoseFactory
  )
  {
    super(
        id == null ? String.format("noop_%s", new DateTime()) : id,
        "none",
        interval == null ? new Interval(Period.days(1), new DateTime()) : interval
        "none"
    );

    this.runTime = (runTime == 0) ? defaultRunTime : runTime;

    this.isReadyTime = (isReadyTime == 0) ? defaultIsReadyTime : isReadyTime;
    this.isReadyResult = (isReadyResult == null)
                         ? defaultIsReadyResult
                         : IsReadyResult.valueOf(isReadyResult.toUpperCase());
    this.firehoseFactory = firehoseFactory;
  }

@@ -64,18 +86,45 @@ public class NoopTask extends AbstractTask
    return "noop";
  }

  @JsonProperty("runTime")
  public int getRunTime()
  @JsonProperty
  public long getRunTime()
  {
    return runTime;
  }

  @JsonProperty
  public long getIsReadyTime()
  {
    return isReadyTime;
  }

  @JsonProperty
  public IsReadyResult getIsReadyResult()
  {
    return isReadyResult;
  }

  @JsonProperty("firehose")
  public FirehoseFactory getFirehoseFactory()
  {
    return firehoseFactory;
  }

  @Override
  public boolean isReady(TaskActionClient taskActionClient) throws Exception
  {
    switch (isReadyResult) {
      case YES:
        return true;
      case NO:
        return false;
      case EXCEPTION:
        throw new ISE("Not ready. Never will be ready. Go away!");
      default:
        throw new AssertionError("#notreached");
    }
  }

  @Override
  public TaskStatus run(TaskToolbox toolbox) throws Exception
  {

@@ -90,4 +139,9 @@ public class NoopTask extends AbstractTask
    log.info("Woke up!");
    return TaskStatus.success(getId());
  }

  public static NoopTask create()
  {
    return new NoopTask(null, 0, 0, null, null);
  }
}

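For reference (a sketch, not diff content), the new knobs combine like this; argument order follows the create() factory above, and "no" is parsed case-insensitively into IsReadyResult.NO:

// All defaults: ~2500ms run time, ready immediately, isReady() returns true.
final NoopTask quick = NoopTask.create();

// Explicit arguments: run for 10 seconds once started, but always report
// "not ready yet", so a poller never hands it to a worker.
final NoopTask neverReady = new NoopTask(null, 10000, 0, "no", null);
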
@@ -24,7 +24,6 @@ import com.fasterxml.jackson.annotation.JsonIgnore;
import com.fasterxml.jackson.annotation.JsonProperty;
import com.google.common.base.Throwables;
import com.google.common.collect.ImmutableList;
import com.google.common.collect.ImmutableSet;
import com.google.common.io.Closeables;
import com.metamx.common.exception.FormattedException;
import com.metamx.emitter.EmittingLogger;

@@ -35,9 +34,8 @@ import io.druid.indexing.common.TaskLock;
import io.druid.indexing.common.TaskStatus;
import io.druid.indexing.common.TaskToolbox;
import io.druid.indexing.common.actions.LockAcquireAction;
import io.druid.indexing.common.actions.LockListAction;
import io.druid.indexing.common.actions.LockReleaseAction;
import io.druid.indexing.common.actions.SegmentInsertAction;
import io.druid.indexing.common.actions.TaskActionClient;
import io.druid.query.FinalizeResultsQueryRunner;
import io.druid.query.Query;
import io.druid.query.QueryRunner;

@@ -130,8 +128,7 @@ public class RealtimeIndexTask extends AbstractTask
            ), 1
        )
        : taskResource,
        schema.getDataSource(),
        null
        schema.getDataSource()
    );

    this.schema = schema;

@@ -167,6 +164,12 @@ public class RealtimeIndexTask extends AbstractTask
    }
  }

  @Override
  public boolean isReady(TaskActionClient taskActionClient) throws Exception
  {
    return true;
  }

  @Override
  public TaskStatus run(final TaskToolbox toolbox) throws Exception
  {

@@ -206,7 +209,7 @@ public class RealtimeIndexTask extends AbstractTask
      @Override
      public void announceSegment(final DataSegment segment) throws IOException
      {
        // NOTE: Side effect: Calling announceSegment causes a lock to be acquired
        // Side effect: Calling announceSegment causes a lock to be acquired
        toolbox.getTaskActionClient().submit(new LockAcquireAction(segment.getInterval()));
        toolbox.getSegmentAnnouncer().announceSegment(segment);
      }

@@ -225,6 +228,7 @@ public class RealtimeIndexTask extends AbstractTask
      @Override
      public void announceSegments(Iterable<DataSegment> segments) throws IOException
      {
        // Side effect: Calling announceSegments causes locks to be acquired
        for (DataSegment segment : segments) {
          toolbox.getTaskActionClient().submit(new LockAcquireAction(segment.getInterval()));
        }

@@ -257,7 +261,7 @@ public class RealtimeIndexTask extends AbstractTask
      public String getVersion(final Interval interval)
      {
        try {
          // NOTE: Side effect: Calling getVersion causes a lock to be acquired
          // Side effect: Calling getVersion causes a lock to be acquired
          final TaskLock myLock = toolbox.getTaskActionClient()
                                         .submit(new LockAcquireAction(interval));

@@ -337,11 +341,11 @@ public class RealtimeIndexTask extends AbstractTask
        }
      }
    }
    catch (Exception e) {
    catch (Throwable e) {
      normalExit = false;
      log.makeAlert(e, "Exception aborted realtime processing[%s]", schema.getDataSource())
         .emit();
      normalExit = false;
      throw Throwables.propagate(e);
      throw e;
    }
    finally {
      if (normalExit) {

@@ -412,7 +416,7 @@ public class RealtimeIndexTask extends AbstractTask
    @Override
    public void publishSegment(DataSegment segment) throws IOException
    {
      taskToolbox.getTaskActionClient().submit(new SegmentInsertAction(ImmutableSet.of(segment)));
      taskToolbox.pushSegments(ImmutableList.of(segment));
    }
  }
}

@@ -21,27 +21,22 @@ package io.druid.indexing.common.task;

import com.fasterxml.jackson.annotation.JsonSubTypes;
import com.fasterxml.jackson.annotation.JsonTypeInfo;
import com.google.common.base.Optional;
import io.druid.indexing.common.TaskStatus;
import io.druid.indexing.common.TaskToolbox;
import io.druid.indexing.common.actions.TaskActionClient;
import io.druid.query.Query;
import io.druid.query.QueryRunner;
import org.joda.time.Interval;

/**
 * Represents a task that can run on a worker. The general contracts surrounding Tasks are:
 * <ul>
 * <li>Tasks must operate on a single datasource.</li>
 * <li>Tasks should be immutable, since the task ID is used as a proxy for the task in many locations.</li>
 * <li>Task IDs must be unique. This can be done by naming them using UUIDs or the current timestamp.</li>
 * <li>Tasks are each part of a "task group", which is a set of tasks that can share interval locks. These are
 * useful for producing sharded segments.</li>
 * <li>Tasks can optionally have an "implicit lock interval". Tasks with this property are guaranteed to have
 * a lock on that interval during their {@link #preflight(io.druid.indexing.common.actions.TaskActionClient)}
 * and {@link #run(io.druid.indexing.common.TaskToolbox)} methods.</li>
 * <li>Tasks do not need to explicitly release locks; they are released upon task completion. Tasks may choose
 * to release locks early if they desire.</li>
 * <li>Tasks must operate on a single datasource.</li>
 * <li>Tasks should be immutable, since the task ID is used as a proxy for the task in many locations.</li>
 * <li>Task IDs must be unique. This can be done by naming them using UUIDs or the current timestamp.</li>
 * <li>Tasks are each part of a "task group", which is a set of tasks that can share interval locks. These are
 * useful for producing sharded segments.</li>
 * <li>Tasks do not need to explicitly release locks; they are released upon task completion. Tasks may choose
 * to release locks early if they desire.</li>
 * </ul>
 */
@JsonTypeInfo(use = JsonTypeInfo.Id.NAME, property = "type")

@@ -50,9 +45,9 @@ import org.joda.time.Interval;
    @JsonSubTypes.Type(name = "merge", value = MergeTask.class),
    @JsonSubTypes.Type(name = "delete", value = DeleteTask.class),
    @JsonSubTypes.Type(name = "kill", value = KillTask.class),
    @JsonSubTypes.Type(name = "move", value = MoveTask.class),
    @JsonSubTypes.Type(name = "archive", value = ArchiveTask.class),
    @JsonSubTypes.Type(name = "index", value = IndexTask.class),
    @JsonSubTypes.Type(name = "index_partitions", value = IndexDeterminePartitionsTask.class),
    @JsonSubTypes.Type(name = "index_generator", value = IndexGeneratorTask.class),
    @JsonSubTypes.Type(name = "index_hadoop", value = HadoopIndexTask.class),
    @JsonSubTypes.Type(name = "index_realtime", value = RealtimeIndexTask.class),
    @JsonSubTypes.Type(name = "noop", value = NoopTask.class),

@@ -96,12 +91,6 @@ public interface Task
   */
  public String getDataSource();

  /**
   * Returns implicit lock interval for this task, if any. Tasks without implicit lock intervals are not granted locks
   * when started and must explicitly request them.
   */
  public Optional<Interval> getImplicitLockInterval();

  /**
   * Returns query runners for this task. If this task is not meant to answer queries over its datasource, this method
   * should return null.

@@ -109,18 +98,19 @@ public interface Task
  public <T> QueryRunner<T> getQueryRunner(Query<T> query);

  /**
   * Execute preflight checks for a task. This typically runs on the coordinator, and will be run while
   * holding a lock on our dataSource and implicit lock interval (if any). If this method throws an exception, the
   * task should be considered a failure.
   * Execute preflight actions for a task. This can be used to acquire locks, check preconditions, and so on. The
   * actions must be idempotent, since this method may be executed multiple times. This typically runs on the
   * coordinator. If this method throws an exception, the task should be considered a failure.
   *
   * This method must be idempotent, as it may be run multiple times per task.
   *
   * @param taskActionClient action client for this task (not the full toolbox)
   *
   * @return Some kind of status (runnable means continue on to a worker, non-runnable means we completed without
   *         using a worker).
   * @return true if ready, false if not ready yet
   *
   * @throws Exception
   * @throws Exception if the task should be considered a failure
   */
  public TaskStatus preflight(TaskActionClient taskActionClient) throws Exception;
  public boolean isReady(TaskActionClient taskActionClient) throws Exception;

  /**
   * Execute a task. This typically runs on a worker as determined by a TaskRunner, and will be run while

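A sketch of the revised contract, not part of the diff: isReady() replaces preflight(), and the boolean it returns drives polling. The precondition field below is hypothetical, purely to illustrate the shape of an implementation.

// Return true to hand the task to a worker, false to be polled again later,
// or throw to fail the task outright. Must be idempotent across calls.
@Override
public boolean isReady(TaskActionClient taskActionClient) throws Exception
{
  // Hypothetical gate: ready only once some external precondition holds.
  return externalPreconditionHolds;
}
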
@@ -23,16 +23,15 @@ import com.fasterxml.jackson.annotation.JsonCreator;
import com.fasterxml.jackson.annotation.JsonIgnore;
import com.fasterxml.jackson.annotation.JsonProperty;
import com.google.common.base.Function;
import com.google.common.collect.Lists;
import com.google.common.base.Preconditions;
import com.google.common.collect.ImmutableList;
import com.google.common.collect.Sets;
import com.metamx.common.ISE;
import com.metamx.common.guava.FunctionalIterable;
import com.metamx.common.logger.Logger;
import io.druid.indexing.common.TaskStatus;
import io.druid.indexing.common.TaskToolbox;
import io.druid.indexing.common.actions.SegmentInsertAction;
import io.druid.indexing.common.actions.SegmentListUsedAction;
import io.druid.indexing.common.actions.SpawnTasksAction;
import io.druid.indexing.common.actions.TaskActionClient;
import io.druid.segment.IndexIO;
import io.druid.segment.loading.SegmentLoadingException;

@@ -48,10 +47,10 @@ import java.util.Map;

/**
 */
public class VersionConverterTask extends AbstractTask
public class VersionConverterTask extends AbstractFixedIntervalTask
{
  private static final String TYPE = "version_converter";
  private static final Integer CURR_VERSION_INTEGER = new Integer(IndexIO.CURRENT_VERSION_ID);
  private static final Integer CURR_VERSION_INTEGER = IndexIO.CURRENT_VERSION_ID;

  private static final Logger log = new Logger(VersionConverterTask.class);

@@ -74,6 +73,8 @@ public class VersionConverterTask extends AbstractTask

  private static String makeId(String dataSource, Interval interval)
  {
    Preconditions.checkNotNull(dataSource, "dataSource");
    Preconditions.checkNotNull(interval, "interval");
    return joinId(TYPE, dataSource, interval.getStart(), interval.getEnd(), new DateTime());
  }

@@ -105,7 +106,6 @@ public class VersionConverterTask extends AbstractTask
  )
  {
    super(id, groupId, dataSource, interval);

    this.segment = segment;
  }

@@ -125,45 +125,43 @@ public class VersionConverterTask extends AbstractTask
  public TaskStatus run(TaskToolbox toolbox) throws Exception
  {
    if (segment == null) {
      throw new ISE("Segment was null, this should never run.", this.getClass().getSimpleName());
    }

    log.info("I'm in a subless mood.");
    convertSegment(toolbox, segment);
    return success();
  }

  @Override
  public TaskStatus preflight(TaskActionClient taskActionClient) throws Exception
  {
    if (segment != null) {
      return super.preflight(taskActionClient);
    }

    List<DataSegment> segments = taskActionClient.submit(defaultListUsedAction());

    final FunctionalIterable<Task> tasks = FunctionalIterable
        .create(segments)
        .keep(
            new Function<DataSegment, Task>()
            {
              @Override
              public Task apply(DataSegment segment)
      final List<DataSegment> segments = toolbox.getTaskActionClient().submit(
          new SegmentListUsedAction(
              getDataSource(),
              getInterval()
          )
      );
      final FunctionalIterable<Task> tasks = FunctionalIterable
          .create(segments)
          .keep(
              new Function<DataSegment, Task>()
              {
                final Integer segmentVersion = segment.getBinaryVersion();
                if (!CURR_VERSION_INTEGER.equals(segmentVersion)) {
                  return new SubTask(getGroupId(), segment);
                @Override
                public Task apply(DataSegment segment)
                {
                  final Integer segmentVersion = segment.getBinaryVersion();
                  if (!CURR_VERSION_INTEGER.equals(segmentVersion)) {
                    return new SubTask(getGroupId(), segment);
                  }

                  log.info("Skipping[%s], already version[%s]", segment.getIdentifier(), segmentVersion);
                  return null;
                }

                log.info("Skipping[%s], already version[%s]", segment.getIdentifier(), segmentVersion);
                return null;
              }
            }
        );
          );

    taskActionClient.submit(new SpawnTasksAction(Lists.newArrayList(tasks)));

    return TaskStatus.success(getId());
      // Vestigial from a past time when this task spawned subtasks.
      for (final Task subTask : tasks) {
        final TaskStatus status = subTask.run(toolbox);
        if (!status.isSuccess()) {
          return status;
        }
      }
    } else {
      log.info("I'm in a subless mood.");
      convertSegment(toolbox, segment);
    }
    return success();
  }

  @Override

@@ -185,7 +183,7 @@ public class VersionConverterTask extends AbstractTask
    return super.equals(o);
  }

  public static class SubTask extends AbstractTask
  public static class SubTask extends AbstractFixedIntervalTask
  {
    @JsonIgnore
    private final DataSegment segment;

@@ -251,7 +249,7 @@ public class VersionConverterTask extends AbstractTask
      }
    }

    final Map<DataSegment, File> localSegments = toolbox.getSegments(Arrays.asList(segment));
    final Map<DataSegment, File> localSegments = toolbox.fetchSegments(Arrays.asList(segment));

    final File location = localSegments.get(segment);
    final File outLocation = new File(location, "v9_out");

@@ -0,0 +1,87 @@
/*
 * Druid - a distributed column store.
 * Copyright (C) 2012, 2013  Metamarkets Group Inc.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version 2
 * of the License, or (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
 */

package io.druid.indexing.common.tasklogs;

import com.google.common.base.Optional;
import com.google.common.io.ByteStreams;
import com.google.common.io.Files;
import com.google.common.io.InputSupplier;
import com.google.inject.Inject;
import com.metamx.common.logger.Logger;
import io.druid.indexing.common.config.FileTaskLogsConfig;
import io.druid.tasklogs.TaskLogs;

import java.io.File;
import java.io.FileInputStream;
import java.io.IOException;
import java.io.InputStream;

public class FileTaskLogs implements TaskLogs
{
  private static final Logger log = new Logger(FileTaskLogs.class);

  private final FileTaskLogsConfig config;

  @Inject
  public FileTaskLogs(
      FileTaskLogsConfig config
  )
  {
    this.config = config;
  }

  @Override
  public void pushTaskLog(final String taskid, File file) throws IOException
  {
    if (!config.getDirectory().exists()) {
      config.getDirectory().mkdir();
    }
    final File outputFile = fileForTask(taskid);
    Files.copy(file, outputFile);
    log.info("Wrote task log to: %s", outputFile);
  }

  @Override
  public Optional<InputSupplier<InputStream>> streamTaskLog(final String taskid, final long offset) throws IOException
  {
    final File file = fileForTask(taskid);
    if (file.exists()) {
      return Optional.<InputSupplier<InputStream>>of(
          new InputSupplier<InputStream>()
          {
            @Override
            public InputStream getInput() throws IOException
            {
              final InputStream inputStream = new FileInputStream(file);
              ByteStreams.skipFully(inputStream, offset);
              return inputStream;
            }
          }
      );
    } else {
      return Optional.absent();
    }
  }

  private File fileForTask(final String taskid)
  {
    return new File(config.getDirectory(), String.format("%s.log", taskid));
  }
}

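Caller-side usage sketch (assumed code, not in the diff; the task id is made up). Note that streamTaskLog here skips `offset` bytes from the start of the file before handing back the stream:

// Stream a stored task log from byte `offset` onward and dump it to stdout.
final Optional<InputSupplier<InputStream>> log = taskLogs.streamTaskLog("example_task_id", 0);
if (log.isPresent()) {
  try (InputStream in = log.get().getInput()) {
    System.out.write(ByteStreams.toByteArray(in));
  }
}
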
@@ -23,29 +23,41 @@ import com.fasterxml.jackson.databind.ObjectMapper;
import com.google.common.base.Function;
import com.google.common.base.Optional;
import com.google.common.base.Preconditions;
import com.google.common.base.Predicate;
import com.google.common.base.Throwables;
import com.google.common.collect.ImmutableList;
import com.google.common.collect.Iterables;
import com.google.common.collect.Lists;
import com.google.common.collect.Maps;
import com.google.inject.Inject;
import com.metamx.common.RetryUtils;
import com.metamx.common.lifecycle.LifecycleStart;
import com.metamx.common.lifecycle.LifecycleStop;
import com.metamx.emitter.EmittingLogger;
import com.mysql.jdbc.exceptions.MySQLTransientException;
import io.druid.db.DbConnector;
import io.druid.db.DbTablesConfig;
import io.druid.indexing.common.TaskLock;
import io.druid.indexing.common.TaskStatus;
import io.druid.indexing.common.actions.TaskAction;
import io.druid.indexing.common.config.TaskStorageConfig;
import io.druid.indexing.common.task.Task;
import org.joda.time.DateTime;
import org.joda.time.Period;
import org.skife.jdbi.v2.Handle;
import org.skife.jdbi.v2.IDBI;
import org.skife.jdbi.v2.exceptions.CallbackFailedException;
import org.skife.jdbi.v2.exceptions.DBIException;
import org.skife.jdbi.v2.exceptions.StatementException;
import org.skife.jdbi.v2.exceptions.UnableToObtainConnectionException;
import org.skife.jdbi.v2.tweak.HandleCallback;

import java.sql.SQLException;
import java.sql.SQLRecoverableException;
import java.sql.SQLTransientException;
import java.util.List;
import java.util.Map;
import java.util.concurrent.Callable;

public class DbTaskStorage implements TaskStorage
{

@@ -53,16 +65,24 @@ public class DbTaskStorage implements TaskStorage
  private final DbConnector dbConnector;
  private final DbTablesConfig dbTables;
  private final IDBI dbi;
  private final TaskStorageConfig config;

  private static final EmittingLogger log = new EmittingLogger(DbTaskStorage.class);

  @Inject
  public DbTaskStorage(ObjectMapper jsonMapper, DbConnector dbConnector, DbTablesConfig dbTables, IDBI dbi)
  public DbTaskStorage(
      final ObjectMapper jsonMapper,
      final DbConnector dbConnector,
      final DbTablesConfig dbTables,
      final IDBI dbi,
      final TaskStorageConfig config
  )
  {
    this.jsonMapper = jsonMapper;
    this.dbConnector = dbConnector;
    this.dbTables = dbTables;
    this.dbi = dbi;
    this.config = config;
  }

  @LifecycleStart

@@ -92,7 +112,7 @@ public class DbTaskStorage implements TaskStorage
    log.info("Inserting task %s with status: %s", task.getId(), status);

    try {
      dbi.withHandle(
      retryingHandle(
          new HandleCallback<Void>()
          {
            @Override

@@ -134,7 +154,7 @@ public class DbTaskStorage implements TaskStorage

    log.info("Updating task %s to status: %s", status.getId(), status);

    int updated = dbi.withHandle(
    int updated = retryingHandle(
        new HandleCallback<Integer>()
        {
          @Override

@@ -162,7 +182,7 @@ public class DbTaskStorage implements TaskStorage
  @Override
  public Optional<Task> getTask(final String taskid)
  {
    return dbi.withHandle(
    return retryingHandle(
        new HandleCallback<Optional<Task>>()
        {
          @Override

@@ -192,7 +212,7 @@ public class DbTaskStorage implements TaskStorage
  @Override
  public Optional<TaskStatus> getStatus(final String taskid)
  {
    return dbi.withHandle(
    return retryingHandle(
        new HandleCallback<Optional<TaskStatus>>()
        {
          @Override

@@ -220,9 +240,9 @@ public class DbTaskStorage implements TaskStorage
  }

  @Override
  public List<Task> getRunningTasks()
  public List<Task> getActiveTasks()
  {
    return dbi.withHandle(
    return retryingHandle(
        new HandleCallback<List<Task>>()
        {
          @Override

@@ -231,7 +251,7 @@ public class DbTaskStorage implements TaskStorage
            final List<Map<String, Object>> dbTasks =
                handle.createQuery(
                    String.format(
                        "SELECT id, payload, status_payload FROM %s WHERE active = 1",
                        "SELECT id, payload, status_payload FROM %s WHERE active = 1 ORDER BY created_date",
                        dbTables.getTasksTable()
                    )
                )

@@ -260,6 +280,45 @@ public class DbTaskStorage implements TaskStorage
    );
  }

  @Override
  public List<TaskStatus> getRecentlyFinishedTaskStatuses()
  {
    final DateTime recent = new DateTime().minus(config.getRecentlyFinishedThreshold());
    return retryingHandle(
        new HandleCallback<List<TaskStatus>>()
        {
          @Override
          public List<TaskStatus> withHandle(Handle handle) throws Exception
          {
            final List<Map<String, Object>> dbTasks =
                handle.createQuery(
                    String.format(
                        "SELECT id, status_payload FROM %s WHERE active = 0 AND created_date >= :recent ORDER BY created_date DESC",
                        dbTables.getTasksTable()
                    )
                ).bind("recent", recent.toString()).list();

            final ImmutableList.Builder<TaskStatus> statuses = ImmutableList.builder();
            for (final Map<String, Object> row : dbTasks) {
              final String id = row.get("id").toString();

              try {
                final TaskStatus status = jsonMapper.readValue((byte[]) row.get("status_payload"), TaskStatus.class);
                if (status.isComplete()) {
                  statuses.add(status);
                }
              }
              catch (Exception e) {
                log.makeAlert(e, "Failed to parse status payload").addData("task", id).emit();
              }
            }

            return statuses.build();
          }
        }
    );
  }

  @Override
  public void addLock(final String taskid, final TaskLock taskLock)
  {

@@ -273,7 +332,7 @@ public class DbTaskStorage implements TaskStorage
        taskid
    );

    dbi.withHandle(
    retryingHandle(
        new HandleCallback<Integer>()
        {
          @Override

@@ -308,7 +367,7 @@ public class DbTaskStorage implements TaskStorage
      if (taskLock.equals(taskLockToRemove)) {
        log.info("Deleting TaskLock with id[%d]: %s", id, taskLock);

        dbi.withHandle(
        retryingHandle(
            new HandleCallback<Integer>()
            {
              @Override

@@ -353,7 +412,7 @@ public class DbTaskStorage implements TaskStorage

    log.info("Logging action for task[%s]: %s", task.getId(), taskAction);

    dbi.withHandle(
    retryingHandle(
        new HandleCallback<Integer>()
        {
          @Override

@@ -376,7 +435,7 @@ public class DbTaskStorage implements TaskStorage
  @Override
  public List<TaskAction> getAuditLogs(final String taskid)
  {
    return dbi.withHandle(
    return retryingHandle(
        new HandleCallback<List<TaskAction>>()
        {
          @Override

@@ -392,21 +451,19 @@ public class DbTaskStorage implements TaskStorage
                .bind("task_id", taskid)
                .list();

            return Lists.transform(
                dbTaskLogs, new Function<Map<String, Object>, TaskAction>()
                {
                  @Override
                  public TaskAction apply(Map<String, Object> row)
                  {
                    try {
                      return jsonMapper.readValue((byte[]) row.get("log_payload"), TaskAction.class);
                    }
                    catch (Exception e) {
                      throw Throwables.propagate(e);
                    }
            final List<TaskAction> retList = Lists.newArrayList();
            for (final Map<String, Object> dbTaskLog : dbTaskLogs) {
              try {
                retList.add(jsonMapper.readValue((byte[]) dbTaskLog.get("log_payload"), TaskAction.class));
              }
              catch (Exception e) {
                log.makeAlert(e, "Failed to deserialize TaskLog")
                   .addData("task", taskid)
                   .addData("logPayload", dbTaskLog)
                   .emit();
              }
            }
            );
            return retList;
          }
        }
    );

@@ -414,7 +471,7 @@ public class DbTaskStorage implements TaskStorage

  private Map<Long, TaskLock> getLocksWithIds(final String taskid)
  {
    return dbi.withHandle(
    return retryingHandle(
        new HandleCallback<Map<Long, TaskLock>>()
        {
          @Override

@@ -432,11 +489,66 @@ public class DbTaskStorage implements TaskStorage

            final Map<Long, TaskLock> retMap = Maps.newHashMap();
            for (final Map<String, Object> row : dbTaskLocks) {
              retMap.put((Long) row.get("id"), jsonMapper.readValue((byte[]) row.get("lock_payload"), TaskLock.class));
              try {
                retMap.put(
                    (Long) row.get("id"),
                    jsonMapper.readValue((byte[]) row.get("lock_payload"), TaskLock.class)
                );
              }
              catch (Exception e) {
                log.makeAlert(e, "Failed to deserialize TaskLock")
                   .addData("task", taskid)
                   .addData("lockPayload", row)
                   .emit();
              }
            }
            return retMap;
          }
        }
    );
  }

  /**
   * Retry SQL operations
   */
  private <T> T retryingHandle(final HandleCallback<T> callback)
  {
    final Callable<T> call = new Callable<T>()
    {
      @Override
      public T call() throws Exception
      {
        return dbi.withHandle(callback);
      }
    };
    final Predicate<Throwable> shouldRetry = new Predicate<Throwable>()
    {
      @Override
      public boolean apply(Throwable e)
      {
        return shouldRetryException(e);
      }
    };
    final int maxTries = 10;
    try {
      return RetryUtils.retry(call, shouldRetry, maxTries);
    }
    catch (RuntimeException e) {
      throw Throwables.propagate(e);
    }
    catch (Exception e) {
      throw new CallbackFailedException(e);
    }
  }

  private static boolean shouldRetryException(final Throwable e)
  {
    return e != null && (e instanceof SQLTransientException
                         || e instanceof MySQLTransientException
                         || e instanceof SQLRecoverableException
                         || e instanceof UnableToObtainConnectionException
                         || (e instanceof SQLException && ((SQLException) e).getErrorCode() == 1317)
                         || (e instanceof SQLException && shouldRetryException(e.getCause()))
                         || (e instanceof DBIException && shouldRetryException(e.getCause())));
  }
}

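The retry wrapper in isolation (a sketch reusing names defined above; someHandleCallback is a hypothetical stand-in for any of the HandleCallbacks this class builds):

// RetryUtils.retry runs the callable up to maxTries times, retrying only
// when the predicate matches the thrown exception (or a cause it wraps).
final List<Task> tasks = RetryUtils.retry(
    new Callable<List<Task>>()
    {
      @Override
      public List<Task> call() throws Exception
      {
        return dbi.withHandle(someHandleCallback); // someHandleCallback is hypothetical
      }
    },
    shouldRetry, // the transient-SQL-error predicate built in retryingHandle()
    10
);
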
@@ -101,12 +101,6 @@ public class ForkingTaskRunner implements TaskRunner, TaskLogStreamer
    this.exec = MoreExecutors.listeningDecorator(Executors.newFixedThreadPool(workerConfig.getCapacity()));
  }

  @Override
  public void bootstrap(List<Task> tasks)
  {
    // do nothing
  }

  @Override
  public ListenableFuture<TaskStatus> run(final Task task)
  {

@@ -115,7 +109,7 @@ public class ForkingTaskRunner implements TaskRunner, TaskLogStreamer
        tasks.put(
            task.getId(),
            new ForkingTaskRunnerWorkItem(
                task,
                task.getId(),
                exec.submit(
                    new Callable<TaskStatus>()
                    {

@@ -224,29 +218,20 @@ public class ForkingTaskRunner implements TaskRunner, TaskLogStreamer
                        }

                        log.info("Logging task %s output to: %s", task.getId(), logFile);

                        final InputStream fromProc = processHolder.process.getInputStream();
                        final OutputStream toLogfile = closer.register(
                            Files.newOutputStreamSupplier(logFile).getOutput()
                        );

                        boolean runFailed = true;

                        ByteStreams.copy(fromProc, toLogfile);
                        final int statusCode = processHolder.process.waitFor();
                        log.info("Process exited with status[%d] for task: %s", statusCode, task.getId());

                        if (statusCode == 0) {
                          runFailed = false;
                        try (final OutputStream toLogfile = Files.newOutputStreamSupplier(logFile).getOutput()) {
                          ByteStreams.copy(processHolder.process.getInputStream(), toLogfile);
                          final int statusCode = processHolder.process.waitFor();
                          log.info("Process exited with status[%d] for task: %s", statusCode, task.getId());
                          if (statusCode == 0) {
                            runFailed = false;
                          }
                        }
                        finally {
                          // Upload task logs
                          taskLogPusher.pushTaskLog(task.getId(), logFile);
                        }

                        // Upload task logs

                        // XXX: Consider uploading periodically for very long-lived tasks to prevent
                        // XXX: bottlenecks at the end or the possibility of losing a lot of logs all
                        // XXX: at once.

                        taskLogPusher.pushTaskLog(task.getId(), logFile);

                        if (!runFailed) {
                          // Process exited successfully

@@ -261,9 +246,9 @@ public class ForkingTaskRunner implements TaskRunner, TaskLogStreamer
                          closer.close();
                        }
                      }
                      catch (Exception e) {
                        log.info(e, "Exception caught during execution");
                        throw Throwables.propagate(e);
                      catch (Throwable t) {
                        log.info(t, "Exception caught during execution");
                        throw Throwables.propagate(t);
                      }
                      finally {
                        try {

@@ -358,6 +343,14 @@ public class ForkingTaskRunner implements TaskRunner, TaskLogStreamer
    }
  }

  @Override
  public Collection<TaskRunnerWorkItem> getKnownTasks()
  {
    synchronized (tasks) {
      return Lists.<TaskRunnerWorkItem>newArrayList(tasks.values());
    }
  }

  @Override
  public Collection<ZkWorker> getWorkers()
  {

@@ -389,7 +382,7 @@ public class ForkingTaskRunner implements TaskRunner, TaskLogStreamer
          if (offset > 0) {
            raf.seek(offset);
          } else if (offset < 0 && offset < rafLength) {
            raf.seek(rafLength + offset);
            raf.seek(Math.max(0, rafLength + offset));
          }
          return Channels.newInputStream(raf.getChannel());
        }

@@ -425,11 +418,11 @@ public class ForkingTaskRunner implements TaskRunner, TaskLogStreamer
    private volatile ProcessHolder processHolder = null;

    private ForkingTaskRunnerWorkItem(
        Task task,
        String taskId,
        ListenableFuture<TaskStatus> statusFuture
    )
    {
      super(task, statusFuture);
      super(taskId, statusFuture);
    }
  }

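The log-tailing fix above, restated as a standalone sketch (logFile is assumed to be an existing file on disk):

// Negative offsets read from end-of-file, clamped so a request larger than
// the file starts at byte 0 instead of seeking to a negative position.
final RandomAccessFile raf = new RandomAccessFile(logFile, "r");
final long rafLength = raf.length();
final long offset = -8192; // e.g. tail the last 8 KB
if (offset > 0) {
  raf.seek(offset);
} else if (offset < 0 && offset < rafLength) {
  raf.seek(Math.max(0, rafLength + offset));
}
final InputStream tail = Channels.newInputStream(raf.getChannel());
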
@@ -19,6 +19,7 @@

package io.druid.indexing.overlord;

import com.google.api.client.util.Lists;
import com.google.common.base.Optional;
import com.google.common.base.Preconditions;
import com.google.common.collect.ArrayListMultimap;

@@ -26,11 +27,15 @@ import com.google.common.collect.HashMultimap;
import com.google.common.collect.ImmutableList;
import com.google.common.collect.Maps;
import com.google.common.collect.Multimap;
import com.google.common.collect.Ordering;
import com.google.inject.Inject;
import com.metamx.common.logger.Logger;
import io.druid.indexing.common.TaskLock;
import io.druid.indexing.common.TaskStatus;
import io.druid.indexing.common.actions.TaskAction;
import io.druid.indexing.common.config.TaskStorageConfig;
import io.druid.indexing.common.task.Task;
import org.joda.time.DateTime;

import java.util.List;
import java.util.Map;

@@ -42,6 +47,8 @@ import java.util.concurrent.locks.ReentrantLock;
 */
public class HeapMemoryTaskStorage implements TaskStorage
{
  private final TaskStorageConfig config;

  private final ReentrantLock giant = new ReentrantLock();
  private final Map<String, TaskStuff> tasks = Maps.newHashMap();
  private final Multimap<String, TaskLock> taskLocks = HashMultimap.create();

@@ -49,6 +56,12 @@ public class HeapMemoryTaskStorage implements TaskStorage

  private static final Logger log = new Logger(HeapMemoryTaskStorage.class);

  @Inject
  public HeapMemoryTaskStorage(TaskStorageConfig config)
  {
    this.config = config;
  }

  @Override
  public void insert(Task task, TaskStatus status)
  {

@@ -69,7 +82,7 @@ public class HeapMemoryTaskStorage implements TaskStorage
      }

      log.info("Inserting task %s with status: %s", task.getId(), status);
      tasks.put(task.getId(), new TaskStuff(task, status));
      tasks.put(task.getId(), new TaskStuff(task, status, new DateTime()));
    } finally {
      giant.unlock();
    }

@@ -128,7 +141,7 @@ public class HeapMemoryTaskStorage implements TaskStorage
  }

  @Override
  public List<Task> getRunningTasks()
  public List<Task> getActiveTasks()
  {
    giant.lock();

@@ -139,13 +152,39 @@ public class HeapMemoryTaskStorage implements TaskStorage
          listBuilder.add(taskStuff.getTask());
        }
      }

      return listBuilder.build();
    } finally {
      giant.unlock();
    }
  }

  @Override
  public List<TaskStatus> getRecentlyFinishedTaskStatuses()
  {
    giant.lock();

    try {
      final List<TaskStatus> returns = Lists.newArrayList();
      final long recent = System.currentTimeMillis() - config.getRecentlyFinishedThreshold().getMillis();
      final Ordering<TaskStuff> createdDateDesc = new Ordering<TaskStuff>()
      {
        @Override
        public int compare(TaskStuff a, TaskStuff b)
        {
          return a.getCreatedDate().compareTo(b.getCreatedDate());
        }
      }.reverse();
      for(final TaskStuff taskStuff : createdDateDesc.sortedCopy(tasks.values())) {
        if(taskStuff.getStatus().isComplete() && taskStuff.getCreatedDate().getMillis() > recent) {
          returns.add(taskStuff.getStatus());
        }
      }
      return returns;
    } finally {
      giant.unlock();
    }
  }

  @Override
  public void addLock(final String taskid, final TaskLock taskLock)
  {

@@ -212,8 +251,9 @@ public class HeapMemoryTaskStorage implements TaskStorage
  {
    final Task task;
    final TaskStatus status;
    final DateTime createdDate;

    private TaskStuff(Task task, TaskStatus status)
    private TaskStuff(Task task, TaskStatus status, DateTime createdDate)
    {
      Preconditions.checkNotNull(task);
      Preconditions.checkNotNull(status);

@@ -221,6 +261,7 @@ public class HeapMemoryTaskStorage implements TaskStorage

      this.task = task;
      this.status = status;
      this.createdDate = Preconditions.checkNotNull(createdDate, "createdDate");
    }

    public Task getTask()

@@ -233,9 +274,14 @@ public class HeapMemoryTaskStorage implements TaskStorage
      return status;
    }

    public DateTime getCreatedDate()
    {
      return createdDate;
    }

    private TaskStuff withStatus(TaskStatus _status)
    {
      return new TaskStuff(task, _status);
      return new TaskStuff(task, _status, createdDate);
    }
  }
}

@ -28,8 +28,6 @@ import com.google.common.collect.Ordering;
|
|||
import com.google.common.collect.Sets;
|
||||
import com.google.inject.Inject;
|
||||
import com.metamx.common.logger.Logger;
|
||||
import io.druid.db.DbConnector;
|
||||
import io.druid.db.DbConnectorConfig;
|
||||
import io.druid.db.DbTablesConfig;
|
||||
import io.druid.timeline.DataSegment;
|
||||
import io.druid.timeline.TimelineObjectHolder;
|
||||
|
@ -44,6 +42,7 @@ import org.skife.jdbi.v2.ResultIterator;
|
|||
import org.skife.jdbi.v2.StatementContext;
|
||||
import org.skife.jdbi.v2.TransactionCallback;
|
||||
import org.skife.jdbi.v2.TransactionStatus;
|
||||
import org.skife.jdbi.v2.exceptions.CallbackFailedException;
|
||||
import org.skife.jdbi.v2.tweak.HandleCallback;
|
||||
|
||||
import java.io.IOException;
|
||||
|
@ -171,39 +170,39 @@ public class IndexerDBCoordinator
|
|||
private boolean announceHistoricalSegment(final Handle handle, final DataSegment segment) throws IOException
|
||||
{
|
||||
try {
|
||||
final List<Map<String, Object>> exists = handle.createQuery(
|
||||
String.format(
|
||||
"SELECT id FROM %s WHERE id = :identifier",
|
||||
dbTables.getSegmentsTable()
|
||||
)
|
||||
).bind(
|
||||
"identifier",
|
||||
segment.getIdentifier()
|
||||
).list();
|
||||
|
||||
if (!exists.isEmpty()) {
|
||||
if (segmentExists(handle, segment)) {
|
||||
log.info("Found [%s] in DB, not updating DB", segment.getIdentifier());
|
||||
return false;
|
||||
}
|
||||
|
||||
handle.createStatement(
|
||||
String.format(
|
||||
"INSERT INTO %s (id, dataSource, created_date, start, end, partitioned, version, used, payload) VALUES (:id, :dataSource, :created_date, :start, :end, :partitioned, :version, :used, :payload)",
|
||||
dbTables.getSegmentsTable()
|
||||
)
|
||||
)
|
||||
.bind("id", segment.getIdentifier())
|
||||
.bind("dataSource", segment.getDataSource())
|
||||
.bind("created_date", new DateTime().toString())
|
||||
.bind("start", segment.getInterval().getStart().toString())
|
||||
.bind("end", segment.getInterval().getEnd().toString())
|
||||
.bind("partitioned", segment.getShardSpec().getPartitionNum())
|
||||
.bind("version", segment.getVersion())
|
||||
.bind("used", true)
|
||||
.bind("payload", jsonMapper.writeValueAsString(segment))
|
||||
.execute();
|
||||
// Try/catch to work around races due to SELECT -> INSERT. Avoid ON DUPLICATE KEY since it's not portable.
|
||||
try {
|
||||
handle.createStatement(
|
||||
String.format(
|
||||
"INSERT INTO %s (id, dataSource, created_date, start, end, partitioned, version, used, payload) "
|
||||
+ "VALUES (:id, :dataSource, :created_date, :start, :end, :partitioned, :version, :used, :payload)",
|
||||
dbTables.getSegmentsTable()
|
||||
)
|
||||
)
|
||||
.bind("id", segment.getIdentifier())
|
||||
.bind("dataSource", segment.getDataSource())
|
||||
.bind("created_date", new DateTime().toString())
|
||||
.bind("start", segment.getInterval().getStart().toString())
|
||||
.bind("end", segment.getInterval().getEnd().toString())
|
||||
.bind("partitioned", segment.getShardSpec().getPartitionNum())
|
||||
.bind("version", segment.getVersion())
|
||||
.bind("used", true)
|
||||
.bind("payload", jsonMapper.writeValueAsString(segment))
|
||||
.execute();
|
||||
|
||||
log.info("Published segment [%s] to DB", segment.getIdentifier());
|
||||
log.info("Published segment [%s] to DB", segment.getIdentifier());
|
||||
} catch (Exception e) {
|
||||
if (e.getCause() instanceof SQLException && segmentExists(handle, segment)) {
|
||||
log.info("Found [%s] in DB, not updating DB", segment.getIdentifier());
|
||||
} else {
|
||||
throw e;
|
||||
}
|
||||
}
|
||||
}
|
||||
catch (IOException e) {
|
||||
log.error(e, "Exception inserting into DB");
|
||||
|
@ -213,6 +212,38 @@ public class IndexerDBCoordinator
|
|||
return true;
|
||||
}
|
||||
|
||||
private boolean segmentExists(final Handle handle, final DataSegment segment) {
|
||||
final List<Map<String, Object>> exists = handle.createQuery(
|
||||
String.format(
|
||||
"SELECT id FROM %s WHERE id = :identifier",
|
||||
dbTables.getSegmentsTable()
|
||||
)
|
||||
).bind(
|
||||
"identifier",
|
||||
segment.getIdentifier()
|
||||
).list();
|
||||
|
||||
return !exists.isEmpty();
|
||||
}
|
||||
|
||||
public void updateSegmentMetadata(final Set<DataSegment> segments) throws IOException
|
||||
{
|
||||
dbi.inTransaction(
|
||||
new TransactionCallback<Void>()
|
||||
{
|
||||
@Override
|
||||
public Void inTransaction(Handle handle, TransactionStatus transactionStatus) throws Exception
|
||||
{
|
||||
for(final DataSegment segment : segments) {
|
||||
updatePayload(handle, segment);
|
||||
}
|
||||
|
||||
return null;
|
||||
}
|
||||
}
|
||||
);
|
||||
}
|
||||
|
||||
public void deleteSegments(final Set<DataSegment> segments) throws IOException
|
||||
{
|
||||
dbi.inTransaction(
|
||||
|
@ -235,10 +266,27 @@ public class IndexerDBCoordinator
|
|||
{
|
||||
handle.createStatement(
|
||||
String.format("DELETE from %s WHERE id = :id", dbTables.getSegmentsTable())
|
||||
).bind("id", segment.getIdentifier())
|
||||
)
|
||||
.bind("id", segment.getIdentifier())
|
||||
.execute();
|
||||
}
|
||||
|
||||
private void updatePayload(final Handle handle, final DataSegment segment) throws IOException
|
||||
{
|
||||
try {
|
||||
handle.createStatement(
|
||||
String.format("UPDATE %s SET payload = :payload WHERE id = :id", dbTables.getSegmentsTable())
|
||||
)
|
||||
.bind("id", segment.getIdentifier())
|
||||
.bind("payload", jsonMapper.writeValueAsString(segment))
|
||||
.execute();
|
||||
}
|
||||
catch (IOException e) {
|
||||
log.error(e, "Exception inserting into DB");
|
||||
throw e;
|
||||
}
|
||||
}
|
||||
|
||||
public List<DataSegment> getUnusedSegmentsForInterval(final String dataSource, final Interval interval)
|
||||
{
|
||||
List<DataSegment> matchingSegments = dbi.withHandle(
|
||||
|
|
|
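Aside: the announceHistoricalSegment hunk above replaces a racy SELECT-then-INSERT with a try/catch that re-checks existence after an INSERT failure, which is only safe if the id column is a primary or unique key so the racing INSERT actually errors out. A minimal standalone sketch of the same idea, in plain JDBC rather than the JDBI fluent API used here; the "segments" table, its columns, and the method names are illustrative assumptions, not the actual schema:

import java.sql.Connection;
import java.sql.PreparedStatement;
import java.sql.ResultSet;
import java.sql.SQLException;

public class IdempotentInsert
{
  // Returns true if this call inserted the row, false if it already existed.
  // Assumes "id" is a primary or unique key, so a concurrent duplicate INSERT fails.
  public static boolean insertIfAbsent(Connection conn, String id, String payload) throws SQLException
  {
    if (exists(conn, id)) {
      return false; // someone else already published this row
    }
    try (PreparedStatement insert =
             conn.prepareStatement("INSERT INTO segments (id, payload) VALUES (?, ?)")) {
      insert.setString(1, id);
      insert.setString(2, payload);
      insert.executeUpdate();
      return true;
    }
    catch (SQLException e) {
      // A concurrent writer may have inserted between our SELECT and INSERT.
      // If the row is now present, treat the duplicate-key failure as already-done.
      if (exists(conn, id)) {
        return false;
      }
      throw e;
    }
  }

  private static boolean exists(Connection conn, String id) throws SQLException
  {
    try (PreparedStatement select =
             conn.prepareStatement("SELECT id FROM segments WHERE id = ?")) {
      select.setString(1, id);
      try (ResultSet rs = select.executeQuery()) {
        return rs.next();
      }
    }
  }
}

As the diff's own comment notes, this stays on portable SQL instead of vendor-specific upserts like ON DUPLICATE KEY UPDATE.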
@@ -27,11 +27,14 @@ import com.google.common.base.Preconditions;
import com.google.common.base.Stopwatch;
import com.google.common.base.Supplier;
import com.google.common.base.Throwables;
+import com.google.common.collect.ImmutableList;
+import com.google.common.collect.Iterables;
import com.google.common.collect.Lists;
-import com.google.common.collect.Maps;
import com.google.common.collect.Sets;
import com.google.common.io.InputSupplier;
import com.google.common.primitives.Ints;
+import com.google.common.util.concurrent.FutureCallback;
+import com.google.common.util.concurrent.Futures;
import com.google.common.util.concurrent.ListenableFuture;
import com.google.common.util.concurrent.SettableFuture;
import com.metamx.common.ISE;

@@ -51,8 +54,8 @@ import io.druid.indexing.worker.TaskAnnouncement;
import io.druid.indexing.worker.Worker;
import io.druid.server.initialization.ZkPathsConfig;
import io.druid.tasklogs.TaskLogStreamer;
+import org.apache.commons.lang.mutable.MutableInt;
import org.apache.curator.framework.CuratorFramework;
-import org.apache.curator.framework.recipes.cache.ChildData;
import org.apache.curator.framework.recipes.cache.PathChildrenCache;
import org.apache.curator.framework.recipes.cache.PathChildrenCacheEvent;
import org.apache.curator.framework.recipes.cache.PathChildrenCacheListener;

@@ -72,6 +75,7 @@ import java.util.Map;
import java.util.TreeSet;
import java.util.concurrent.Callable;
import java.util.concurrent.ConcurrentHashMap;
+import java.util.concurrent.ConcurrentMap;
import java.util.concurrent.ExecutionException;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;

@@ -108,11 +112,15 @@ public class RemoteTaskRunner implements TaskRunner, TaskLogStreamer
  private final HttpClient httpClient;

  // all workers that exist in ZK
- private final Map<String, ZkWorker> zkWorkers = new ConcurrentHashMap<String, ZkWorker>();
+ private final ConcurrentMap<String, ZkWorker> zkWorkers = new ConcurrentHashMap<>();
+ // payloads of pending tasks, which we remember just long enough to assign to workers
+ private final ConcurrentMap<String, Task> pendingTaskPayloads = new ConcurrentHashMap<>();
+ // tasks that have not yet been assigned to a worker
+ private final RemoteTaskRunnerWorkQueue pendingTasks = new RemoteTaskRunnerWorkQueue();
  // all tasks that have been assigned to a worker
  private final RemoteTaskRunnerWorkQueue runningTasks = new RemoteTaskRunnerWorkQueue();
- // tasks that have not yet run
- private final RemoteTaskRunnerWorkQueue pendingTasks = new RemoteTaskRunnerWorkQueue();
+ // tasks that are complete but not cleaned up yet
+ private final RemoteTaskRunnerWorkQueue completeTasks = new RemoteTaskRunnerWorkQueue();

  private final ExecutorService runPendingTasksExec = Executors.newSingleThreadExecutor();

@@ -148,6 +156,9 @@ public class RemoteTaskRunner implements TaskRunner, TaskLogStreamer
        return;
      }

+     final MutableInt waitingFor = new MutableInt(1);
+     final Object waitingForMonitor = new Object();
+
      // Add listener for creation/deletion of workers
      workerPathCache.getListenable().addListener(
          new PathChildrenCacheListener()

@@ -162,7 +173,32 @@ public class RemoteTaskRunner implements TaskRunner, TaskLogStreamer
                    event.getData().getData(),
                    Worker.class
                );
-               addWorker(worker, PathChildrenCache.StartMode.NORMAL);
+               synchronized (waitingForMonitor) {
+                 waitingFor.increment();
+               }
+               Futures.addCallback(
+                   addWorker(worker),
+                   new FutureCallback<ZkWorker>()
+                   {
+                     @Override
+                     public void onSuccess(ZkWorker zkWorker)
+                     {
+                       synchronized (waitingForMonitor) {
+                         waitingFor.decrement();
+                         waitingForMonitor.notifyAll();
+                       }
+                     }
+
+                     @Override
+                     public void onFailure(Throwable throwable)
+                     {
+                       synchronized (waitingForMonitor) {
+                         waitingFor.decrement();
+                         waitingForMonitor.notifyAll();
+                       }
+                     }
+                   }
+               );
                break;
              case CHILD_REMOVED:
                worker = jsonMapper.readValue(

@@ -171,22 +207,23 @@ public class RemoteTaskRunner implements TaskRunner, TaskLogStreamer
                );
                removeWorker(worker);
                break;
+             case INITIALIZED:
+               synchronized (waitingForMonitor) {
+                 waitingFor.decrement();
+                 waitingForMonitor.notifyAll();
+               }
              default:
                break;
            }
          }
        }
      );
-     workerPathCache.start(PathChildrenCache.StartMode.BUILD_INITIAL_CACHE);
-
-     for (ChildData childData : workerPathCache.getCurrentData()) {
-       final Worker worker = jsonMapper.readValue(
-           childData.getData(),
-           Worker.class
-       );
-       addWorker(worker, PathChildrenCache.StartMode.BUILD_INITIAL_CACHE);
-     }
+     workerPathCache.start(PathChildrenCache.StartMode.POST_INITIALIZED_EVENT);
+     synchronized (waitingForMonitor) {
+       while (waitingFor.intValue() > 0) {
+         waitingForMonitor.wait();
+       }
+     }

      started = true;
    }
    catch (Exception e) {

@@ -201,7 +238,7 @@ public class RemoteTaskRunner implements TaskRunner, TaskLogStreamer
    if (!started) {
      return;
    }

-   started = false;
    for (ZkWorker zkWorker : zkWorkers.values()) {
      zkWorker.close();
    }

@@ -210,27 +247,31 @@ public class RemoteTaskRunner implements TaskRunner, TaskLogStreamer
    catch (Exception e) {
      throw Throwables.propagate(e);
    }
+   finally {
+     started = false;
+   }
  }

  @Override
  public Collection<ZkWorker> getWorkers()
  {
-   return zkWorkers.values();
+   return ImmutableList.copyOf(zkWorkers.values());
  }

  @Override
  public Collection<RemoteTaskRunnerWorkItem> getRunningTasks()
  {
-   return runningTasks.values();
+   return ImmutableList.copyOf(runningTasks.values());
  }

  @Override
  public Collection<RemoteTaskRunnerWorkItem> getPendingTasks()
  {
-   return pendingTasks.values();
+   return ImmutableList.copyOf(pendingTasks.values());
  }

+ @Override
+ public Collection<RemoteTaskRunnerWorkItem> getKnownTasks()
+ {
+   // Racey, since there is a period of time during assignment when a task is neither pending nor running
+   return ImmutableList.copyOf(Iterables.concat(pendingTasks.values(), runningTasks.values(), completeTasks.values()));
+ }

  public ZkWorker findWorkerRunningTask(String taskId)

@@ -243,46 +284,10 @@ public class RemoteTaskRunner implements TaskRunner, TaskLogStreamer
    return null;
  }

- public boolean isWorkerRunningTask(Worker worker, Task task)
+ public boolean isWorkerRunningTask(Worker worker, String taskId)
  {
    ZkWorker zkWorker = zkWorkers.get(worker.getHost());

-   return (zkWorker != null && zkWorker.isRunningTask(task.getId()));
- }
-
- @Override
- public void bootstrap(List<Task> tasks)
- {
-   try {
-     if (!started) {
-       throw new ISE("Must start RTR first before calling bootstrap!");
-     }
-
-     Map<String, Worker> existingTasks = Maps.newHashMap();
-     for (ZkWorker zkWorker : zkWorkers.values()) {
-       for (String runningTask : zkWorker.getRunningTasks().keySet()) {
-         existingTasks.put(runningTask, zkWorker.getWorker());
-       }
-     }
-
-     for (Task task : tasks) {
-       Worker worker = existingTasks.get(task.getId());
-       if (worker != null) {
-         log.info("Bootstrap found [%s] running on [%s].", task.getId(), worker.getHost());
-         runningTasks.put(
-             task.getId(),
-             new RemoteTaskRunnerWorkItem(
-                 task,
-                 SettableFuture.<TaskStatus>create(),
-                 worker
-             )
-         );
-       }
-     }
-   }
-   catch (Exception e) {
-     throw Throwables.propagate(e);
-   }
+   return (zkWorker != null && zkWorker.isRunningTask(taskId));
  }

  /**

@@ -293,8 +298,11 @@ public class RemoteTaskRunner implements TaskRunner, TaskLogStreamer
  @Override
  public ListenableFuture<TaskStatus> run(final Task task)
  {
-   RemoteTaskRunnerWorkItem runningTask = runningTasks.get(task.getId());
-   if (runningTask != null) {
+   final RemoteTaskRunnerWorkItem completeTask, runningTask, pendingTask;
+   if ((pendingTask = pendingTasks.get(task.getId())) != null) {
+     log.info("Assigned a task[%s] that is already pending, not doing anything", task.getId());
+     return pendingTask.getResult();
+   } else if ((runningTask = runningTasks.get(task.getId())) != null) {
      ZkWorker zkWorker = findWorkerRunningTask(task.getId());
      if (zkWorker == null) {
        log.warn("Told to run task[%s], but no worker has started running it yet.", task.getId());

@@ -302,26 +310,15 @@ public class RemoteTaskRunner implements TaskRunner, TaskLogStreamer
        log.info("Task[%s] already running on %s.", task.getId(), zkWorker.getWorker().getHost());
        TaskAnnouncement announcement = zkWorker.getRunningTasks().get(task.getId());
        if (announcement.getTaskStatus().isComplete()) {
-         taskComplete(runningTask, zkWorker, task.getId(), announcement.getTaskStatus());
+         taskComplete(runningTask, zkWorker, announcement.getTaskStatus());
        }
      }

      return runningTask.getResult();
+   } else if ((completeTask = completeTasks.get(task.getId())) != null) {
+     return completeTask.getResult();
+   } else {
+     return addPendingTask(task).getResult();
    }
-
-   RemoteTaskRunnerWorkItem pendingTask = pendingTasks.get(task.getId());
-   if (pendingTask != null) {
-     log.info("Assigned a task[%s] that is already pending, not doing anything", task.getId());
-     return pendingTask.getResult();
-   }
-
-   RemoteTaskRunnerWorkItem taskRunnerWorkItem = new RemoteTaskRunnerWorkItem(
-       task,
-       SettableFuture.<TaskStatus>create(),
-       null
-   );
-   addPendingTask(taskRunnerWorkItem);
-   return taskRunnerWorkItem.getResult();
  }

  /**

@@ -330,39 +327,43 @@ public class RemoteTaskRunner implements TaskRunner, TaskLogStreamer
   * @param taskId - task id to shutdown
   */
  @Override
- public void shutdown(String taskId)
+ public void shutdown(final String taskId)
  {
-   if (pendingTasks.containsKey(taskId)) {
-     pendingTasks.remove(taskId);
-     return;
-   }
-
-   final ZkWorker zkWorker = findWorkerRunningTask(taskId);
-
-   if (zkWorker == null) {
-     log.info("Can't shutdown! No worker running task %s", taskId);
-     return;
-   }
-
-   try {
-     final URL url = makeWorkerURL(zkWorker.getWorker(), String.format("/task/%s/shutdown", taskId));
-     final StatusResponseHolder response = httpClient.post(url)
-                                                     .go(RESPONSE_HANDLER)
-                                                     .get();
-
-     log.info(
-         "Sent shutdown message to worker: %s, status %s, response: %s",
-         zkWorker.getWorker().getHost(),
-         response.getStatus(),
-         response.getContent()
-     );
-
-     if (!response.getStatus().equals(HttpResponseStatus.ACCEPTED)) {
-       log.error("Shutdown failed for %s! Are you sure the task was running?", taskId);
-     }
-   }
-   catch (Exception e) {
-     throw Throwables.propagate(e);
-   }
+   if (!started) {
+     log.info("This TaskRunner is stopped. Ignoring shutdown command for task: %s", taskId);
+   } else if (pendingTasks.remove(taskId) != null) {
+     pendingTaskPayloads.remove(taskId);
+     log.info("Removed task from pending queue: %s", taskId);
+   } else if (completeTasks.containsKey(taskId)) {
+     cleanup(completeTasks.get(taskId).getWorker().getHost(), taskId);
+   } else {
+     final ZkWorker zkWorker = findWorkerRunningTask(taskId);
+
+     if (zkWorker == null) {
+       log.info("Can't shutdown! No worker running task %s", taskId);
+       return;
+     }
+
+     try {
+       final URL url = makeWorkerURL(zkWorker.getWorker(), String.format("/task/%s/shutdown", taskId));
+       final StatusResponseHolder response = httpClient.post(url)
+                                                       .go(RESPONSE_HANDLER)
+                                                       .get();
+
+       log.info(
+           "Sent shutdown message to worker: %s, status %s, response: %s",
+           zkWorker.getWorker().getHost(),
+           response.getStatus(),
+           response.getContent()
+       );
+
+       if (!response.getStatus().equals(HttpResponseStatus.ACCEPTED)) {
+         log.error("Shutdown failed for %s! Are you sure the task was running?", taskId);
+       }
+     }
+     catch (Exception e) {
+       throw Throwables.propagate(e);
+     }
+   }
  }

@@ -417,12 +418,18 @@ public class RemoteTaskRunner implements TaskRunner, TaskLogStreamer
  /**
   * Adds a task to the pending queue
   */
- private void addPendingTask(final RemoteTaskRunnerWorkItem taskRunnerWorkItem)
+ private RemoteTaskRunnerWorkItem addPendingTask(final Task task)
  {
-   log.info("Added pending task %s", taskRunnerWorkItem.getTask().getId());
-
-   pendingTasks.put(taskRunnerWorkItem.getTask().getId(), taskRunnerWorkItem);
+   log.info("Added pending task %s", task.getId());
+   final RemoteTaskRunnerWorkItem taskRunnerWorkItem = new RemoteTaskRunnerWorkItem(
+       task.getId(),
+       SettableFuture.<TaskStatus>create(),
+       null
+   );
+   pendingTaskPayloads.put(task.getId(), task);
+   pendingTasks.put(task.getId(), taskRunnerWorkItem);
    runPendingTasks();
+   return taskRunnerWorkItem;
  }

  /**

@@ -439,11 +446,14 @@ public class RemoteTaskRunner implements TaskRunner, TaskLogStreamer
          public Void call() throws Exception
          {
            try {
-             // make a copy of the pending tasks because assignTask may delete tasks from pending and move them
+             // make a copy of the pending tasks because tryAssignTask may delete tasks from pending and move them
              // into running status
              List<RemoteTaskRunnerWorkItem> copy = Lists.newArrayList(pendingTasks.values());
-             for (RemoteTaskRunnerWorkItem taskWrapper : copy) {
-               assignTask(taskWrapper);
+             for (RemoteTaskRunnerWorkItem taskRunnerWorkItem : copy) {
+               String taskId = taskRunnerWorkItem.getTaskId();
+               if (tryAssignTask(pendingTaskPayloads.get(taskId), taskRunnerWorkItem)) {
+                 pendingTaskPayloads.remove(taskId);
+               }
              }
            }
            catch (Exception e) {

@@ -457,21 +467,30 @@ public class RemoteTaskRunner implements TaskRunner, TaskLogStreamer
  }

  /**
-  * Removes a task from the running queue and clears out the ZK status path of the task.
+  * Removes a task from the complete queue and clears out the ZK status path of the task.
   *
   * @param workerId - the worker that was previously running the task
   * @param taskId - the task to cleanup
   */
  private void cleanup(final String workerId, final String taskId)
  {
-   log.info("Cleaning up [%s]", taskId);
-   runningTasks.remove(taskId);
-   final String statusPath = JOINER.join(zkPaths.getIndexerStatusPath(), workerId, taskId);
-   try {
-     cf.delete().guaranteed().forPath(statusPath);
-   }
-   catch (Exception e) {
-     log.info("Tried to delete status path[%s] that didn't exist! Must've gone away already?", statusPath);
+   if (!started) {
+     return;
+   }
+   if (completeTasks.remove(taskId) == null) {
+     log.makeAlert("WTF?! Asked to cleanup nonexistent task")
+        .addData("workerId", workerId)
+        .addData("taskId", taskId)
+        .emit();
+   } else {
+     log.info("Cleaning up task[%s] on worker[%s]", taskId, workerId);
+     final String statusPath = JOINER.join(zkPaths.getIndexerStatusPath(), workerId, taskId);
+     try {
+       cf.delete().guaranteed().forPath(statusPath);
+     }
+     catch (Exception e) {
+       log.info("Tried to delete status path[%s] that didn't exist! Must've gone away already?", statusPath);
+     }
    }
  }

@@ -481,26 +500,34 @@ public class RemoteTaskRunner implements TaskRunner, TaskLogStreamer
   * needs to bootstrap after a restart.
   *
   * @param taskRunnerWorkItem - the task to assign
+  * @return true iff the task is now assigned
   */
- private void assignTask(RemoteTaskRunnerWorkItem taskRunnerWorkItem)
+ private boolean tryAssignTask(final Task task, final RemoteTaskRunnerWorkItem taskRunnerWorkItem)
  {
    try {
-     final String taskId = taskRunnerWorkItem.getTask().getId();
+     Preconditions.checkNotNull(task, "task");
+     Preconditions.checkNotNull(taskRunnerWorkItem, "taskRunnerWorkItem");
+     Preconditions.checkArgument(task.getId().equals(taskRunnerWorkItem.getTaskId()), "task id != workItem id");

-     if (runningTasks.containsKey(taskId) || findWorkerRunningTask(taskId) != null) {
-       log.info("Task[%s] already running.", taskId);
+     if (runningTasks.containsKey(task.getId()) || findWorkerRunningTask(task.getId()) != null) {
+       log.info("Task[%s] already running.", task.getId());
+       return true;
      } else {
        // Nothing running this task, announce it in ZK for a worker to run it
-       ZkWorker zkWorker = findWorkerForTask(taskRunnerWorkItem.getTask());
+       ZkWorker zkWorker = findWorkerForTask(task);
        if (zkWorker != null) {
-         announceTask(zkWorker, taskRunnerWorkItem);
+         announceTask(task, zkWorker, taskRunnerWorkItem);
+         return true;
+       } else {
+         return false;
        }
      }
    }
    catch (Exception e) {
      log.makeAlert(e, "Exception while trying to run task")
-        .addData("taskId", taskRunnerWorkItem.getTask().getId())
+        .addData("taskId", taskRunnerWorkItem.getTaskId())
         .emit();
+     return false;
    }
  }

@@ -511,9 +538,13 @@ public class RemoteTaskRunner implements TaskRunner, TaskLogStreamer
   * @param theZkWorker        The worker the task is assigned to
   * @param taskRunnerWorkItem The task to be assigned
   */
- private void announceTask(ZkWorker theZkWorker, RemoteTaskRunnerWorkItem taskRunnerWorkItem) throws Exception
+ private void announceTask(
+     final Task task,
+     final ZkWorker theZkWorker,
+     final RemoteTaskRunnerWorkItem taskRunnerWorkItem
+ ) throws Exception
  {
-   final Task task = taskRunnerWorkItem.getTask();
+   Preconditions.checkArgument(task.getId().equals(taskRunnerWorkItem.getTaskId()), "task id != workItem id");
    final Worker theWorker = theZkWorker.getWorker();

    log.info("Coordinator asking Worker[%s] to add task[%s]", theWorker.getHost(), task.getId());

@@ -550,7 +581,7 @@ public class RemoteTaskRunner implements TaskRunner, TaskLogStreamer
    Stopwatch timeoutStopwatch = new Stopwatch();
    timeoutStopwatch.start();
    synchronized (statusLock) {
-     while (!isWorkerRunningTask(theWorker, task)) {
+     while (!isWorkerRunningTask(theWorker, task.getId())) {
        final long waitMs = config.getTaskAssignmentTimeout().toStandardDuration().getMillis();
        statusLock.wait(waitMs);
        long elapsed = timeoutStopwatch.elapsed(TimeUnit.MILLISECONDS);

@@ -563,7 +594,7 @@ public class RemoteTaskRunner implements TaskRunner, TaskLogStreamer
            config.getTaskAssignmentTimeout()
        );

-       taskComplete(taskRunnerWorkItem, theZkWorker, task.getId(), TaskStatus.failure(task.getId()));
+       taskComplete(taskRunnerWorkItem, theZkWorker, TaskStatus.failure(task.getId()));
        break;
      }
    }

@@ -575,15 +606,17 @@ public class RemoteTaskRunner implements TaskRunner, TaskLogStreamer
   * the worker. Status changes indicate the creation or completion of a task.
   * The RemoteTaskRunner updates state according to these changes.
   *
-  * @param worker - contains metadata for a worker that has appeared in ZK
+  * @param worker contains metadata for a worker that has appeared in ZK
+  * @return future that will contain a fully initialized worker
   */
- private ZkWorker addWorker(final Worker worker, PathChildrenCache.StartMode startMode)
+ private ListenableFuture<ZkWorker> addWorker(final Worker worker)
  {
    log.info("Worker[%s] reportin' for duty!", worker.getHost());

    try {
      final String workerStatusPath = JOINER.join(zkPaths.getIndexerStatusPath(), worker.getHost());
      final PathChildrenCache statusCache = pathChildrenCacheFactory.make(cf, workerStatusPath);
+     final SettableFuture<ZkWorker> retVal = SettableFuture.create();
      final ZkWorker zkWorker = new ZkWorker(
          worker,
          statusCache,

@@ -597,8 +630,8 @@ public class RemoteTaskRunner implements TaskRunner, TaskLogStreamer
          @Override
          public void childEvent(CuratorFramework client, PathChildrenCacheEvent event) throws Exception
          {
-           String taskId;
-           RemoteTaskRunnerWorkItem taskRunnerWorkItem;
+           final String taskId;
+           final RemoteTaskRunnerWorkItem taskRunnerWorkItem;
            synchronized (statusLock) {
              try {
                switch (event.getType()) {

@@ -617,19 +650,27 @@ public class RemoteTaskRunner implements TaskRunner, TaskLogStreamer
                    );

                    // Synchronizing state with ZK
-                   statusLock.notify();
+                   statusLock.notifyAll();

-                   taskRunnerWorkItem = runningTasks.get(taskId);
-                   if (taskRunnerWorkItem == null) {
+                   final RemoteTaskRunnerWorkItem tmp;
+                   if ((tmp = runningTasks.get(taskId)) != null) {
+                     taskRunnerWorkItem = tmp;
+                   } else {
                      log.warn(
-                         "WTF?! Worker[%s] announcing a status for a task I didn't know about: %s",
+                         "Worker[%s] announced a status for a task I didn't know about, adding to runningTasks: %s",
                          zkWorker.getWorker().getHost(),
                          taskId
                      );
+                     taskRunnerWorkItem = new RemoteTaskRunnerWorkItem(
+                         taskId,
+                         SettableFuture.<TaskStatus>create(),
+                         zkWorker.getWorker()
+                     );
+                     runningTasks.put(taskId, taskRunnerWorkItem);
                    }

                    if (taskStatus.isComplete()) {
-                     taskComplete(taskRunnerWorkItem, zkWorker, taskId, taskStatus);
+                     taskComplete(taskRunnerWorkItem, zkWorker, taskStatus);
                      runPendingTasks();
                    }
                    break;

@@ -638,11 +679,26 @@ public class RemoteTaskRunner implements TaskRunner, TaskLogStreamer
                  taskRunnerWorkItem = runningTasks.remove(taskId);
                  if (taskRunnerWorkItem != null) {
                    log.info("Task[%s] just disappeared!", taskId);
-                   taskRunnerWorkItem.setResult(TaskStatus.failure(taskRunnerWorkItem.getTask().getId()));
+                   taskRunnerWorkItem.setResult(TaskStatus.failure(taskRunnerWorkItem.getTaskId()));
                  } else {
                    log.info("Task[%s] went bye bye.", taskId);
                  }
                  break;
+               case INITIALIZED:
+                 if (zkWorkers.putIfAbsent(worker.getHost(), zkWorker) == null) {
+                   retVal.set(zkWorker);
+                 } else {
+                   final String message = String.format(
+                       "WTF?! Tried to add already-existing worker[%s]",
+                       worker.getHost()
+                   );
+                   log.makeAlert(message)
+                      .addData("workerHost", worker.getHost())
+                      .addData("workerIp", worker.getIp())
+                      .emit();
+                   retVal.setException(new IllegalStateException(message));
+                 }
+                 runPendingTasks();
                }
              }
              catch (Exception e) {

@@ -655,13 +711,8 @@ public class RemoteTaskRunner implements TaskRunner, TaskLogStreamer
            }
          }
      );

-     zkWorker.start(startMode);
-     zkWorkers.put(worker.getHost(), zkWorker);
-
-     runPendingTasks();
-
-     return zkWorker;
+     zkWorker.start();
+     return retVal;
    }
    catch (Exception e) {
      throw Throwables.propagate(e);

@@ -707,7 +758,7 @@ public class RemoteTaskRunner implements TaskRunner, TaskLogStreamer
    }

    log.info("Failing task[%s]", assignedTask);
-   taskRunnerWorkItem.setResult(TaskStatus.failure(taskRunnerWorkItem.getTask().getId()));
+   taskRunnerWorkItem.setResult(TaskStatus.failure(taskRunnerWorkItem.getTaskId()));
  } else {
    log.warn("RemoteTaskRunner has no knowledge of task[%s]", assignedTask);
  }

@@ -763,19 +814,27 @@ public class RemoteTaskRunner implements TaskRunner, TaskLogStreamer
  private void taskComplete(
      RemoteTaskRunnerWorkItem taskRunnerWorkItem,
      ZkWorker zkWorker,
-     String taskId,
      TaskStatus taskStatus
  )
  {
-   if (taskRunnerWorkItem != null) {
-     final ListenableFuture<TaskStatus> result = taskRunnerWorkItem.getResult();
-     if (result != null) {
-       ((SettableFuture<TaskStatus>) result).set(taskStatus);
-     }
-   }
-
+   Preconditions.checkNotNull(taskRunnerWorkItem, "taskRunnerWorkItem");
+   Preconditions.checkNotNull(zkWorker, "zkWorker");
+   Preconditions.checkNotNull(taskStatus, "taskStatus");
+   log.info(
+       "Worker[%s] completed task[%s] with status[%s]",
+       zkWorker.getWorker().getHost(),
+       taskStatus.getId(),
+       taskStatus.getStatusCode()
+   );
    // Worker is done with this task
    zkWorker.setLastCompletedTaskTime(new DateTime());
-   cleanup(zkWorker.getWorker().getHost(), taskId);
+   // Move from running -> complete
+   completeTasks.put(taskStatus.getId(), taskRunnerWorkItem);
+   runningTasks.remove(taskStatus.getId());
+   // Notify interested parties
+   final ListenableFuture<TaskStatus> result = taskRunnerWorkItem.getResult();
+   if (result != null) {
+     ((SettableFuture<TaskStatus>) result).set(taskStatus);
+   }
  }
}
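Aside: the start() rewrite above swaps the synchronous BUILD_INITIAL_CACHE bootstrap for futures plus a counted monitor, so startup blocks until every discovered worker's status cache reports ready. A minimal sketch of that waitingFor idiom with plain JDK monitors; the class and method names are illustrative, and the initial count of 1 stands in for the cache's INITIALIZED event, just as in the diff:

public class WorkerStartupLatch
{
  private final Object waitingForMonitor = new Object();
  // Starts at 1: the extra count is released by the INITIALIZED event, which is
  // guaranteed to arrive after all initial worker-discovery events have been seen.
  private int waitingFor = 1;

  public void workerDiscovered()
  {
    synchronized (waitingForMonitor) {
      waitingFor++;
    }
  }

  // Called when a worker finishes initializing, and once more for INITIALIZED itself.
  public void countDown()
  {
    synchronized (waitingForMonitor) {
      waitingFor--;
      waitingForMonitor.notifyAll();
    }
  }

  public void awaitAllReady() throws InterruptedException
  {
    synchronized (waitingForMonitor) {
      while (waitingFor > 0) {
        waitingForMonitor.wait();
      }
    }
  }
}

Unlike a java.util.concurrent.CountDownLatch, the count here can still grow while waiting, which is exactly what the discovery listener needs.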
@@ -21,7 +21,6 @@ package io.druid.indexing.overlord;

import com.google.common.util.concurrent.SettableFuture;
import io.druid.indexing.common.TaskStatus;
-import io.druid.indexing.common.task.Task;
import io.druid.indexing.worker.Worker;
import org.joda.time.DateTime;

@@ -33,25 +32,25 @@ public class RemoteTaskRunnerWorkItem extends TaskRunnerWorkItem
  private final Worker worker;

  public RemoteTaskRunnerWorkItem(
-     Task task,
+     String taskId,
      SettableFuture<TaskStatus> result,
      Worker worker
  )
  {
-   super(task, result);
+   super(taskId, result);
    this.result = result;
    this.worker = worker;
  }

  public RemoteTaskRunnerWorkItem(
-     Task task,
+     String taskId,
      SettableFuture<TaskStatus> result,
      DateTime createdTime,
      DateTime queueInsertionTime,
      Worker worker
  )
  {
-   super(task, result, createdTime, queueInsertionTime);
+   super(taskId, result, createdTime, queueInsertionTime);
    this.result = result;
    this.worker = worker;
  }

@@ -69,11 +68,11 @@ public class RemoteTaskRunnerWorkItem extends TaskRunnerWorkItem
  @Override
  public RemoteTaskRunnerWorkItem withQueueInsertionTime(DateTime time)
  {
-   return new RemoteTaskRunnerWorkItem(getTask(), result, getCreatedTime(), time, worker);
+   return new RemoteTaskRunnerWorkItem(getTaskId(), result, getCreatedTime(), time, worker);
  }

  public RemoteTaskRunnerWorkItem withWorker(Worker theWorker)
  {
-   return new RemoteTaskRunnerWorkItem(getTask(), result, getCreatedTime(), getQueueInsertionTime(), theWorker);
+   return new RemoteTaskRunnerWorkItem(getTaskId(), result, getCreatedTime(), getQueueInsertionTime(), theWorker);
  }
}
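Aside: keying the work item on taskId plus a SettableFuture is what lets run(task) return immediately while taskComplete(...) fires the result later. A hedged, self-contained sketch of that handoff using Guava's SettableFuture; String stands in for TaskStatus, and the class and method names are illustrative:

import com.google.common.util.concurrent.FutureCallback;
import com.google.common.util.concurrent.Futures;
import com.google.common.util.concurrent.SettableFuture;

public class TaskResultDemo
{
  public static void main(String[] args)
  {
    // The runner hands this future to the caller of run(task) ...
    final SettableFuture<String> result = SettableFuture.create();

    Futures.addCallback(
        result,
        new FutureCallback<String>()
        {
          @Override
          public void onSuccess(String status)
          {
            System.out.println("task finished with status: " + status);
          }

          @Override
          public void onFailure(Throwable t)
          {
            t.printStackTrace();
          }
        }
    );

    // ... and completes it later, when the worker reports a terminal status.
    result.set("SUCCESS");
  }
}

Dropping the Task payload from the work item means a completed or restored item only needs the id, which is all the status path in ZK carries anyway.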
@@ -19,17 +19,20 @@

package io.druid.indexing.overlord;

import com.google.api.client.repackaged.com.google.common.base.Preconditions;
import com.google.common.base.Function;
import com.google.common.base.Objects;
import com.google.common.base.Optional;
import com.google.common.base.Predicate;
import com.google.common.collect.ComparisonChain;
import com.google.common.collect.ImmutableList;
import com.google.common.collect.Iterables;
import com.google.common.collect.Lists;
import com.google.common.collect.Maps;
import com.google.common.collect.Ordering;
import com.google.common.collect.Sets;
import com.google.inject.Inject;
import com.metamx.common.IAE;
import com.metamx.common.Pair;
import com.metamx.common.guava.Comparators;
import com.metamx.common.guava.FunctionalIterable;
import com.metamx.emitter.EmittingLogger;

@@ -73,8 +76,91 @@ public class TaskLockbox
  }

  /**
-  * Locks a task without removing it from the queue. Blocks until the lock is acquired. Throws an exception
-  * if the lock cannot be acquired.
+  * Wipe out our current in-memory state and resync it from our bundled {@link io.druid.indexing.overlord.TaskStorage}.
   */
+ public void syncFromStorage()
+ {
+   giant.lock();
+
+   try {
+     // Load stuff from taskStorage first. If this fails, we don't want to lose all our locks.
+     final List<Pair<Task, TaskLock>> storedLocks = Lists.newArrayList();
+     for (final Task task : taskStorage.getActiveTasks()) {
+       for (final TaskLock taskLock : taskStorage.getLocks(task.getId())) {
+         storedLocks.add(Pair.of(task, taskLock));
+       }
+     }
+     // Sort locks by version, so we add them back in the order they were acquired.
+     final Ordering<Pair<Task, TaskLock>> byVersionOrdering = new Ordering<Pair<Task, TaskLock>>()
+     {
+       @Override
+       public int compare(Pair<Task, TaskLock> left, Pair<Task, TaskLock> right)
+       {
+         // The second compare shouldn't be necessary, but, whatever.
+         return ComparisonChain.start()
+                               .compare(left.rhs.getVersion(), right.rhs.getVersion())
+                               .compare(left.lhs.getId(), right.lhs.getId())
+                               .result();
+       }
+     };
+     running.clear();
+     // Bookkeeping for a log message at the end
+     final Set<String> uniqueTaskIds = Sets.newHashSet();
+     int taskLockCount = 0;
+     for (final Pair<Task, TaskLock> taskAndLock : byVersionOrdering.sortedCopy(storedLocks)) {
+       final Task task = taskAndLock.lhs;
+       final TaskLock savedTaskLock = taskAndLock.rhs;
+       if (savedTaskLock.getInterval().toDurationMillis() <= 0) {
+         // "Impossible", but you never know what crazy stuff can be restored from storage.
+         log.warn("WTF?! Got lock with empty interval for task: %s", task.getId());
+         continue;
+       }
+       uniqueTaskIds.add(task.getId());
+       final Optional<TaskLock> acquiredTaskLock = tryLock(
+           task,
+           savedTaskLock.getInterval(),
+           Optional.of(savedTaskLock.getVersion())
+       );
+       if (acquiredTaskLock.isPresent() && savedTaskLock.getVersion().equals(acquiredTaskLock.get().getVersion())) {
+         taskLockCount ++;
+         log.info(
+             "Reacquired lock on interval[%s] version[%s] for task: %s",
+             savedTaskLock.getInterval(),
+             savedTaskLock.getVersion(),
+             task.getId()
+         );
+       } else if (acquiredTaskLock.isPresent()) {
+         taskLockCount ++;
+         log.info(
+             "Could not reacquire lock on interval[%s] version[%s] (got version[%s] instead) for task: %s",
+             savedTaskLock.getInterval(),
+             savedTaskLock.getVersion(),
+             acquiredTaskLock.get().getVersion(),
+             task.getId()
+         );
+       } else {
+         log.info(
+             "Could not reacquire lock on interval[%s] version[%s] for task: %s",
+             savedTaskLock.getInterval(),
+             savedTaskLock.getVersion(),
+             task.getId()
+         );
+       }
+     }
+     log.info(
+         "Synced %,d locks for %,d tasks from storage (%,d locks ignored).",
+         taskLockCount,
+         uniqueTaskIds.size(),
+         storedLocks.size() - taskLockCount
+     );
+   } finally {
+     giant.unlock();
+   }
+ }
+
+ /**
+  * Acquires a lock on behalf of a task. Blocks until the lock is acquired. Throws an exception if the lock
+  * cannot be acquired.
+  */
  public TaskLock lock(final Task task, final Interval interval) throws InterruptedException
  {

@@ -97,7 +183,8 @@ public class TaskLockbox
   * Attempt to lock a task, without removing it from the queue. Equivalent to the long form of {@code tryLock}
   * with no preferred version.
   *
-  * @param task task to attempt to lock
+  * @param task task that wants a lock
   * @param interval interval to lock
   *
   * @return lock version if lock was acquired, absent otherwise
   */

@@ -113,22 +200,18 @@ public class TaskLockbox
   * is only mostly guaranteed, however; we assume clock monotonicity and we assume that callers specifying
   * {@code preferredVersion} are doing the right thing.
   *
-  * @param task task to attempt to lock
+  * @param task task that wants a lock
   * @param interval interval to lock
   * @param preferredVersion use this version string if one has not yet been assigned
   *
   * @return lock version if lock was acquired, absent otherwise
   */
- public Optional<TaskLock> tryLock(final Task task, final Interval interval, final Optional<String> preferredVersion)
+ private Optional<TaskLock> tryLock(final Task task, final Interval interval, final Optional<String> preferredVersion)
  {
    giant.lock();

    try {
-     if(task.getImplicitLockInterval().isPresent() && !task.getImplicitLockInterval().get().equals(interval)) {
-       // Task may only lock its fixed interval, if present
-       throw new IAE("Task must lock its fixed interval: %s", task.getId());
-     }
-
+     Preconditions.checkArgument(interval.toDurationMillis() > 0, "interval empty");
      final String dataSource = task.getDataSource();
      final List<TaskLockPosse> foundPosses = findLockPossesForInterval(dataSource, interval);
      final TaskLockPosse posseToUse;

@@ -184,9 +267,10 @@ public class TaskLockbox
      if (posseToUse.getTaskIds().add(task.getId())) {
        log.info("Added task[%s] to TaskLock[%s]", task.getId(), posseToUse.getTaskLock().getGroupId());

-       // Best effort to update task storage facility
+       // Update task storage facility. If it fails, revoke the lock.
        try {
          taskStorage.addLock(task.getId(), posseToUse.getTaskLock());
+         return Optional.of(posseToUse.getTaskLock());
        } catch(Exception e) {
          log.makeAlert("Failed to persist lock in storage")
             .addData("task", task.getId())

@@ -194,12 +278,13 @@ public class TaskLockbox
             .addData("interval", posseToUse.getTaskLock().getInterval())
             .addData("version", posseToUse.getTaskLock().getVersion())
             .emit();
+         unlock(task, interval);
+         return Optional.absent();
        }
      } else {
        log.info("Task[%s] already present in TaskLock[%s]", task.getId(), posseToUse.getTaskLock().getGroupId());
+       return Optional.of(posseToUse.getTaskLock());
      }
-
-     return Optional.of(posseToUse.getTaskLock());
    }
    finally {
      giant.unlock();

@@ -271,7 +356,7 @@ public class TaskLockbox
      // Wake up blocking-lock waiters
      lockReleaseCondition.signalAll();

-     // Best effort to remove lock from storage
+     // Remove lock from storage. If it cannot be removed, just ignore the failure.
      try {
        taskStorage.removeLock(task.getId(), taskLock);
      } catch(Exception e) {

@@ -315,20 +400,6 @@ public class TaskLockbox
    }
  }

- /**
-  * Removes all locks from this lockbox.
-  */
- public void clear()
- {
-   giant.lock();
-
-   try {
-     running.clear();
-   } finally {
-     giant.unlock();
-   }
- }
-
  /**
   * Return the currently-active lock posses for some task.
   *

@@ -341,17 +412,12 @@ public class TaskLockbox
    try {
      final Iterable<TaskLockPosse> searchSpace;

-     if (task.getImplicitLockInterval().isPresent()) {
-       // Narrow down search using findLockPossesForInterval
-       searchSpace = findLockPossesForInterval(task.getDataSource(), task.getImplicitLockInterval().get());
+     // Scan through all locks for this datasource
+     final NavigableMap<Interval, TaskLockPosse> dsRunning = running.get(task.getDataSource());
+     if(dsRunning == null) {
+       searchSpace = ImmutableList.of();
      } else {
-       // Scan through all locks for this datasource
-       final NavigableMap<Interval, TaskLockPosse> dsRunning = running.get(task.getDataSource());
-       if(dsRunning == null) {
-         searchSpace = ImmutableList.of();
-       } else {
-         searchSpace = dsRunning.values();
-       }
+       searchSpace = dsRunning.values();
      }

      return ImmutableList.copyOf(
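Aside: syncFromStorage above replays persisted locks in version order, so reacquisition mirrors the order in which the locks were originally granted. A small sketch of that sort-then-replay step with plain JDK types; the SavedLock shape and the assumption that versions compare lexically in grant order (e.g. ISO timestamps) are illustrative:

import java.util.ArrayList;
import java.util.Collections;
import java.util.Comparator;
import java.util.List;

public class LockReplayDemo
{
  static class SavedLock
  {
    final String taskId;
    final String version; // assumed: lexical order of versions == acquisition order

    SavedLock(String taskId, String version)
    {
      this.taskId = taskId;
      this.version = version;
    }
  }

  // Replays stored locks oldest-version-first, breaking ties on task id,
  // mirroring the ComparisonChain ordering used in the hunk above.
  public static void replay(List<SavedLock> storedLocks)
  {
    final List<SavedLock> sorted = new ArrayList<SavedLock>(storedLocks);
    Collections.sort(
        sorted,
        new Comparator<SavedLock>()
        {
          @Override
          public int compare(SavedLock left, SavedLock right)
          {
            int cmp = left.version.compareTo(right.version);
            return cmp != 0 ? cmp : left.taskId.compareTo(right.taskId);
          }
        }
    );
    for (SavedLock lock : sorted) {
      System.out.println("reacquiring lock for " + lock.taskId + " @ version " + lock.version);
    }
  }
}

Replaying in grant order matters because later locks may depend on earlier ones occupying their dataSource + interval slots.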
@@ -34,7 +34,7 @@ import io.druid.guice.annotations.Self;
import io.druid.indexing.common.actions.TaskActionClient;
import io.druid.indexing.common.actions.TaskActionClientFactory;
import io.druid.indexing.common.task.Task;
-import io.druid.indexing.overlord.exec.TaskConsumer;
+import io.druid.indexing.overlord.config.TaskQueueConfig;
import io.druid.indexing.overlord.scaling.ResourceManagementScheduler;
import io.druid.indexing.overlord.scaling.ResourceManagementSchedulerFactory;
import io.druid.server.DruidNode;

@@ -56,20 +56,22 @@ public class TaskMaster
  private final LeaderSelector leaderSelector;
  private final ReentrantLock giant = new ReentrantLock();
  private final Condition mayBeStopped = giant.newCondition();
- private final TaskQueue taskQueue;
  private final TaskActionClientFactory taskActionClientFactory;

- private final AtomicReference<Lifecycle> leaderLifecycleRef = new AtomicReference<Lifecycle>(null);
+ private final AtomicReference<Lifecycle> leaderLifecycleRef = new AtomicReference<>(null);

  private volatile boolean leading = false;
  private volatile TaskRunner taskRunner;
+ private volatile TaskQueue taskQueue;
  private volatile ResourceManagementScheduler resourceManagementScheduler;

  private static final EmittingLogger log = new EmittingLogger(TaskMaster.class);

  @Inject
  public TaskMaster(
-     final TaskQueue taskQueue,
+     final TaskQueueConfig taskQueueConfig,
+     final TaskLockbox taskLockbox,
+     final TaskStorage taskStorage,
      final TaskActionClientFactory taskActionClientFactory,
      @Self final DruidNode node,
      final ZkPathsConfig zkPaths,
@@ -80,118 +82,99 @@ public class TaskMaster
      final ServiceEmitter emitter
  )
  {
-   this.taskQueue = taskQueue;
    this.taskActionClientFactory = taskActionClientFactory;

    this.leaderSelector = new LeaderSelector(
-       curator, zkPaths.getIndexerLeaderLatchPath(), new LeaderSelectorListener()
-       {
-         @Override
-         public void takeLeadership(CuratorFramework client) throws Exception
-         {
-           giant.lock();
+       curator,
+       zkPaths.getIndexerLeaderLatchPath(),
+       new LeaderSelectorListener()
+       {
+         @Override
+         public void takeLeadership(CuratorFramework client) throws Exception
+         {
+           giant.lock();

-           try {
-             log.info("By the power of Grayskull, I have the power!");
+           try {
+             // Make sure the previous leadership cycle is really, really over.
+             stopLeading();

-             taskRunner = runnerFactory.build();
-             final TaskConsumer taskConsumer = new TaskConsumer(
-                 taskQueue,
-                 taskRunner,
-                 taskActionClientFactory,
-                 emitter
-             );
+             // I AM THE MASTER OF THE UNIVERSE.
+             log.info("By the power of Grayskull, I have the power!");
+             taskLockbox.syncFromStorage();
+             taskRunner = runnerFactory.build();
+             taskQueue = new TaskQueue(
+                 taskQueueConfig,
+                 taskStorage,
+                 taskRunner,
+                 taskActionClientFactory,
+                 taskLockbox,
+                 emitter
+             );

-             // Bootstrap task queue and task lockbox (load state stuff from the database)
-             taskQueue.bootstrap();

-             // Sensible order to start stuff:
-             final Lifecycle leaderLifecycle = new Lifecycle();
-             if (leaderLifecycleRef.getAndSet(leaderLifecycle) != null) {
-               log.makeAlert("TaskMaster set a new Lifecycle without the old one being cleared! Race condition")
-                  .emit();
-             }

-             leaderLifecycle.addManagedInstance(taskRunner);
-             leaderLifecycle.addHandler(
-                 new Lifecycle.Handler()
-                 {
-                   @Override
-                   public void start() throws Exception
-                   {
-                     taskRunner.bootstrap(taskQueue.snapshot());
-                   }

-                   @Override
-                   public void stop()
-                   {
+             // Sensible order to start stuff:
+             final Lifecycle leaderLifecycle = new Lifecycle();
+             if (leaderLifecycleRef.getAndSet(leaderLifecycle) != null) {
+               log.makeAlert("TaskMaster set a new Lifecycle without the old one being cleared! Race condition")
+                  .emit();
+             }
+             leaderLifecycle.addManagedInstance(taskRunner);
+             if (taskRunner instanceof RemoteTaskRunner) {
+               final ScheduledExecutorFactory executorFactory = ScheduledExecutors.createFactory(leaderLifecycle);
+               resourceManagementScheduler = managementSchedulerFactory.build(
+                   (RemoteTaskRunner) taskRunner,
+                   executorFactory
+               );
+               leaderLifecycle.addManagedInstance(resourceManagementScheduler);
+             }
+             leaderLifecycle.addManagedInstance(taskQueue);
+             leaderLifecycle.addHandler(
+                 new Lifecycle.Handler()
+                 {
+                   @Override
+                   public void start() throws Exception
+                   {
+                     serviceAnnouncer.announce(node);
+                   }

+                   @Override
+                   public void stop()
+                   {
+                     serviceAnnouncer.unannounce(node);
+                   }
+                 }
+             );
+             try {
+               leaderLifecycle.start();
+               leading = true;
+               while (leading && !Thread.currentThread().isInterrupted()) {
+                 mayBeStopped.await();
+               }
+             }
-             );
-             leaderLifecycle.addManagedInstance(taskQueue);

-             leaderLifecycle.addHandler(
-                 new Lifecycle.Handler()
-                 {
-                   @Override
-                   public void start() throws Exception
-                   {
-                     serviceAnnouncer.announce(node);
-                   }

-                   @Override
-                   public void stop()
-                   {
-                     serviceAnnouncer.unannounce(node);
-                   }
+             catch (InterruptedException e) {
+               // Suppress so we can bow out gracefully
+             }
-                 }
-             );
-             leaderLifecycle.addManagedInstance(taskConsumer);

-             if (taskRunner instanceof RemoteTaskRunner) {
-               final ScheduledExecutorFactory executorFactory = ScheduledExecutors.createFactory(leaderLifecycle);
-               resourceManagementScheduler = managementSchedulerFactory.build(
-                   (RemoteTaskRunner) taskRunner,
-                   executorFactory
-               );
-               leaderLifecycle.addManagedInstance(resourceManagementScheduler);
-             }

-             try {
-               leaderLifecycle.start();
-               leading = true;

-               while (leading && !Thread.currentThread().isInterrupted()) {
-                 mayBeStopped.await();
+             finally {
+               log.info("Bowing out!");
+               stopLeading();
+             }
+           }
+           catch (Exception e) {
+             log.makeAlert(e, "Failed to lead").emit();
+             throw Throwables.propagate(e);
+           }
+           finally {
+             giant.unlock();
+           }
+         }
-               }
-             }
-             catch (InterruptedException e) {
-               // Suppress so we can bow out gracefully
-             }
-             finally {
-               log.info("Bowing out!");
-               stopLeading();
-             }
-           }
-           catch (Exception e) {
-             log.makeAlert(e, "Failed to lead").emit();
-             throw Throwables.propagate(e);
-           }
-           finally {
-             giant.unlock();
-           }
-         }

+         @Override
+         public void stateChanged(CuratorFramework client, ConnectionState newState)
+         {
+           if (newState == ConnectionState.LOST || newState == ConnectionState.SUSPENDED) {
+             // disconnected from zk. assume leadership is gone
+             stopLeading();
-         @Override
-         public void stateChanged(CuratorFramework client, ConnectionState newState)
-         {
-           if (newState == ConnectionState.LOST || newState == ConnectionState.SUSPENDED) {
-             // disconnected from zk. assume leadership is gone
-             stopLeading();
            }
          }
        }
    );

    leaderSelector.setId(node.getHost());
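Aside: takeLeadership must block for as long as the node stays leader, which TaskMaster implements with a lock, a condition, and a volatile flag. A minimal sketch of that park/notify pattern with the leader-election framework wiring omitted; the class and method names are illustrative:

import java.util.concurrent.locks.Condition;
import java.util.concurrent.locks.ReentrantLock;

public class LeaderParkDemo
{
  private final ReentrantLock giant = new ReentrantLock();
  private final Condition mayBeStopped = giant.newCondition();
  private volatile boolean leading = false;

  // Called by the leader-election callback; must not return until leadership
  // is relinquished, or the election framework will assume we gave it up.
  public void takeLeadership() throws InterruptedException
  {
    giant.lock();
    try {
      leading = true;
      while (leading && !Thread.currentThread().isInterrupted()) {
        mayBeStopped.await(); // parks here for the entire leadership term
      }
    }
    finally {
      leading = false;
      giant.unlock();
    }
  }

  // Called on shutdown or when the ZK session is lost/suspended.
  public void stopLeading()
  {
    giant.lock();
    try {
      leading = false;
      mayBeStopped.signalAll(); // wakes takeLeadership so it can return
    }
    finally {
      giant.unlock();
    }
  }
}

Treating SUSPENDED the same as LOST, as the stateChanged handler above does, errs on the safe side: a node that cannot see ZooKeeper stops acting as leader immediately.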
@@ -19,172 +19,102 @@

package io.druid.indexing.overlord;

+import com.google.api.client.util.Maps;
import com.google.common.base.Function;
import com.google.common.base.Optional;
import com.google.common.base.Preconditions;
import com.google.common.base.Throwables;
-import com.google.common.collect.ArrayListMultimap;
import com.google.common.collect.ImmutableList;
import com.google.common.collect.ImmutableSet;
import com.google.common.collect.Lists;
-import com.google.common.collect.Multimap;
import com.google.common.collect.Ordering;
import com.google.common.collect.Sets;
+import com.google.common.util.concurrent.FutureCallback;
+import com.google.common.util.concurrent.Futures;
import com.google.common.util.concurrent.ListenableFuture;
+import com.google.common.util.concurrent.ThreadFactoryBuilder;
import com.google.inject.Inject;
import com.metamx.common.ISE;
+import com.metamx.common.concurrent.ScheduledExecutors;
import com.metamx.common.lifecycle.LifecycleStart;
import com.metamx.common.lifecycle.LifecycleStop;
import com.metamx.emitter.EmittingLogger;
-import io.druid.indexing.common.TaskLock;
+import com.metamx.emitter.service.ServiceEmitter;
+import com.metamx.emitter.service.ServiceMetricEvent;
import io.druid.indexing.common.TaskStatus;
import io.druid.indexing.common.actions.TaskActionClientFactory;
import io.druid.indexing.common.task.Task;
+import io.druid.indexing.overlord.config.TaskQueueConfig;
import org.joda.time.DateTime;
import org.joda.time.Duration;

import java.util.List;
import java.util.Map;
import java.util.Set;
+import java.util.concurrent.Callable;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
+import java.util.concurrent.ScheduledExecutorService;
import java.util.concurrent.locks.Condition;
import java.util.concurrent.locks.ReentrantLock;

/**
- * Interface between task producers and task consumers.
+ * Interface between task producers and the task runner.
  * <p/>
- * The queue accepts tasks from producers using {@link #add} and delivers tasks to consumers using either
- * {@link #take} or {@link #poll}. Ordering is mostly-FIFO, with deviations when the natural next task would conflict
- * with a currently-running task. In that case, tasks are skipped until a runnable one is found.
+ * This object accepts tasks from producers using {@link #add} and manages delivery of these tasks to a
+ * {@link TaskRunner}. Tasks will run in a mostly-FIFO order, with deviations when the natural next task is not ready
+ * in time (based on its {@link Task#isReady} method).
  * <p/>
- * To manage locking, the queue keeps track of currently-running tasks as {@link io.druid.indexing.common.TaskLock} objects. The idea is that
- * only one TaskLock can be running on a particular dataSource + interval, and that TaskLock has a single version
- * string that all tasks in the group must use to publish segments. Tasks in the same TaskLock may run concurrently.
- * <p/>
- * For persistence, the queue saves new tasks from {@link #add} and task status updates from {@link #notify} using a
- * {@link TaskStorage} obj
- * <p/>
- * To support leader election of our containing system, the queue can be stopped (in which case it will not accept
- * any new tasks, or hand out any more tasks, until started again).
+ * For persistence, we save all new tasks and task status changes using a {@link TaskStorage} object.
 */
public class TaskQueue
{
- private final List<Task> queue = Lists.newLinkedList();
+ private final List<Task> tasks = Lists.newArrayList();
+ private final Map<String, ListenableFuture<TaskStatus>> taskFutures = Maps.newHashMap();

+ private final TaskQueueConfig config;
  private final TaskStorage taskStorage;
+ private final TaskRunner taskRunner;
+ private final TaskActionClientFactory taskActionClientFactory;
  private final TaskLockbox taskLockbox;
+ private final ServiceEmitter emitter;

  private final ReentrantLock giant = new ReentrantLock();
- private final Condition workMayBeAvailable = giant.newCondition();
+ private final Condition managementMayBeNecessary = giant.newCondition();
+ private final ExecutorService managerExec = Executors.newSingleThreadExecutor(
+     new ThreadFactoryBuilder()
+         .setDaemon(false)
+         .setNameFormat("TaskQueue-Manager").build()
+ );
+ private final ScheduledExecutorService storageSyncExec = Executors.newSingleThreadScheduledExecutor(
+     new ThreadFactoryBuilder()
+         .setDaemon(false)
+         .setNameFormat("TaskQueue-StorageSync").build()
+ );

  private volatile boolean active = false;

  private static final EmittingLogger log = new EmittingLogger(TaskQueue.class);

  @Inject
- public TaskQueue(TaskStorage taskStorage, TaskLockbox taskLockbox)
+ public TaskQueue(
+     TaskQueueConfig config,
+     TaskStorage taskStorage,
+     TaskRunner taskRunner,
+     TaskActionClientFactory taskActionClientFactory,
+     TaskLockbox taskLockbox,
+     ServiceEmitter emitter
+ )
  {
+   this.config = Preconditions.checkNotNull(config, "config");
    this.taskStorage = Preconditions.checkNotNull(taskStorage, "taskStorage");
+   this.taskRunner = Preconditions.checkNotNull(taskRunner, "taskRunner");
+   this.taskActionClientFactory = Preconditions.checkNotNull(taskActionClientFactory, "taskActionClientFactory");
    this.taskLockbox = Preconditions.checkNotNull(taskLockbox, "taskLockbox");
+   this.emitter = Preconditions.checkNotNull(emitter, "emitter");
  }

  /**
-  * Bootstraps this task queue and associated task lockbox. Clears the lockbox before running. Should be called
-  * while the queue is stopped. It is not a good idea to start the queue if this method fails.
-  */
- public void bootstrap()
- {
-   // NOTE: Bootstraps can resurrect bogus stuff caused by leader races or whatevs.
-
-   // We may want to periodically fixup the database to refer to what we think is happening, to prevent
-   // this from occurring and also so that bogus stuff is detected by clients in a timely manner.
-
-   giant.lock();
-
-   try {
-     Preconditions.checkState(!active, "queue must be stopped");
-
-     log.info("Bootstrapping queue (and associated lockbox)");
-
-     queue.clear();
-     taskLockbox.clear();
-
-     // Get all running tasks and their locks
-     final Multimap<TaskLock, Task> tasksByLock = ArrayListMultimap.create();
-
-     for (final Task task : taskStorage.getRunningTasks()) {
-       try {
-         final List<TaskLock> taskLocks = taskStorage.getLocks(task.getId());
-
-         queue.add(task);
-
-         for (final TaskLock taskLock : taskLocks) {
-           tasksByLock.put(taskLock, task);
-         }
-       }
-       catch (Exception e) {
-         log.makeAlert("Failed to bootstrap task").addData("task", task.getId()).emit();
-         throw Throwables.propagate(e);
-       }
-     }
-
-     // Sort locks by version
-     final Ordering<Map.Entry<TaskLock, Task>> byVersionOrdering = new Ordering<Map.Entry<TaskLock, Task>>()
-     {
-       @Override
-       public int compare(Map.Entry<TaskLock, Task> left, Map.Entry<TaskLock, Task> right)
-       {
-         return left.getKey().getVersion().compareTo(right.getKey().getVersion());
-       }
-     };
-
-     // Acquire as many locks as possible, in version order
-     for(final Map.Entry<TaskLock, Task> taskAndLock : byVersionOrdering.sortedCopy(tasksByLock.entries())) {
-       final Task task = taskAndLock.getValue();
-       final TaskLock savedTaskLock = taskAndLock.getKey();
-
-       final Optional<TaskLock> acquiredTaskLock = taskLockbox.tryLock(
-           task,
-           savedTaskLock.getInterval(),
-           Optional.of(savedTaskLock.getVersion())
-       );
-
-       if(acquiredTaskLock.isPresent() && savedTaskLock.getVersion().equals(acquiredTaskLock.get().getVersion())) {
-         log.info(
-             "Reacquired lock on interval[%s] version[%s] for task: %s",
-             savedTaskLock.getInterval(),
-             savedTaskLock.getVersion(),
-             task.getId()
-         );
-       } else if(acquiredTaskLock.isPresent()) {
-         log.info(
-             "Could not reacquire lock on interval[%s] version[%s] (got version[%s] instead) for task: %s",
-             savedTaskLock.getInterval(),
-             savedTaskLock.getVersion(),
-             acquiredTaskLock.get().getVersion(),
-             task.getId()
-         );
-       } else {
-         log.info(
-             "Could not reacquire lock on interval[%s] version[%s] for task: %s",
-             savedTaskLock.getInterval(),
-             savedTaskLock.getVersion(),
-             task.getId()
-         );
-       }
-     }
-
-     log.info("Bootstrapped %,d tasks with %,d locks. Ready to go!", queue.size(), tasksByLock.keySet().size());
-   } finally {
-     giant.unlock();
-   }
- }
-
- /**
-  * Returns an immutable snapshot of the current status of this queue.
-  */
- public List<Task> snapshot()
- {
-   giant.lock();
-
-   try {
-     return ImmutableList.copyOf(queue);
-   } finally {
-     giant.unlock();
-   }
- }
-
- /**
-  * Starts this task queue. Allows {@link #add(Task)} to accept new tasks. This should not be called on
-  * an already-started queue.
+  * Starts this task queue. Allows {@link #add(Task)} to accept new tasks.
   */
  @LifecycleStart
  public void start()

@@ -193,9 +123,63 @@ public class TaskQueue

    try {
      Preconditions.checkState(!active, "queue must be stopped");

      active = true;
-     workMayBeAvailable.signalAll();
+     syncFromStorage();
+     managerExec.submit(
+         new Runnable()
+         {
+           @Override
+           public void run()
+           {
+             while (true) {
+               try {
+                 manage();
+                 break;
+               }
+               catch (InterruptedException e) {
+                 log.info("Interrupted, exiting!");
+                 break;
+               }
+               catch (Exception e) {
+                 final long restartDelay = config.getRestartDelay().getMillis();
+                 log.makeAlert(e, "Failed to manage").addData("restartDelay", restartDelay).emit();
+                 try {
+                   Thread.sleep(restartDelay);
+                 }
+                 catch (InterruptedException e2) {
+                   log.info("Interrupted, exiting!");
+                   break;
+                 }
+               }
+             }
+           }
+         }
+     );
+     ScheduledExecutors.scheduleAtFixedRate(
+         storageSyncExec,
+         config.getStorageSyncRate(),
+         new Callable<ScheduledExecutors.Signal>()
+         {
+           @Override
+           public ScheduledExecutors.Signal call()
+           {
+             try {
+               syncFromStorage();
+             }
+             catch (Exception e) {
+               if (active) {
+                 log.makeAlert(e, "Failed to sync with storage").emit();
+               }
+             }
+             if (active) {
+               return ScheduledExecutors.Signal.REPEAT;
+             } else {
+               return ScheduledExecutors.Signal.STOP;
+             }
+           }
+         }
+     );
+     managementMayBeNecessary.signalAll();
    }
    finally {
      giant.unlock();

@@ -203,8 +187,7 @@ public class TaskQueue
  }

  /**
-  * Shuts down the queue, for now. This may safely be called on an already-stopped queue. The queue may be restarted
-  * if desired.
+  * Shuts down the queue.
   */
  @LifecycleStop
  public void stop()

@@ -212,16 +195,100 @@ public class TaskQueue
    giant.lock();

    try {
-     log.info("Naptime! Shutting down until we are started again.");
-     queue.clear();
-     taskLockbox.clear();
+     tasks.clear();
+     taskFutures.clear();
      active = false;
+     managerExec.shutdownNow();
+     storageSyncExec.shutdownNow();
+     managementMayBeNecessary.signalAll();
    }
    finally {
      giant.unlock();
    }
  }

  /**
+  * Main task runner management loop. Meant to run forever, or, at least until we're stopped.
|
||||
*/
|
||||
private void manage() throws InterruptedException
|
||||
{
|
||||
log.info("Beginning management in %s.", config.getStartDelay());
|
||||
Thread.sleep(config.getStartDelay().getMillis());
|
||||
|
||||
while (active) {
|
||||
giant.lock();
|
||||
|
||||
try {
|
||||
// Task futures available from the taskRunner
|
||||
final Map<String, ListenableFuture<TaskStatus>> runnerTaskFutures = Maps.newHashMap();
|
||||
for (final TaskRunnerWorkItem workItem : taskRunner.getKnownTasks()) {
|
||||
runnerTaskFutures.put(workItem.getTaskId(), workItem.getResult());
|
||||
}
|
||||
// Attain futures for all active tasks (assuming they are ready to run).
|
||||
// Copy tasks list, as notifyStatus may modify it.
|
||||
for (final Task task : ImmutableList.copyOf(tasks)) {
|
||||
if (!taskFutures.containsKey(task.getId())) {
|
||||
final ListenableFuture<TaskStatus> runnerTaskFuture;
|
||||
if (runnerTaskFutures.containsKey(task.getId())) {
|
||||
runnerTaskFuture = runnerTaskFutures.get(task.getId());
|
||||
} else {
|
||||
// Task should be running, so run it.
|
||||
final boolean taskIsReady;
|
||||
try {
|
||||
taskIsReady = task.isReady(taskActionClientFactory.create(task));
|
||||
}
|
||||
catch (Exception e) {
|
||||
log.warn(e, "Exception thrown during isReady for task: %s", task.getId());
|
||||
notifyStatus(task, TaskStatus.failure(task.getId()));
|
||||
continue;
|
||||
}
|
||||
if (taskIsReady) {
|
||||
log.info("Asking taskRunner to run: %s", task.getId());
|
||||
runnerTaskFuture = taskRunner.run(task);
|
||||
} else {
|
||||
continue;
|
||||
}
|
||||
}
|
||||
taskFutures.put(task.getId(), attachCallbacks(task, runnerTaskFuture));
|
||||
}
|
||||
}
|
||||
// Kill tasks that shouldn't be running
|
||||
final Set<String> tasksToKill = Sets.difference(
|
||||
runnerTaskFutures.keySet(),
|
||||
ImmutableSet.copyOf(
|
||||
Lists.transform(
|
||||
tasks,
|
||||
new Function<Task, Object>()
|
||||
{
|
||||
@Override
|
||||
public String apply(Task task)
|
||||
{
|
||||
return task.getId();
|
||||
}
|
||||
}
|
||||
)
|
||||
)
|
||||
);
|
||||
if (!tasksToKill.isEmpty()) {
|
||||
log.info("Asking taskRunner to clean up %,d tasks.", tasksToKill.size());
|
||||
for (final String taskId : tasksToKill) {
|
||||
try {
|
||||
taskRunner.shutdown(taskId);
|
||||
} catch (Exception e) {
|
||||
log.warn(e, "TaskRunner failed to clean up task: %s", taskId);
|
||||
}
|
||||
}
|
||||
}
|
||||
// awaitNanos because management may become necessary without this condition signalling,
|
||||
// due to e.g. tasks becoming ready when other folks mess with the TaskLockbox.
|
||||
managementMayBeNecessary.awaitNanos(60000000000L /* 60 seconds */);
|
||||
}
|
||||
finally {
|
||||
giant.unlock();
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Adds some work to the queue and the underlying task storage facility with a generic "running" status.
|
||||
*
|
||||
|
@ -236,26 +303,20 @@ public class TaskQueue
|
|||
try {
|
||||
Preconditions.checkState(active, "Queue is not active!");
|
||||
Preconditions.checkNotNull(task, "task");
|
||||
Preconditions.checkState(tasks.size() < config.getMaxSize(), "Too many tasks (max = %,d)", config.getMaxSize());
|
||||
|
||||
// If this throws with any sort of exception, including TaskExistsException, we don't want to
|
||||
// insert the task into our queue.
|
||||
try {
|
||||
taskStorage.insert(task, TaskStatus.running(task.getId()));
|
||||
} catch (TaskExistsException e) {
|
||||
}
|
||||
catch (TaskExistsException e) {
|
||||
log.warn("Attempt to add task twice: %s", task.getId());
|
||||
throw Throwables.propagate(e);
|
||||
}
|
||||
|
||||
queue.add(task);
|
||||
workMayBeAvailable.signalAll();
|
||||
|
||||
// Attempt to add this task to a running task group. Silently continue if this is not possible.
|
||||
// The main reason this is here is so when subtasks are added, they end up in the same task group
|
||||
// as their parent whenever possible.
|
||||
if(task.getImplicitLockInterval().isPresent()) {
|
||||
taskLockbox.tryLock(task, task.getImplicitLockInterval().get());
|
||||
}
|
||||
|
||||
tasks.add(task);
|
||||
managementMayBeNecessary.signalAll();
|
||||
return true;
|
||||
}
|
||||
finally {
|
||||
|
@ -264,62 +325,22 @@ public class TaskQueue
|
|||
}
|
||||
|
||||
/**
|
||||
* Locks and returns next doable work from the queue. Blocks if there is no doable work.
|
||||
*
|
||||
* @return runnable task
|
||||
* Shuts down a task if it has not yet finished.
|
||||
* @param taskId task to kill
|
||||
*/
|
||||
public Task take() throws InterruptedException
|
||||
public void shutdown(final String taskId)
|
||||
{
|
||||
giant.lock();
|
||||
|
||||
try {
|
||||
Task task;
|
||||
|
||||
log.info("Waiting for work...");
|
||||
|
||||
while ((task = poll()) == null) {
|
||||
// awaitNanos because work may become available without this condition signalling,
|
||||
// due to other folks messing with the taskLockbox
|
||||
workMayBeAvailable.awaitNanos(1000000000L /* 1 second */);
|
||||
}
|
||||
|
||||
return task;
|
||||
}
|
||||
finally {
|
||||
giant.unlock();
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Locks and removes next doable work from the queue. Returns null if there is no doable work.
|
||||
*
|
||||
* @return runnable task or null
|
||||
*/
|
||||
public Task poll()
|
||||
{
|
||||
giant.lock();
|
||||
|
||||
try {
|
||||
for (final Task task : queue) {
|
||||
if(task.getImplicitLockInterval().isPresent()) {
|
||||
// If this task has a fixed interval, attempt to lock it right now.
|
||||
final Optional<TaskLock> maybeLock = taskLockbox.tryLock(task, task.getImplicitLockInterval().get());
|
||||
if(maybeLock.isPresent()) {
|
||||
log.info("Task claimed with fixed interval lock: %s", task.getId());
|
||||
queue.remove(task);
|
||||
return task;
|
||||
}
|
||||
} else {
|
||||
// No fixed interval. Let's just run this and see what happens.
|
||||
log.info("Task claimed with no fixed interval lock: %s", task.getId());
|
||||
queue.remove(task);
|
||||
return task;
|
||||
Preconditions.checkNotNull(taskId, "taskId");
|
||||
for (final Task task : tasks) {
|
||||
if (task.getId().equals(taskId)) {
|
||||
notifyStatus(task, TaskStatus.failure(taskId));
|
||||
break;
|
||||
}
|
||||
}
|
||||
|
||||
return null;
|
||||
}
|
||||
finally {
|
||||
} finally {
|
||||
giant.unlock();
|
||||
}
|
||||
}
|
||||
|
@ -329,14 +350,14 @@ public class TaskQueue
|
|||
* the task storage facility. If the status is a completed status, the task will be unlocked and no further
|
||||
* updates will be accepted.
|
||||
*
|
||||
* @param task task to update
|
||||
* @param task task to update
|
||||
* @param taskStatus new task status
|
||||
*
|
||||
* @throws NullPointerException if task or status is null
|
||||
* @throws IllegalArgumentException if the task ID does not match the status ID
|
||||
* @throws IllegalStateException if this queue is currently shut down
|
||||
*/
|
||||
public void notify(final Task task, final TaskStatus taskStatus)
|
||||
private void notifyStatus(final Task task, final TaskStatus taskStatus)
|
||||
{
|
||||
giant.lock();
|
||||
|
||||
|
@ -350,38 +371,156 @@ public class TaskQueue
|
|||
task.getId(),
|
||||
taskStatus.getId()
|
||||
);
|
||||
|
||||
// Save status to DB
|
||||
boolean didPersistStatus = false;
|
||||
// Inform taskRunner that this task can be shut down
|
||||
try {
|
||||
final Optional<TaskStatus> previousStatus = taskStorage.getStatus(task.getId());
|
||||
if (!previousStatus.isPresent() || !previousStatus.get().isRunnable()) {
|
||||
log.makeAlert("Ignoring notification for dead task").addData("task", task.getId()).emit();
|
||||
return;
|
||||
} else {
|
||||
taskStorage.setStatus(taskStatus);
|
||||
didPersistStatus = true;
|
||||
}
|
||||
} catch(Exception e) {
|
||||
log.makeAlert(e, "Failed to persist status for task")
|
||||
.addData("task", task.getId())
|
||||
.addData("statusCode", taskStatus.getStatusCode())
|
||||
.emit();
|
||||
taskRunner.shutdown(task.getId());
|
||||
} catch (Exception e) {
|
||||
log.warn(e, "TaskRunner failed to cleanup task after completion: %s", task.getId());
|
||||
}
|
||||
|
||||
if(taskStatus.isComplete()) {
|
||||
if(didPersistStatus) {
|
||||
log.info("Task done: %s", task);
|
||||
taskLockbox.unlock(task);
|
||||
} else {
|
||||
log.warn("Status could not be persisted! Reinserting task: %s", task.getId());
|
||||
queue.add(task);
|
||||
// Remove from running tasks
|
||||
int removed = 0;
|
||||
for (int i = tasks.size() - 1 ; i >= 0 ; i--) {
|
||||
if (tasks.get(i).getId().equals(task.getId())) {
|
||||
removed ++;
|
||||
tasks.remove(i);
|
||||
break;
|
||||
}
|
||||
}
|
||||
if (removed == 0) {
|
||||
log.warn("Unknown task completed: %s", task.getId());
|
||||
} else if (removed > 1) {
|
||||
log.makeAlert("Removed multiple copies of task").addData("count", removed).addData("task", task.getId()).emit();
|
||||
}
|
||||
// Remove from futures list
|
||||
taskFutures.remove(task.getId());
|
||||
if (removed > 0) {
|
||||
// If we thought this task should be running, save status to DB
|
||||
try {
|
||||
final Optional<TaskStatus> previousStatus = taskStorage.getStatus(task.getId());
|
||||
if (!previousStatus.isPresent() || !previousStatus.get().isRunnable()) {
|
||||
log.makeAlert("Ignoring notification for already-complete task").addData("task", task.getId()).emit();
|
||||
} else {
|
||||
taskStorage.setStatus(taskStatus);
|
||||
taskLockbox.unlock(task);
|
||||
log.info("Task done: %s", task);
|
||||
managementMayBeNecessary.signalAll();
|
||||
}
|
||||
}
|
||||
catch (Exception e) {
|
||||
log.makeAlert(e, "Failed to persist status for task")
|
||||
.addData("task", task.getId())
|
||||
.addData("statusCode", taskStatus.getStatusCode())
|
||||
.emit();
|
||||
}
|
||||
workMayBeAvailable.signalAll();
|
||||
}
|
||||
}
|
||||
finally {
|
||||
giant.unlock();
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Attach success and failure handlers to a task status future, such that when it completes, we perform the
|
||||
* appropriate updates.
|
||||
*
|
||||
* @param statusFuture a task status future
|
||||
*
|
||||
* @return the same future, for convenience
|
||||
*/
|
||||
private ListenableFuture<TaskStatus> attachCallbacks(final Task task, final ListenableFuture<TaskStatus> statusFuture)
|
||||
{
|
||||
final ServiceMetricEvent.Builder metricBuilder = new ServiceMetricEvent.Builder()
|
||||
.setUser2(task.getDataSource())
|
||||
.setUser4(task.getType());
|
||||
Futures.addCallback(
|
||||
statusFuture,
|
||||
new FutureCallback<TaskStatus>()
|
||||
{
|
||||
@Override
|
||||
public void onSuccess(final TaskStatus status)
|
||||
{
|
||||
log.info("Received %s status for task: %s", status.getStatusCode(), status.getId());
|
||||
handleStatus(status);
|
||||
}
|
||||
|
||||
@Override
|
||||
public void onFailure(final Throwable t)
|
||||
{
|
||||
log.makeAlert(t, "Failed to run task")
|
||||
.addData("task", task.getId())
|
||||
.addData("type", task.getType())
|
||||
.addData("dataSource", task.getDataSource())
|
||||
.emit();
|
||||
handleStatus(TaskStatus.failure(task.getId()));
|
||||
}
|
||||
|
||||
private void handleStatus(final TaskStatus status)
|
||||
{
|
||||
try {
|
||||
// If we're not supposed to be running anymore, don't do anything. Somewhat racey if the flag gets set
|
||||
// after we check and before we commit the database transaction, but better than nothing.
|
||||
if (!active) {
|
||||
log.info("Abandoning task due to shutdown: %s", task.getId());
|
||||
return;
|
||||
}
|
||||
|
||||
notifyStatus(task, status);
|
||||
|
||||
// Emit event and log, if the task is done
|
||||
if (status.isComplete()) {
|
||||
metricBuilder.setUser3(status.getStatusCode().toString());
|
||||
emitter.emit(metricBuilder.build("indexer/time/run/millis", status.getDuration()));
|
||||
|
||||
log.info(
|
||||
"Task %s: %s (%d run duration)",
|
||||
status.getStatusCode(),
|
||||
task,
|
||||
status.getDuration()
|
||||
);
|
||||
}
|
||||
}
|
||||
catch (Exception e) {
|
||||
log.makeAlert(e, "Failed to handle task status")
|
||||
.addData("task", task.getId())
|
||||
.addData("statusCode", status.getStatusCode())
|
||||
.emit();
|
||||
}
|
||||
}
|
||||
}
|
||||
);
|
||||
return statusFuture;
|
||||
}
|
||||
|
||||
/**
|
||||
* Resync the contents of this task queue with our storage facility. Useful to make sure our in-memory state
|
||||
* corresponds to the storage facility even if the latter is manually modified.
|
||||
*/
|
||||
private void syncFromStorage()
|
||||
{
|
||||
giant.lock();
|
||||
|
||||
try {
|
||||
if (active) {
|
||||
final List<Task> newTasks = taskStorage.getActiveTasks();
|
||||
log.info(
|
||||
"Synced %,d tasks from storage (%,d tasks added, %,d tasks removed).",
|
||||
newTasks.size(),
|
||||
Sets.difference(Sets.newHashSet(newTasks), Sets.newHashSet(tasks)).size(),
|
||||
Sets.difference(Sets.newHashSet(tasks), Sets.newHashSet(newTasks)).size()
|
||||
);
|
||||
tasks.clear();
|
||||
tasks.addAll(newTasks);
|
||||
managementMayBeNecessary.signalAll();
|
||||
} else {
|
||||
log.info("Not active. Skipping storage sync.");
|
||||
}
|
||||
}
|
||||
catch (Exception e) {
|
||||
log.warn(e, "Failed to sync tasks from storage!");
|
||||
throw Throwables.propagate(e);
|
||||
}
|
||||
finally {
|
||||
giant.unlock();
|
||||
}
|
||||
}
|
||||
}
|
||||
|
|
|
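The bootstrap path above reacquires saved locks in ascending version order, using a Guava Ordering over the lock entries. The standalone sketch below illustrates just that idiom, outside of this commit; the VersionedLock helper class is hypothetical and stands in for a saved TaskLock, of which only the version string matters here.

import com.google.common.collect.Ordering;

import java.util.Arrays;
import java.util.List;

public class VersionOrderingSketch
{
  // Hypothetical stand-in for a saved TaskLock.
  static class VersionedLock
  {
    final String taskId;
    final String version;

    VersionedLock(String taskId, String version)
    {
      this.taskId = taskId;
      this.version = version;
    }
  }

  public static void main(String[] args)
  {
    final List<VersionedLock> saved = Arrays.asList(
        new VersionedLock("task-b", "2013-11-02T00:00:00.000Z"),
        new VersionedLock("task-a", "2013-11-01T00:00:00.000Z")
    );

    // Same idiom as bootstrap(): sort by version, then try to reacquire in that order.
    final Ordering<VersionedLock> byVersion = new Ordering<VersionedLock>()
    {
      @Override
      public int compare(VersionedLock left, VersionedLock right)
      {
        return left.version.compareTo(right.version);
      }
    };

    for (VersionedLock lock : byVersion.sortedCopy(saved)) {
      System.out.println("Would reacquire " + lock.taskId + " at version " + lock.version);
    }
  }
}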
@@ -24,34 +24,24 @@ import io.druid.indexing.common.TaskStatus;
import io.druid.indexing.common.task.Task;

import java.util.Collection;
import java.util.List;

/**
 * Interface for handing off tasks. Used by a {@link io.druid.indexing.overlord.exec.TaskConsumer} to
 * run tasks that have been locked.
 * Interface for handing off tasks. Managed by a {@link io.druid.indexing.overlord.TaskQueue}.
 */
public interface TaskRunner
{
  /**
   * Provide a new task runner with a list of tasks that may already be running. Will be called once shortly
   * after instantiation and before any calls to {@link #run}. Bootstrapping should not be construed as a command
   * to run the tasks; they will be passed to {@link #run} one-by-one when this is desired. Some bootstrapped tasks
   * may not actually be running (for example, if they are currently held back due to not having a lock).
   *
   * @param tasks the tasks
   */
  public void bootstrap(List<Task> tasks);

  /**
   * Run a task. The returned status should be some kind of completed status.
   *
   * @param task task to run
   *
   * @return task status, eventually
   */
  public ListenableFuture<TaskStatus> run(Task task);

  /**
   * Best-effort task shutdown. May or may not do anything.
   * Inform the task runner it can clean up any resources associated with a task. This implies shutdown of any
   * currently-running tasks.
   */
  public void shutdown(String taskid);

@@ -59,5 +49,7 @@ public interface TaskRunner

  public Collection<? extends TaskRunnerWorkItem> getPendingTasks();

  public Collection<? extends TaskRunnerWorkItem> getKnownTasks();

  public Collection<ZkWorker> getWorkers();
}
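For orientation, the narrowed interface can be satisfied by a very small in-memory runner. The sketch below is illustrative only and not part of this commit; it assumes wildcard collection signatures like the ones visible in the hunk above, uses the two-argument TaskRunnerWorkItem constructor introduced in this commit, and completes every task immediately with a success status, which a real runner obviously would not do.

import com.google.common.collect.ImmutableList;
import com.google.common.util.concurrent.Futures;
import com.google.common.util.concurrent.ListenableFuture;
import io.druid.indexing.common.TaskStatus;
import io.druid.indexing.common.task.Task;

import java.util.Collection;
import java.util.Map;
import java.util.concurrent.ConcurrentHashMap;

public class ImmediateTaskRunner implements TaskRunner
{
  // Tracks everything ever handed to run(), keyed by task id.
  private final Map<String, TaskRunnerWorkItem> known = new ConcurrentHashMap<String, TaskRunnerWorkItem>();

  @Override
  public ListenableFuture<TaskStatus> run(Task task)
  {
    // Pretend the task finished instantly; a real runner would execute it asynchronously.
    final ListenableFuture<TaskStatus> status = Futures.immediateFuture(TaskStatus.success(task.getId()));
    known.put(task.getId(), new TaskRunnerWorkItem(task.getId(), status));
    return status;
  }

  @Override
  public void shutdown(String taskid)
  {
    // Nothing is ever actually running, so cleanup just forgets the work item.
    known.remove(taskid);
  }

  @Override
  public Collection<? extends TaskRunnerWorkItem> getRunningTasks()
  {
    return ImmutableList.of();
  }

  @Override
  public Collection<? extends TaskRunnerWorkItem> getPendingTasks()
  {
    return ImmutableList.of();
  }

  @Override
  public Collection<? extends TaskRunnerWorkItem> getKnownTasks()
  {
    return ImmutableList.copyOf(known.values());
  }

  @Override
  public Collection<ZkWorker> getWorkers()
  {
    return ImmutableList.of();
  }
}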
@@ -19,11 +19,11 @@

package io.druid.indexing.overlord;

import com.fasterxml.jackson.annotation.JsonIgnore;
import com.fasterxml.jackson.annotation.JsonProperty;
import com.google.common.collect.ComparisonChain;
import com.google.common.util.concurrent.ListenableFuture;
import io.druid.indexing.common.TaskStatus;
import io.druid.indexing.common.task.Task;
import org.joda.time.DateTime;
import org.joda.time.DateTimeComparator;

@@ -32,38 +32,39 @@ import org.joda.time.DateTimeComparator;
 */
public class TaskRunnerWorkItem implements Comparable<TaskRunnerWorkItem>
{
  private final Task task;
  private final String taskId;
  private final ListenableFuture<TaskStatus> result;
  private final DateTime createdTime;
  private final DateTime queueInsertionTime;

  public TaskRunnerWorkItem(
      Task task,
      String taskId,
      ListenableFuture<TaskStatus> result
  )
  {
    this(task, result, new DateTime(), new DateTime());
    this(taskId, result, new DateTime(), new DateTime());
  }

  public TaskRunnerWorkItem(
      Task task,
      String taskId,
      ListenableFuture<TaskStatus> result,
      DateTime createdTime,
      DateTime queueInsertionTime
  )
  {
    this.task = task;
    this.taskId = taskId;
    this.result = result;
    this.createdTime = createdTime;
    this.queueInsertionTime = queueInsertionTime;
  }

  @JsonProperty
  public Task getTask()
  public String getTaskId()
  {
    return task;
    return taskId;
  }

  @JsonIgnore
  public ListenableFuture<TaskStatus> getResult()
  {
    return result;

@@ -83,7 +84,7 @@ public class TaskRunnerWorkItem implements Comparable<TaskRunnerWorkItem>

  public TaskRunnerWorkItem withQueueInsertionTime(DateTime time)
  {
    return new TaskRunnerWorkItem(task, result, createdTime, time);
    return new TaskRunnerWorkItem(taskId, result, createdTime, time);
  }

  @Override

@@ -91,7 +92,7 @@ public class TaskRunnerWorkItem implements Comparable<TaskRunnerWorkItem>
  {
    return ComparisonChain.start()
                          .compare(createdTime, taskRunnerWorkItem.getCreatedTime(), DateTimeComparator.getInstance())
                          .compare(task.getId(), taskRunnerWorkItem.getTask().getId())
                          .compare(taskId, taskRunnerWorkItem.getTaskId())
                          .result();
  }

@@ -99,9 +100,10 @@ public class TaskRunnerWorkItem implements Comparable<TaskRunnerWorkItem>
  public String toString()
  {
    return "TaskRunnerWorkItem{" +
           "task=" + task +
           "taskId='" + taskId + '\'' +
           ", result=" + result +
           ", createdTime=" + createdTime +
           ", queueInsertionTime=" + queueInsertionTime +
           '}';
  }
}
@@ -77,9 +77,17 @@ public interface TaskStorage
  public List<TaskAction> getAuditLogs(String taskid);

  /**
   * Returns a list of currently-running tasks as stored in the storage facility, in no particular order.
   * Returns a list of currently running or pending tasks as stored in the storage facility. No particular order
   * is guaranteed, but implementations are encouraged to return tasks in ascending order of creation.
   */
  public List<Task> getRunningTasks();
  public List<Task> getActiveTasks();

  /**
   * Returns a list of recently finished task statuses as stored in the storage facility. No particular order
   * is guaranteed, but implementations are encouraged to return tasks in descending order of creation. No particular
   * standard of "recent" is guaranteed, and in fact, this method is permitted to simply return nothing.
   */
  public List<TaskStatus> getRecentlyFinishedTaskStatuses();

  /**
   * Returns a list of locks for a particular task.
@@ -21,21 +21,17 @@ package io.druid.indexing.overlord;

import com.google.common.base.Function;
import com.google.common.base.Optional;
import com.google.common.base.Predicate;
import com.google.common.collect.ImmutableMap;
import com.google.common.collect.Lists;
import com.google.common.collect.Sets;
import com.google.inject.Inject;
import com.metamx.common.guava.FunctionalIterable;
import io.druid.indexing.common.TaskStatus;
import io.druid.indexing.common.actions.SegmentInsertAction;
import io.druid.indexing.common.actions.SpawnTasksAction;
import io.druid.indexing.common.actions.TaskAction;
import io.druid.indexing.common.task.Task;
import io.druid.timeline.DataSegment;

import javax.annotation.Nullable;
import java.util.List;
import java.util.Map;
import java.util.Set;

/**

@@ -51,132 +47,41 @@ public class TaskStorageQueryAdapter
    this.storage = storage;
  }

  public List<Task> getActiveTasks()
  {
    return storage.getActiveTasks();
  }

  public List<TaskStatus> getRecentlyFinishedTaskStatuses()
  {
    return storage.getRecentlyFinishedTaskStatuses();
  }

  public Optional<Task> getTask(final String taskid)
  {
    return storage.getTask(taskid);
  }

  public Optional<TaskStatus> getStatus(final String taskid)
  {
    return storage.getStatus(taskid);
  }

  /**
   * Returns all recursive task statuses for a particular task, staying within the same task group. Includes that
   * task, plus any tasks it spawned, and so on. Does not include spawned tasks that ended up in a different task
   * group. Does not include this task's parents or siblings.
   */
  public Map<String, Optional<TaskStatus>> getSameGroupChildStatuses(final String taskid)
  {
    final Optional<Task> taskOptional = storage.getTask(taskid);
    final Optional<TaskStatus> statusOptional = storage.getStatus(taskid);
    final ImmutableMap.Builder<String, Optional<TaskStatus>> resultBuilder = ImmutableMap.builder();

    resultBuilder.put(taskid, statusOptional);

    final Iterable<Task> nextTasks = FunctionalIterable
        .create(storage.getAuditLogs(taskid)).filter(
            new Predicate<TaskAction>()
            {
              @Override
              public boolean apply(TaskAction taskAction)
              {
                return taskAction instanceof SpawnTasksAction;
              }
            }
        ).transformCat(
            new Function<TaskAction, Iterable<Task>>()
            {
              @Override
              public Iterable<Task> apply(TaskAction taskAction)
              {
                return ((SpawnTasksAction) taskAction).getNewTasks();
              }
            }
        );

    if (taskOptional.isPresent() && statusOptional.isPresent()) {
      for (final Task nextTask : nextTasks) {
        if (nextTask.getGroupId().equals(taskOptional.get().getGroupId())) {
          resultBuilder.putAll(getSameGroupChildStatuses(nextTask.getId()));
        }
      }
    }

    return resultBuilder.build();
  }

  /**
   * Like {@link #getSameGroupChildStatuses}, but flattens the recursive statuses into a single, merged status.
   */
  public Optional<TaskStatus> getSameGroupMergedStatus(final String taskid)
  {
    final Map<String, Optional<TaskStatus>> statuses = getSameGroupChildStatuses(taskid);

    int nSuccesses = 0;
    int nFailures = 0;
    int nTotal = 0;
    int nPresent = 0;

    for (final Optional<TaskStatus> statusOption : statuses.values()) {
      nTotal++;

      if (statusOption.isPresent()) {
        nPresent++;

        final TaskStatus status = statusOption.get();

        if (status.isSuccess()) {
          nSuccesses++;
        } else if (status.isFailure()) {
          nFailures++;
        }
      }
    }

    final Optional<TaskStatus> status;

    if (nPresent == 0) {
      status = Optional.absent();
    } else if (nSuccesses == nTotal) {
      status = Optional.of(TaskStatus.success(taskid));
    } else if (nFailures > 0) {
      status = Optional.of(TaskStatus.failure(taskid));
    } else {
      status = Optional.of(TaskStatus.running(taskid));
    }

    return status;
  }

  /**
   * Returns all segments created by descendants for a particular task that stayed within the same task group. Includes
   * that task, plus any tasks it spawned, and so on. Does not include spawned tasks that ended up in a different task
   * group. Does not include this task's parents or siblings.
   * Returns all segments created by this task.
   *
   * This method is useful when you want to figure out all of the things a single task spawned. It does pose issues
   * with the result set perhaps growing boundlessly and we do not do anything to protect against that. Use at your
   * own risk and know that at some point, we might adjust this to actually enforce some sort of limits.
   */
  public Set<DataSegment> getSameGroupNewSegments(final String taskid)
  public Set<DataSegment> getInsertedSegments(final String taskid)
  {
    final Optional<Task> taskOptional = storage.getTask(taskid);
    final Set<DataSegment> segments = Sets.newHashSet();
    final List<Task> nextTasks = Lists.newArrayList();

    for (final TaskAction action : storage.getAuditLogs(taskid)) {
      if (action instanceof SpawnTasksAction) {
        nextTasks.addAll(((SpawnTasksAction) action).getNewTasks());
      }

      if (action instanceof SegmentInsertAction) {
    for (final TaskAction action : storage.getAuditLogs(taskid)) {
      if (action instanceof SegmentInsertAction) {
        segments.addAll(((SegmentInsertAction) action).getSegments());
      }
    }

    if (taskOptional.isPresent()) {
      for (final Task nextTask : nextTasks) {
        if (nextTask.getGroupId().equals(taskOptional.get().getGroupId())) {
          segments.addAll(getSameGroupNewSegments(nextTask.getId()));
        }
      }
    }

    return segments;
  }
}
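The merge rule being deleted above collapses a group of child statuses into one: absent if nothing is known, success only when every child succeeded, failure as soon as any child failed, and running otherwise. A compact restatement of that decision order, as a sketch over plain status strings rather than the real TaskStatus API:

import java.util.Arrays;
import java.util.List;

public class MergedStatusSketch
{
  // Mirrors the decision order of the removed getSameGroupMergedStatus().
  static String merge(List<String> childStatuses)
  {
    if (childStatuses.isEmpty()) {
      return "ABSENT";
    }
    for (String s : childStatuses) {
      if (s.equals("FAILED")) {
        return "FAILED";   // any failure poisons the whole group
      }
    }
    for (String s : childStatuses) {
      if (!s.equals("SUCCESS")) {
        return "RUNNING";  // no failures, but something is still in flight
      }
    }
    return "SUCCESS";      // every child succeeded
  }

  public static void main(String[] args)
  {
    System.out.println(merge(Arrays.asList("SUCCESS", "SUCCESS")));           // SUCCESS
    System.out.println(merge(Arrays.asList("SUCCESS", "RUNNING")));           // RUNNING
    System.out.println(merge(Arrays.asList("SUCCESS", "FAILED", "RUNNING"))); // FAILED
  }
}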
@@ -19,7 +19,7 @@

package io.druid.indexing.overlord;

import com.google.common.base.Function;
import com.google.api.client.repackaged.com.google.common.base.Preconditions;
import com.google.common.base.Throwables;
import com.google.common.collect.ImmutableList;
import com.google.common.collect.Lists;

@@ -46,7 +46,6 @@ import org.joda.time.Interval;

import java.io.File;
import java.util.Collection;
import java.util.List;
import java.util.Set;
import java.util.concurrent.Callable;
import java.util.concurrent.ConcurrentSkipListSet;

@@ -58,7 +57,7 @@ public class ThreadPoolTaskRunner implements TaskRunner, QuerySegmentWalker
{
  private final TaskToolboxFactory toolboxFactory;
  private final ListeningExecutorService exec;
  private final Set<TaskRunnerWorkItem> runningItems = new ConcurrentSkipListSet<TaskRunnerWorkItem>();
  private final Set<ThreadPoolTaskRunnerWorkItem> runningItems = new ConcurrentSkipListSet<>();

  private static final EmittingLogger log = new EmittingLogger(ThreadPoolTaskRunner.class);

@@ -67,7 +66,7 @@ public class ThreadPoolTaskRunner implements TaskRunner, QuerySegmentWalker
      TaskToolboxFactory toolboxFactory
  )
  {
    this.toolboxFactory = toolboxFactory;
    this.toolboxFactory = Preconditions.checkNotNull(toolboxFactory, "toolboxFactory");
    this.exec = MoreExecutors.listeningDecorator(Execs.singleThreaded("task-runner-%d"));
  }

@@ -77,19 +76,12 @@ public class ThreadPoolTaskRunner implements TaskRunner, QuerySegmentWalker
    exec.shutdownNow();
  }

  @Override
  public void bootstrap(List<Task> tasks)
  {
    // do nothing
  }

  @Override
  public ListenableFuture<TaskStatus> run(final Task task)
  {
    final TaskToolbox toolbox = toolboxFactory.build(task);
    final ListenableFuture<TaskStatus> statusFuture = exec.submit(new ExecutorServiceTaskRunnerCallable(task, toolbox));

    final TaskRunnerWorkItem taskRunnerWorkItem = new TaskRunnerWorkItem(task, statusFuture);
    final ListenableFuture<TaskStatus> statusFuture = exec.submit(new ThreadPoolTaskRunnerCallable(task, toolbox));
    final ThreadPoolTaskRunnerWorkItem taskRunnerWorkItem = new ThreadPoolTaskRunnerWorkItem(task, statusFuture);
    runningItems.add(taskRunnerWorkItem);
    Futures.addCallback(
        statusFuture, new FutureCallback<TaskStatus>()

@@ -115,7 +107,7 @@ public class ThreadPoolTaskRunner implements TaskRunner, QuerySegmentWalker
  public void shutdown(final String taskid)
  {
    for (final TaskRunnerWorkItem runningItem : runningItems) {
      if (runningItem.getTask().getId().equals(taskid)) {
      if (runningItem.getTaskId().equals(taskid)) {
        runningItem.getResult().cancel(true);
      }
    }

@@ -124,7 +116,7 @@ public class ThreadPoolTaskRunner implements TaskRunner, QuerySegmentWalker
  @Override
  public Collection<TaskRunnerWorkItem> getRunningTasks()
  {
    return ImmutableList.copyOf(runningItems);
    return ImmutableList.<TaskRunnerWorkItem>copyOf(runningItems);
  }

  @Override

@@ -133,6 +125,12 @@ public class ThreadPoolTaskRunner implements TaskRunner, QuerySegmentWalker
    return ImmutableList.of();
  }

  @Override
  public Collection<TaskRunnerWorkItem> getKnownTasks()
  {
    return ImmutableList.<TaskRunnerWorkItem>copyOf(runningItems);
  }

  @Override
  public Collection<ZkWorker> getWorkers()
  {

@@ -155,18 +153,8 @@ public class ThreadPoolTaskRunner implements TaskRunner, QuerySegmentWalker
  {
    QueryRunner<T> queryRunner = null;

    final List<Task> runningTasks = Lists.transform(
        ImmutableList.copyOf(getRunningTasks()), new Function<TaskRunnerWorkItem, Task>()
        {
          @Override
          public Task apply(TaskRunnerWorkItem o)
          {
            return o.getTask();
          }
        }
    );

    for (final Task task : runningTasks) {
    for (final ThreadPoolTaskRunnerWorkItem taskRunnerWorkItem : ImmutableList.copyOf(runningItems)) {
      final Task task = taskRunnerWorkItem.getTask();
      if (task.getDataSource().equals(query.getDataSource())) {
        final QueryRunner<T> taskQueryRunner = task.getQueryRunner(query);

@@ -185,12 +173,31 @@ public class ThreadPoolTaskRunner implements TaskRunner, QuerySegmentWalker
    return queryRunner == null ? new NoopQueryRunner<T>() : queryRunner;
  }

  private static class ExecutorServiceTaskRunnerCallable implements Callable<TaskStatus>
  private static class ThreadPoolTaskRunnerWorkItem extends TaskRunnerWorkItem
  {
    private final Task task;

    private ThreadPoolTaskRunnerWorkItem(
        Task task,
        ListenableFuture<TaskStatus> result
    )
    {
      super(task.getId(), result);
      this.task = task;
    }

    public Task getTask()
    {
      return task;
    }
  }

  private static class ThreadPoolTaskRunnerCallable implements Callable<TaskStatus>
  {
    private final Task task;
    private final TaskToolbox toolbox;

    public ExecutorServiceTaskRunnerCallable(Task task, TaskToolbox toolbox)
    public ThreadPoolTaskRunnerCallable(Task task, TaskToolbox toolbox)
    {
      this.task = task;
      this.toolbox = toolbox;

@@ -242,10 +249,5 @@ public class ThreadPoolTaskRunner implements TaskRunner, QuerySegmentWalker
      throw Throwables.propagate(e);
      }
    }

    public TaskRunnerWorkItem getTaskRunnerWorkItem()
    {
      return new TaskRunnerWorkItem(task, null);
    }
  }
}
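The shutdown path above relies on Future.cancel(true) delivering an interrupt to the single worker thread running the task callable. A standalone sketch of that mechanism with Guava's listening decorator, independent of the Druid types:

import com.google.common.util.concurrent.ListenableFuture;
import com.google.common.util.concurrent.ListeningExecutorService;
import com.google.common.util.concurrent.MoreExecutors;

import java.util.concurrent.Callable;
import java.util.concurrent.Executors;

public class CancelDemo
{
  public static void main(String[] args) throws Exception
  {
    final ListeningExecutorService exec =
        MoreExecutors.listeningDecorator(Executors.newSingleThreadExecutor());

    final ListenableFuture<String> future = exec.submit(
        new Callable<String>()
        {
          @Override
          public String call() throws Exception
          {
            Thread.sleep(60000L); // stand-in for a long-running task
            return "done";
          }
        }
    );

    // cancel(true) interrupts the thread running the callable, which is how
    // ThreadPoolTaskRunner.shutdown() stops a matching task.
    future.cancel(true);
    System.out.println("cancelled: " + future.isCancelled()); // true

    exec.shutdownNow();
  }
}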
@@ -71,9 +71,9 @@ public class ZkWorker implements Closeable
    };
  }

  public void start(PathChildrenCache.StartMode startMode) throws Exception
  public void start() throws Exception
  {
    statusCache.start(startMode);
    statusCache.start(PathChildrenCache.StartMode.POST_INITIALIZED_EVENT);
  }

  public void addListener(PathChildrenCacheListener listener)
@@ -62,7 +62,8 @@ public class ForkingTaskRunnerConfig
      "druid",
      "io.druid",
      "user.timezone",
      "file.encoding"
      "file.encoding",
      "java.io.tmpdir"
  );

  public String getTaskDir()
@@ -0,0 +1,79 @@
/*
 * Druid - a distributed column store.
 * Copyright (C) 2012, 2013  Metamarkets Group Inc.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version 2
 * of the License, or (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
 */

package io.druid.indexing.overlord.config;

import com.fasterxml.jackson.annotation.JsonCreator;
import com.fasterxml.jackson.annotation.JsonProperty;
import org.joda.time.Duration;
import org.joda.time.Period;

public class TaskQueueConfig
{
  @JsonProperty
  private int maxSize;

  @JsonProperty
  private Duration startDelay;

  @JsonProperty
  private Duration restartDelay;

  @JsonProperty
  private Duration storageSyncRate;

  @JsonCreator
  public TaskQueueConfig(
      @JsonProperty("maxSize") final Integer maxSize,
      @JsonProperty("startDelay") final Period startDelay,
      @JsonProperty("restartDelay") final Period restartDelay,
      @JsonProperty("storageSyncRate") final Period storageSyncRate
  )
  {
    this.maxSize = maxSize == null ? Integer.MAX_VALUE : maxSize;
    this.startDelay = defaultDuration(startDelay, "PT1M");
    this.restartDelay = defaultDuration(restartDelay, "PT30S");
    this.storageSyncRate = defaultDuration(storageSyncRate, "PT1M");
  }

  public int getMaxSize()
  {
    return maxSize;
  }

  public Duration getStartDelay()
  {
    return startDelay;
  }

  public Duration getRestartDelay()
  {
    return restartDelay;
  }

  public Duration getStorageSyncRate()
  {
    return storageSyncRate;
  }

  private static Duration defaultDuration(final Period period, final String theDefault)
  {
    return (period == null ? new Period(theDefault) : period).toStandardDuration();
  }
}
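Every constructor argument of this new config class is nullable, so a sparse configuration picks up the documented defaults (maxSize unbounded, startDelay PT1M, restartDelay PT30S, storageSyncRate PT1M). A small sketch of that behavior, constructing the class directly rather than through Jackson; the printed values are what the Joda types render as:

import org.joda.time.Period;

public class TaskQueueConfigDemo
{
  public static void main(String[] args)
  {
    // Nulls fall through to the defaults baked into the constructor.
    final TaskQueueConfig config = new TaskQueueConfig(
        25,                   // maxSize
        null,                 // startDelay     -> defaults to PT1M
        new Period("PT10S"),  // restartDelay   -> ten seconds
        null                  // storageSyncRate -> defaults to PT1M
    );

    System.out.println(config.getMaxSize());          // 25
    System.out.println(config.getStartDelay());       // PT60S (a Duration)
    System.out.println(config.getRestartDelay());     // PT10S
    System.out.println(config.getStorageSyncRate());  // PT60S
  }
}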
@@ -1,204 +0,0 @@
/*
 * Druid - a distributed column store.
 * Copyright (C) 2012, 2013  Metamarkets Group Inc.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version 2
 * of the License, or (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
 */

package io.druid.indexing.overlord.exec;

import com.google.common.base.Throwables;
import com.google.common.util.concurrent.FutureCallback;
import com.google.common.util.concurrent.Futures;
import com.google.common.util.concurrent.ListenableFuture;
import com.metamx.common.lifecycle.LifecycleStart;
import com.metamx.common.lifecycle.LifecycleStop;
import com.metamx.emitter.EmittingLogger;
import com.metamx.emitter.service.ServiceEmitter;
import com.metamx.emitter.service.ServiceMetricEvent;
import io.druid.indexing.common.TaskStatus;
import io.druid.indexing.common.actions.TaskActionClientFactory;
import io.druid.indexing.common.task.Task;
import io.druid.indexing.overlord.TaskQueue;
import io.druid.indexing.overlord.TaskRunner;

public class TaskConsumer implements Runnable
{
  private final TaskQueue queue;
  private final TaskRunner runner;
  private final TaskActionClientFactory taskActionClientFactory;
  private final ServiceEmitter emitter;
  private final Thread thready;

  private volatile boolean shutdown = false;

  private static final EmittingLogger log = new EmittingLogger(TaskConsumer.class);

  public TaskConsumer(
      TaskQueue queue,
      TaskRunner runner,
      TaskActionClientFactory taskActionClientFactory,
      ServiceEmitter emitter
  )
  {
    this.queue = queue;
    this.runner = runner;
    this.taskActionClientFactory = taskActionClientFactory;
    this.emitter = emitter;
    this.thready = new Thread(this);
  }

  @LifecycleStart
  public void start()
  {
    thready.start();
  }

  @LifecycleStop
  public void stop()
  {
    shutdown = true;
    thready.interrupt();
  }

  @Override
  public void run()
  {

    try {
      while (!Thread.currentThread().isInterrupted()) {

        final Task task;

        try {
          task = queue.take();
        }
        catch (InterruptedException e) {
          log.info("Interrupted while waiting for new work");
          Thread.currentThread().interrupt();
          break;
        }

        try {
          handoff(task);
        }
        catch (Exception e) {
          log.makeAlert(e, "Failed to hand off task")
             .addData("task", task.getId())
             .addData("type", task.getType())
             .addData("dataSource", task.getDataSource())
             .addData("interval", task.getImplicitLockInterval())
             .emit();

          // Retry would be nice, but only after we have a way to throttle and limit them. Just fail for now.
          if (!shutdown) {
            queue.notify(task, TaskStatus.failure(task.getId()));
          }
        }
      }
    }
    catch (Exception e) {
      // exit thread
      log.error(e, "Uncaught exception while consuming tasks");
      throw Throwables.propagate(e);
    }
  }

  private void handoff(final Task task) throws Exception
  {
    final ServiceMetricEvent.Builder metricBuilder = new ServiceMetricEvent.Builder()
        .setUser2(task.getDataSource())
        .setUser4(task.getType())
        .setUser5(task.getImplicitLockInterval().toString());

    // Run preflight checks
    TaskStatus preflightStatus;
    try {
      preflightStatus = task.preflight(taskActionClientFactory.create(task));
      log.info("Preflight done for task: %s", task.getId());
    }
    catch (Exception e) {
      preflightStatus = TaskStatus.failure(task.getId());
      log.error(e, "Exception thrown during preflight for task: %s", task.getId());
    }

    if (!preflightStatus.isRunnable()) {
      log.info("Task finished during preflight: %s", task.getId());
      queue.notify(task, preflightStatus);
      return;
    }

    // Hand off work to TaskRunner, with a callback
    final ListenableFuture<TaskStatus> status = runner.run(task);

    Futures.addCallback(
        status, new FutureCallback<TaskStatus>()
        {
          @Override
          public void onSuccess(final TaskStatus status)
          {
            log.info("Received %s status for task: %s", status.getStatusCode(), task);
            handleStatus(status);
          }

          @Override
          public void onFailure(Throwable t)
          {
            log.makeAlert(t, "Failed to run task")
               .addData("task", task.getId())
               .addData("type", task.getType())
               .addData("dataSource", task.getDataSource())
               .addData("interval", task.getImplicitLockInterval())
               .emit();

            handleStatus(TaskStatus.failure(task.getId()));
          }

          private void handleStatus(TaskStatus status)
          {
            try {
              // If we're not supposed to be running anymore, don't do anything. Somewhat racey if the flag gets set after
              // we check and before we commit the database transaction, but better than nothing.
              if (shutdown) {
                log.info("Abandoning task due to shutdown: %s", task.getId());
                return;
              }

              queue.notify(task, status);

              // Emit event and log, if the task is done
              if (status.isComplete()) {
                metricBuilder.setUser3(status.getStatusCode().toString());
                emitter.emit(metricBuilder.build("indexer/time/run/millis", status.getDuration()));

                log.info(
                    "Task %s: %s (%d run duration)",
                    status.getStatusCode(),
                    task,
                    status.getDuration()
                );
              }
            }
            catch (Exception e) {
              log.makeAlert(e, "Failed to handle task status")
                 .addData("task", task.getId())
                 .addData("statusCode", status.getStatusCode())
                 .emit();
            }
          }
        }
    );
  }
}
@ -19,15 +19,20 @@
|
|||
|
||||
package io.druid.indexing.overlord.http;
|
||||
|
||||
import com.fasterxml.jackson.annotation.JsonValue;
|
||||
import com.google.common.base.Function;
|
||||
import com.google.common.base.Optional;
|
||||
import com.google.common.collect.Collections2;
|
||||
import com.google.common.collect.ImmutableMap;
|
||||
import com.google.common.collect.Iterables;
|
||||
import com.google.common.collect.Lists;
|
||||
import com.google.common.collect.Maps;
|
||||
import com.google.common.collect.Sets;
|
||||
import com.google.common.io.InputSupplier;
|
||||
import com.google.common.util.concurrent.SettableFuture;
|
||||
import com.google.inject.Inject;
|
||||
import com.metamx.common.logger.Logger;
|
||||
import io.druid.common.config.JacksonConfigManager;
|
||||
import io.druid.indexing.common.TaskStatus;
|
||||
import io.druid.indexing.common.actions.TaskActionClient;
|
||||
import io.druid.indexing.common.actions.TaskActionHolder;
|
||||
import io.druid.indexing.common.task.Task;
|
||||
|
@ -40,6 +45,7 @@ import io.druid.indexing.overlord.scaling.ResourceManagementScheduler;
|
|||
import io.druid.indexing.overlord.setup.WorkerSetupData;
|
||||
import io.druid.tasklogs.TaskLogStreamer;
|
||||
import io.druid.timeline.DataSegment;
|
||||
import org.joda.time.DateTime;
|
||||
|
||||
import javax.ws.rs.Consumes;
|
||||
import javax.ws.rs.DefaultValue;
|
||||
|
@ -52,6 +58,8 @@ import javax.ws.rs.QueryParam;
|
|||
import javax.ws.rs.core.Response;
|
||||
import java.io.IOException;
|
||||
import java.io.InputStream;
|
||||
import java.util.Collection;
|
||||
import java.util.List;
|
||||
import java.util.Map;
|
||||
import java.util.Set;
|
||||
import java.util.concurrent.atomic.AtomicReference;
|
||||
|
@ -63,27 +71,6 @@ public class OverlordResource
|
|||
{
|
||||
private static final Logger log = new Logger(OverlordResource.class);
|
||||
|
||||
private static Function<TaskRunnerWorkItem, Map<String, Object>> simplifyTaskFn =
|
||||
new Function<TaskRunnerWorkItem, Map<String, Object>>()
|
||||
{
|
||||
@Override
|
||||
public Map<String, Object> apply(TaskRunnerWorkItem input)
|
||||
{
|
||||
return new ImmutableMap.Builder<String, Object>()
|
||||
.put("id", input.getTask().getId())
|
||||
.put("dataSource", input.getTask().getDataSource())
|
||||
.put("interval",
|
||||
!input.getTask().getImplicitLockInterval().isPresent()
|
||||
? ""
|
||||
: input.getTask().getImplicitLockInterval().get()
|
||||
)
|
||||
.put("nodeType", input.getTask().getNodeType() == null ? "" : input.getTask().getNodeType())
|
||||
.put("createdTime", input.getCreatedTime())
|
||||
.put("queueInsertionTime", input.getQueueInsertionTime())
|
||||
.build();
|
||||
}
|
||||
};
|
||||
|
||||
private final TaskMaster taskMaster;
|
||||
private final TaskStorageQueryAdapter taskStorageQueryAdapter;
|
||||
private final TaskLogStreamer taskLogStreamer;
|
||||
|
@ -146,12 +133,20 @@ public class OverlordResource
|
|||
);
|
||||
}
|
||||
|
||||
@GET
|
||||
@Path("/task/{taskid}")
|
||||
@Produces("application/json")
|
||||
public Response getTaskPayload(@PathParam("taskid") String taskid)
|
||||
{
|
||||
return optionalTaskResponse(taskid, "payload", taskStorageQueryAdapter.getTask(taskid));
|
||||
}
|
||||
|
||||
@GET
|
||||
@Path("/task/{taskid}/status")
|
||||
@Produces("application/json")
|
||||
public Response getTaskStatus(@PathParam("taskid") String taskid)
|
||||
{
|
||||
return optionalTaskResponse(taskid, "status", taskStorageQueryAdapter.getSameGroupMergedStatus(taskid));
|
||||
return optionalTaskResponse(taskid, "status", taskStorageQueryAdapter.getStatus(taskid));
|
||||
}
|
||||
|
||||
@GET
|
||||
|
@ -159,7 +154,7 @@ public class OverlordResource
|
|||
@Produces("application/json")
|
||||
public Response getTaskSegments(@PathParam("taskid") String taskid)
|
||||
{
|
||||
final Set<DataSegment> segments = taskStorageQueryAdapter.getSameGroupNewSegments(taskid);
|
||||
final Set<DataSegment> segments = taskStorageQueryAdapter.getInsertedSegments(taskid);
|
||||
return Response.ok().entity(segments).build();
|
||||
}
|
||||
|
||||
|
@ -169,13 +164,13 @@ public class OverlordResource
|
|||
public Response doShutdown(@PathParam("taskid") final String taskid)
|
||||
{
|
||||
return asLeaderWith(
|
||||
taskMaster.getTaskRunner(),
|
||||
new Function<TaskRunner, Response>()
|
||||
taskMaster.getTaskQueue(),
|
||||
new Function<TaskQueue, Response>()
|
||||
{
|
||||
@Override
|
||||
public Response apply(TaskRunner taskRunner)
|
||||
public Response apply(TaskQueue taskQueue)
|
||||
{
|
||||
taskRunner.shutdown(taskid);
|
||||
taskQueue.shutdown(taskid);
|
||||
return Response.ok(ImmutableMap.of("task", taskid)).build();
|
||||
}
|
||||
}
|
||||
|
@ -225,7 +220,7 @@ public class OverlordResource
|
|||
final Map<String, Object> retMap;
|
||||
|
||||
// It would be great to verify that this worker is actually supposed to be running the task before
|
||||
// actually doing the task. Some ideas for how that could be done would be using some sort of attempt_id
|
||||
// actually doing the action. Some ideas for how that could be done would be using some sort of attempt_id
|
||||
// or token that gets passed around.
|
||||
|
||||
try {
|
||||
|
@ -245,39 +240,64 @@ public class OverlordResource
|
|||
}
|
||||
|
||||
@GET
|
||||
@Path("/pendingTasks")
|
||||
@Path("/waitingTasks")
|
||||
@Produces("application/json")
|
||||
public Response getPendingTasks(
|
||||
@QueryParam("full") String full
|
||||
)
|
||||
public Response getWaitingTasks()
|
||||
{
|
||||
if (full != null) {
|
||||
return asLeaderWith(
|
||||
taskMaster.getTaskRunner(),
|
||||
new Function<TaskRunner, Response>()
|
||||
{
|
||||
@Override
|
||||
public Response apply(TaskRunner taskRunner)
|
||||
{
|
||||
return Response.ok(taskRunner.getPendingTasks()).build();
|
||||
}
|
||||
}
|
||||
);
|
||||
}
|
||||
|
||||
return asLeaderWith(
|
||||
taskMaster.getTaskRunner(),
|
||||
new Function<TaskRunner, Response>()
|
||||
return workItemsResponse(
|
||||
new Function<TaskRunner, Collection<? extends TaskRunnerWorkItem>>()
|
||||
{
|
||||
@Override
|
||||
public Response apply(TaskRunner taskRunner)
|
||||
public Collection<? extends TaskRunnerWorkItem> apply(TaskRunner taskRunner)
|
||||
{
|
||||
return Response.ok(
|
||||
Collections2.transform(
|
||||
taskRunner.getPendingTasks(),
|
||||
simplifyTaskFn
|
||||
// A bit roundabout, but works as a way of figuring out what tasks haven't been handed
|
||||
// off to the runner yet:
|
||||
final List<Task> activeTasks = taskStorageQueryAdapter.getActiveTasks();
|
||||
final Set<String> runnersKnownTasks = Sets.newHashSet(
|
||||
Iterables.transform(
|
||||
taskRunner.getKnownTasks(),
|
||||
new Function<TaskRunnerWorkItem, String>()
|
||||
{
|
||||
@Override
|
||||
public String apply(final TaskRunnerWorkItem workItem)
|
||||
{
|
||||
return workItem.getTaskId();
|
||||
}
|
||||
}
|
||||
)
|
||||
).build();
|
||||
);
|
||||
final List<TaskRunnerWorkItem> waitingTasks = Lists.newArrayList();
|
||||
for (final Task task : activeTasks) {
|
||||
if (!runnersKnownTasks.contains(task.getId())) {
|
||||
waitingTasks.add(
|
||||
// Would be nice to include the real created date, but the TaskStorage API doesn't yet allow it.
|
||||
new TaskRunnerWorkItem(
|
||||
task.getId(),
|
||||
SettableFuture.<TaskStatus>create(),
|
||||
new DateTime(0),
|
||||
new DateTime(0)
|
||||
)
|
||||
);
|
||||
}
|
||||
}
|
||||
return waitingTasks;
|
||||
}
|
||||
}
|
||||
);
|
||||
}
|
||||
|
||||
@GET
|
||||
@Path("/pendingTasks")
|
||||
@Produces("application/json")
|
||||
public Response getPendingTasks()
|
||||
{
|
||||
return workItemsResponse(
|
||||
new Function<TaskRunner, Collection<? extends TaskRunnerWorkItem>>()
|
||||
{
|
||||
@Override
|
||||
public Collection<? extends TaskRunnerWorkItem> apply(TaskRunner taskRunner)
|
||||
{
|
||||
return taskRunner.getPendingTasks();
|
||||
}
|
||||
}
|
||||
);
|
||||
@ -286,42 +306,45 @@ public class OverlordResource

@GET
@Path("/runningTasks")
@Produces("application/json")
public Response getRunningTasks(
@QueryParam("full") String full
)
public Response getRunningTasks()
{
if (full != null) {
return asLeaderWith(
taskMaster.getTaskRunner(),
new Function<TaskRunner, Response>()
{
@Override
public Response apply(TaskRunner taskRunner)
{
return Response.ok(taskRunner.getRunningTasks()).build();
}
}
);
}

return asLeaderWith(
taskMaster.getTaskRunner(),
new Function<TaskRunner, Response>()
return workItemsResponse(
new Function<TaskRunner, Collection<? extends TaskRunnerWorkItem>>()
{
@Override
public Response apply(TaskRunner taskRunner)
public Collection<? extends TaskRunnerWorkItem> apply(TaskRunner taskRunner)
{
return Response.ok(
Collections2.transform(
taskRunner.getRunningTasks(),
simplifyTaskFn
)
).build();
return taskRunner.getRunningTasks();
}
}
);
}

@GET
@Path("/completeTasks")
@Produces("application/json")
public Response getCompleteTasks()
{
final List<TaskResponseObject> completeTasks = Lists.transform(
taskStorageQueryAdapter.getRecentlyFinishedTaskStatuses(),
new Function<TaskStatus, TaskResponseObject>()
{
@Override
public TaskResponseObject apply(TaskStatus taskStatus)
{
// Would be nice to include the real created date, but the TaskStorage API doesn't yet allow it.
return new TaskResponseObject(
taskStatus.getId(),
new DateTime(0),
new DateTime(0),
Optional.of(taskStatus)
);
}
}
);
return Response.ok(completeTasks).build();
}

@GET
@Path("/workers")
@Produces("application/json")

@ -345,17 +368,13 @@ public class OverlordResource
@Produces("application/json")
public Response getScalingState()
{
return asLeaderWith(
taskMaster.getResourceManagementScheduler(),
new Function<ResourceManagementScheduler, Response>()
{
@Override
public Response apply(ResourceManagementScheduler resourceManagementScheduler)
{
return Response.ok(resourceManagementScheduler.getStats()).build();
}
}
);
// Don't use asLeaderWith, since we want to return 200 instead of 503 when missing an autoscaler.
final Optional<ResourceManagementScheduler> rms = taskMaster.getResourceManagementScheduler();
if (rms.isPresent()) {
return Response.ok(rms.get().getStats()).build();
} else {
return Response.ok().build();
}
}

@GET

@ -380,7 +399,39 @@ public class OverlordResource
}
}

public <T> Response optionalTaskResponse(String taskid, String objectType, Optional<T> x)
private Response workItemsResponse(final Function<TaskRunner, Collection<? extends TaskRunnerWorkItem>> fn)
{
return asLeaderWith(
taskMaster.getTaskRunner(),
new Function<TaskRunner, Response>()
{
@Override
public Response apply(TaskRunner taskRunner)
{
return Response.ok(
Lists.transform(
Lists.newArrayList(fn.apply(taskRunner)),
new Function<TaskRunnerWorkItem, TaskResponseObject>()
{
@Override
public TaskResponseObject apply(TaskRunnerWorkItem workItem)
{
return new TaskResponseObject(
workItem.getTaskId(),
workItem.getCreatedTime(),
workItem.getQueueInsertionTime(),
Optional.<TaskStatus>absent()
);
}
}
)
).build();
}
}
);
}

private <T> Response optionalTaskResponse(String taskid, String objectType, Optional<T> x)
{
final Map<String, Object> results = Maps.newHashMap();
results.put("task", taskid);

@ -392,7 +443,7 @@ public class OverlordResource
}
}

public <T> Response asLeaderWith(Optional<T> x, Function<T, Response> f)
private <T> Response asLeaderWith(Optional<T> x, Function<T, Response> f)
{
if (x.isPresent()) {
return f.apply(x.get());

@ -401,4 +452,62 @@ public class OverlordResource
return Response.status(Response.Status.SERVICE_UNAVAILABLE).build();
}
}

private static class TaskResponseObject
{
private final String id;
private final DateTime createdTime;
private final DateTime queueInsertionTime;
private final Optional<TaskStatus> status;

private TaskResponseObject(
String id,
DateTime createdTime,
DateTime queueInsertionTime,
Optional<TaskStatus> status
)
{
this.id = id;
this.createdTime = createdTime;
this.queueInsertionTime = queueInsertionTime;
this.status = status;
}

public String getId()
{
return id;
}

public DateTime getCreatedTime()
{
return createdTime;
}

public DateTime getQueueInsertionTime()
{
return queueInsertionTime;
}

public Optional<TaskStatus> getStatus()
{
return status;
}

@JsonValue
public Map<String, Object> toJson()
{
final Map<String, Object> data = Maps.newLinkedHashMap();
data.put("id", id);
if (createdTime.getMillis() > 0) {
data.put("createdTime", createdTime);
}
if (queueInsertionTime.getMillis() > 0) {
data.put("queueInsertionTime", queueInsertionTime);
}
if (status.isPresent()) {
data.put("statusCode", status.get().getStatusCode().toString());
}
return data;
}
}
}
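Note: after this change /runningTasks, /pendingTasks, and /waitingTasks all funnel through workItemsResponse, so they share the TaskResponseObject JSON shape above. A minimal client sketch (the overlord address is hypothetical; Jackson is assumed on the classpath):

import com.fasterxml.jackson.databind.JsonNode;
import com.fasterxml.jackson.databind.ObjectMapper;
import java.net.URL;

public class CompleteTasksClient
{
  public static void main(String[] args) throws Exception
  {
    // Hypothetical host/port; point this at your overlord.
    final URL url = new URL("http://localhost:8087/druid/indexer/v1/completeTasks");
    for (JsonNode task : new ObjectMapper().readTree(url)) {
      // Completed tasks carry "id" and "statusCode"; created/queue times are
      // omitted when their epoch-millis value is zero (see toJson above).
      System.out.println(task.get("id").asText() + " -> " + task.get("statusCode").asText());
    }
  }
}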
@ -28,12 +28,10 @@ import java.util.List;
public class AutoScalingData
{
private final List<String> nodeIds;
private final List nodes;

public AutoScalingData(List<String> nodeIds, List nodes)
public AutoScalingData(List<String> nodeIds)
{
this.nodeIds = nodeIds;
this.nodes = nodes;
}

@JsonProperty

@ -42,17 +40,11 @@ public class AutoScalingData
return nodeIds;
}

public List getNodes()
{
return nodes;
}

@Override
public String toString()
{
return "AutoScalingData{" +
"nodeIds=" + nodeIds +
", nodes=" + nodes +
'}';
}
}
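Note: with the raw nodes list removed, AutoScalingData is built from provider node ids alone. A hedged usage sketch (the instance id is illustrative, and the package name is assumed from the surrounding diff):

import com.google.common.collect.Lists;
import io.druid.indexing.overlord.scaling.AutoScalingData;

public class AutoScalingDataDemo
{
  public static void main(String[] args)
  {
    // Only node ids survive the refactor; provider-specific Instance
    // objects no longer leak out of the scaling strategy.
    AutoScalingData provisioned = new AutoScalingData(Lists.newArrayList("i-0123456789abcdef0"));
    System.out.println(provisioned.getNodeIds());
  }
}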
@ -125,8 +125,7 @@ public class EC2AutoScalingStrategy implements AutoScalingStrategy
return input.getInstanceId();
}
}
),
result.getReservation().getInstances()
)
);
}
catch (Exception e) {

@ -140,7 +139,7 @@ public class EC2AutoScalingStrategy implements AutoScalingStrategy
public AutoScalingData terminate(List<String> ips)
{
if (ips.isEmpty()) {
return new AutoScalingData(Lists.<String>newArrayList(), Lists.<Instance>newArrayList());
return new AutoScalingData(Lists.<String>newArrayList());
}

DescribeInstancesResult result = amazonEC2Client.describeInstances(

@ -184,8 +183,7 @@ public class EC2AutoScalingStrategy implements AutoScalingStrategy
return String.format("%s:%s", input, config.getWorkerPort());
}
}
),
instances
)
);
}
catch (Exception e) {
@ -20,13 +20,16 @@
package io.druid.indexing.overlord.scaling;

import com.google.common.base.Function;
import com.google.common.base.Joiner;
import com.google.common.base.Predicate;
import com.google.common.base.Supplier;
import com.google.common.collect.Collections2;
import com.google.common.collect.FluentIterable;
import com.google.common.collect.Iterables;
import com.google.common.collect.Lists;
import com.google.common.collect.Sets;
import com.google.inject.Inject;
import com.metamx.common.guava.FunctionalIterable;
import com.metamx.common.ISE;
import com.metamx.emitter.EmittingLogger;
import io.druid.indexing.overlord.RemoteTaskRunnerWorkItem;
import io.druid.indexing.overlord.TaskRunnerWorkItem;

@ -38,7 +41,6 @@ import org.joda.time.Duration;
import java.util.Collection;
import java.util.List;
import java.util.Set;
import java.util.concurrent.ConcurrentSkipListSet;

/**
*/

@ -48,211 +50,194 @@ public class SimpleResourceManagementStrategy implements ResourceManagementStrat

private final AutoScalingStrategy autoScalingStrategy;
private final SimpleResourceManagementConfig config;
private final Supplier<WorkerSetupData> workerSetupdDataRef;
private final Supplier<WorkerSetupData> workerSetupDataRef;
private final ScalingStats scalingStats;

private final ConcurrentSkipListSet<String> currentlyProvisioning = new ConcurrentSkipListSet<String>();
private final ConcurrentSkipListSet<String> currentlyTerminating = new ConcurrentSkipListSet<String>();
private final Object lock = new Object();
private final Set<String> currentlyProvisioning = Sets.newHashSet();
private final Set<String> currentlyTerminating = Sets.newHashSet();

private volatile DateTime lastProvisionTime = new DateTime();
private volatile DateTime lastTerminateTime = new DateTime();
private int targetWorkerCount = -1;
private DateTime lastProvisionTime = new DateTime();
private DateTime lastTerminateTime = new DateTime();

@Inject
public SimpleResourceManagementStrategy(
AutoScalingStrategy autoScalingStrategy,
SimpleResourceManagementConfig config,
Supplier<WorkerSetupData> workerSetupdDataRef
Supplier<WorkerSetupData> workerSetupDataRef
)
{
this.autoScalingStrategy = autoScalingStrategy;
this.config = config;
this.workerSetupdDataRef = workerSetupdDataRef;
this.workerSetupDataRef = workerSetupDataRef;
this.scalingStats = new ScalingStats(config.getNumEventsToTrack());
}

@Override
public boolean doProvision(Collection<RemoteTaskRunnerWorkItem> pendingTasks, Collection<ZkWorker> zkWorkers)
{
final WorkerSetupData workerSetupData = workerSetupdDataRef.get();

final String minVersion = workerSetupData.getMinVersion() == null
? config.getWorkerVersion()
: workerSetupData.getMinVersion();
int maxNumWorkers = workerSetupData.getMaxNumWorkers();

int currValidWorkers = 0;
for (ZkWorker zkWorker : zkWorkers) {
if (zkWorker.isValidVersion(minVersion)) {
currValidWorkers++;
synchronized (lock) {
boolean didProvision = false;
final WorkerSetupData workerSetupData = workerSetupDataRef.get();
if (workerSetupData == null) {
log.warn("No workerSetupData available, cannot provision new workers.");
return false;
}
}
final Predicate<ZkWorker> isValidWorker = createValidWorkerPredicate(config, workerSetupData);
final int currValidWorkers = Collections2.filter(zkWorkers, isValidWorker).size();

if (currValidWorkers >= maxNumWorkers) {
log.debug(
"Cannot scale anymore. Num workers = %d, Max num workers = %d",
zkWorkers.size(),
workerSetupdDataRef.get().getMaxNumWorkers()
);
return false;
}

List<String> workerNodeIds = autoScalingStrategy.ipToIdLookup(
Lists.newArrayList(
Iterables.<ZkWorker, String>transform(
zkWorkers,
new Function<ZkWorker, String>()
{
@Override
public String apply(ZkWorker input)
final List<String> workerNodeIds = autoScalingStrategy.ipToIdLookup(
Lists.newArrayList(
Iterables.<ZkWorker, String>transform(
zkWorkers,
new Function<ZkWorker, String>()
{
return input.getWorker().getIp();
@Override
public String apply(ZkWorker input)
{
return input.getWorker().getIp();
}
}
}
)
)
);
)
)
);
currentlyProvisioning.removeAll(workerNodeIds);

currentlyProvisioning.removeAll(workerNodeIds);
boolean nothingProvisioning = currentlyProvisioning.isEmpty();
updateTargetWorkerCount(workerSetupData, pendingTasks, zkWorkers);

if (nothingProvisioning) {
if (hasTaskPendingBeyondThreshold(pendingTasks)) {
AutoScalingData provisioned = autoScalingStrategy.provision();

if (provisioned != null) {
currentlyProvisioning.addAll(provisioned.getNodeIds());
int want = targetWorkerCount - (currValidWorkers + currentlyProvisioning.size());
while (want > 0) {
final AutoScalingData provisioned = autoScalingStrategy.provision();
final List<String> newNodes;
if (provisioned == null || (newNodes = provisioned.getNodeIds()).isEmpty()) {
break;
} else {
currentlyProvisioning.addAll(newNodes);
lastProvisionTime = new DateTime();
scalingStats.addProvisionEvent(provisioned);

return true;
want -= provisioned.getNodeIds().size();
didProvision = true;
}
}
} else {
Duration durSinceLastProvision = new Duration(lastProvisionTime, new DateTime());

log.info(
"%s still provisioning. Wait for all provisioned nodes to complete before requesting new worker. Current wait time: %s",
currentlyProvisioning,
durSinceLastProvision
);
if (!currentlyProvisioning.isEmpty()) {
Duration durSinceLastProvision = new Duration(lastProvisionTime, new DateTime());

if (durSinceLastProvision.isLongerThan(config.getMaxScalingDuration().toStandardDuration())) {
log.makeAlert("Worker node provisioning taking too long!")
.addData("millisSinceLastProvision", durSinceLastProvision.getMillis())
.addData("provisioningCount", currentlyProvisioning.size())
.emit();
log.info("%s provisioning. Current wait time: %s", currentlyProvisioning, durSinceLastProvision);

List<String> nodeIps = autoScalingStrategy.idToIpLookup(Lists.newArrayList(currentlyProvisioning));
autoScalingStrategy.terminate(nodeIps);
currentlyProvisioning.clear();
if (durSinceLastProvision.isLongerThan(config.getMaxScalingDuration().toStandardDuration())) {
log.makeAlert("Worker node provisioning taking too long!")
.addData("millisSinceLastProvision", durSinceLastProvision.getMillis())
.addData("provisioningCount", currentlyProvisioning.size())
.emit();

List<String> nodeIps = autoScalingStrategy.idToIpLookup(Lists.newArrayList(currentlyProvisioning));
autoScalingStrategy.terminate(nodeIps);
currentlyProvisioning.clear();
}
}
}

return false;
return didProvision;
}
}

@Override
public boolean doTerminate(Collection<RemoteTaskRunnerWorkItem> pendingTasks, Collection<ZkWorker> zkWorkers)
{
Set<String> workerNodeIds = Sets.newHashSet(
autoScalingStrategy.ipToIdLookup(
Lists.newArrayList(
Iterables.transform(
zkWorkers,
new Function<ZkWorker, String>()
{
@Override
public String apply(ZkWorker input)
synchronized (lock) {
final WorkerSetupData workerSetupData = workerSetupDataRef.get();
if (workerSetupData == null) {
log.warn("No workerSetupData available, cannot terminate workers.");
return false;
}

boolean didTerminate = false;
final Set<String> workerNodeIds = Sets.newHashSet(
autoScalingStrategy.ipToIdLookup(
Lists.newArrayList(
Iterables.transform(
zkWorkers,
new Function<ZkWorker, String>()
{
return input.getWorker().getIp();
@Override
public String apply(ZkWorker input)
{
return input.getWorker().getIp();
}
}
}
)
)
)
);

Set<String> stillExisting = Sets.newHashSet();
for (String s : currentlyTerminating) {
if (workerNodeIds.contains(s)) {
stillExisting.add(s);
}
}
currentlyTerminating.clear();
currentlyTerminating.addAll(stillExisting);
boolean nothingTerminating = currentlyTerminating.isEmpty();

if (nothingTerminating) {
final int minNumWorkers = workerSetupdDataRef.get().getMinNumWorkers();
if (zkWorkers.size() <= minNumWorkers) {
log.info("Only [%d <= %d] nodes in the cluster, not terminating anything.", zkWorkers.size(), minNumWorkers);
return false;
}

List<ZkWorker> thoseLazyWorkers = Lists.newArrayList(
FunctionalIterable
.create(zkWorkers)
.filter(
new Predicate<ZkWorker>()
{
@Override
public boolean apply(ZkWorker input)
{
return input.getRunningTasks().isEmpty()
&& System.currentTimeMillis() - input.getLastCompletedTaskTime().getMillis()
>= config.getWorkerIdleTimeout().toStandardDuration().getMillis();
}
}
)
)
);

int maxPossibleNodesTerminated = zkWorkers.size() - minNumWorkers;
int numNodesToTerminate = Math.min(maxPossibleNodesTerminated, thoseLazyWorkers.size());
if (numNodesToTerminate <= 0) {
log.info("Found no nodes to terminate.");
return false;
}

AutoScalingData terminated = autoScalingStrategy.terminate(
Lists.transform(
thoseLazyWorkers.subList(0, numNodesToTerminate),
new Function<ZkWorker, String>()
{
@Override
public String apply(ZkWorker input)
{
return input.getWorker().getIp();
}
}
)
);

if (terminated != null) {
currentlyTerminating.addAll(terminated.getNodeIds());
lastTerminateTime = new DateTime();
scalingStats.addTerminateEvent(terminated);

return true;
final Set<String> stillExisting = Sets.newHashSet();
for (String s : currentlyTerminating) {
if (workerNodeIds.contains(s)) {
stillExisting.add(s);
}
}
} else {
Duration durSinceLastTerminate = new Duration(lastTerminateTime, new DateTime());
currentlyTerminating.clear();
currentlyTerminating.addAll(stillExisting);

log.info(
"%s still terminating. Wait for all nodes to terminate before trying again.",
currentlyTerminating
);
updateTargetWorkerCount(workerSetupData, pendingTasks, zkWorkers);

if (durSinceLastTerminate.isLongerThan(config.getMaxScalingDuration().toStandardDuration())) {
log.makeAlert("Worker node termination taking too long!")
.addData("millisSinceLastTerminate", durSinceLastTerminate.getMillis())
.addData("terminatingCount", currentlyTerminating.size())
.emit();
final Predicate<ZkWorker> isLazyWorker = createLazyWorkerPredicate(config, workerSetupData);
if (currentlyTerminating.isEmpty()) {
final int excessWorkers = (zkWorkers.size() + currentlyProvisioning.size()) - targetWorkerCount;
if (excessWorkers > 0) {
final List<String> laziestWorkerIps =
FluentIterable.from(zkWorkers)
.filter(isLazyWorker)
.limit(excessWorkers)
.transform(
new Function<ZkWorker, String>()
{
@Override
public String apply(ZkWorker zkWorker)
{
return zkWorker.getWorker().getIp();
}
}
)
.toList();

currentlyTerminating.clear();
if (laziestWorkerIps.isEmpty()) {
log.info("Wanted to terminate %,d workers, but couldn't find any lazy ones!", excessWorkers);
} else {
log.info(
"Terminating %,d workers (wanted %,d): %s",
laziestWorkerIps.size(),
excessWorkers,
Joiner.on(", ").join(laziestWorkerIps)
);

final AutoScalingData terminated = autoScalingStrategy.terminate(laziestWorkerIps);
if (terminated != null) {
currentlyTerminating.addAll(terminated.getNodeIds());
lastTerminateTime = new DateTime();
scalingStats.addTerminateEvent(terminated);
didTerminate = true;
}
}
}
} else {
Duration durSinceLastTerminate = new Duration(lastTerminateTime, new DateTime());

log.info("%s terminating. Current wait time: %s", currentlyTerminating, durSinceLastTerminate);

if (durSinceLastTerminate.isLongerThan(config.getMaxScalingDuration().toStandardDuration())) {
log.makeAlert("Worker node termination taking too long!")
.addData("millisSinceLastTerminate", durSinceLastTerminate.getMillis())
.addData("terminatingCount", currentlyTerminating.size())
.emit();

currentlyTerminating.clear();
}
}

return didTerminate;
}

return false;
}

@Override

@ -261,16 +246,128 @@ public class SimpleResourceManagementStrategy implements ResourceManagementStrat
return scalingStats;
}

private boolean hasTaskPendingBeyondThreshold(Collection<RemoteTaskRunnerWorkItem> pendingTasks)
private static Predicate<ZkWorker> createLazyWorkerPredicate(
final SimpleResourceManagementConfig config,
final WorkerSetupData workerSetupData
)
{
long now = System.currentTimeMillis();
for (TaskRunnerWorkItem pendingTask : pendingTasks) {
final Duration durationSinceInsertion = new Duration(pendingTask.getQueueInsertionTime().getMillis(), now);
final Duration timeoutDuration = config.getPendingTaskTimeout().toStandardDuration();
if (durationSinceInsertion.isEqual(timeoutDuration) || durationSinceInsertion.isLongerThan(timeoutDuration)) {
return true;
final Predicate<ZkWorker> isValidWorker = createValidWorkerPredicate(config, workerSetupData);

return new Predicate<ZkWorker>()
{
@Override
public boolean apply(ZkWorker worker)
{
final boolean itHasBeenAWhile = System.currentTimeMillis() - worker.getLastCompletedTaskTime().getMillis()
>= config.getWorkerIdleTimeout().toStandardDuration().getMillis();
return worker.getRunningTasks().isEmpty() && (itHasBeenAWhile || !isValidWorker.apply(worker));
}
};
}

private static Predicate<ZkWorker> createValidWorkerPredicate(
final SimpleResourceManagementConfig config,
final WorkerSetupData workerSetupData
)
{
return new Predicate<ZkWorker>()
{
@Override
public boolean apply(ZkWorker zkWorker)
{
final String minVersion = workerSetupData.getMinVersion() != null
? workerSetupData.getMinVersion()
: config.getWorkerVersion();
if (minVersion == null) {
throw new ISE("No minVersion found! It should be set in your runtime properties or configuration database.");
}
return zkWorker.isValidVersion(minVersion);
}
};
}

private void updateTargetWorkerCount(
final WorkerSetupData workerSetupData,
final Collection<RemoteTaskRunnerWorkItem> pendingTasks,
final Collection<ZkWorker> zkWorkers
)
{
synchronized (lock) {
final Collection<ZkWorker> validWorkers = Collections2.filter(
zkWorkers,
createValidWorkerPredicate(config, workerSetupData)
);
final Predicate<ZkWorker> isLazyWorker = createLazyWorkerPredicate(config, workerSetupData);

if (targetWorkerCount < 0) {
// Initialize to size of current worker pool, subject to pool size limits
targetWorkerCount = Math.max(
Math.min(
zkWorkers.size(),
workerSetupData.getMaxNumWorkers()
),
workerSetupData.getMinNumWorkers()
);
log.info(
"Starting with a target of %,d workers (current = %,d, min = %,d, max = %,d).",
targetWorkerCount,
validWorkers.size(),
workerSetupData.getMinNumWorkers(),
workerSetupData.getMaxNumWorkers()
);
}

final boolean atSteadyState = currentlyProvisioning.isEmpty()
&& currentlyTerminating.isEmpty()
&& validWorkers.size() == targetWorkerCount;
final boolean shouldScaleUp = atSteadyState
&& hasTaskPendingBeyondThreshold(pendingTasks)
&& targetWorkerCount < workerSetupData.getMaxNumWorkers();
final boolean shouldScaleDown = atSteadyState
&& Iterables.any(validWorkers, isLazyWorker)
&& targetWorkerCount > workerSetupData.getMinNumWorkers();
if (shouldScaleUp) {
targetWorkerCount++;
log.info(
"I think we should scale up to %,d workers (current = %,d, min = %,d, max = %,d).",
targetWorkerCount,
validWorkers.size(),
workerSetupData.getMinNumWorkers(),
workerSetupData.getMaxNumWorkers()
);
} else if (shouldScaleDown) {
targetWorkerCount--;
log.info(
"I think we should scale down to %,d workers (current = %,d, min = %,d, max = %,d).",
targetWorkerCount,
validWorkers.size(),
workerSetupData.getMinNumWorkers(),
workerSetupData.getMaxNumWorkers()
);
} else {
log.info(
"Our target is %,d workers, and I'm okay with that (current = %,d, min = %,d, max = %,d).",
targetWorkerCount,
validWorkers.size(),
workerSetupData.getMinNumWorkers(),
workerSetupData.getMaxNumWorkers()
);
}
}
return false;
}

private boolean hasTaskPendingBeyondThreshold(Collection<RemoteTaskRunnerWorkItem> pendingTasks)
{
synchronized (lock) {
long now = System.currentTimeMillis();
for (TaskRunnerWorkItem pendingTask : pendingTasks) {
final Duration durationSinceInsertion = new Duration(pendingTask.getQueueInsertionTime().getMillis(), now);
final Duration timeoutDuration = config.getPendingTaskTimeout().toStandardDuration();
if (durationSinceInsertion.isEqual(timeoutDuration) || durationSinceInsertion.isLongerThan(timeoutDuration)) {
return true;
}
}
return false;
}
}
}
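Note: the heart of this rewrite is updateTargetWorkerCount, which nudges targetWorkerCount by one step per cycle instead of provisioning and terminating ad hoc. A toy, self-contained model of that convergence rule (illustrative only; not the class above):

public class TargetCountRule
{
  static int nextTarget(
      int target, int validWorkers, boolean provisioning, boolean terminating,
      boolean pendingTooLong, boolean anyLazy, int min, int max
  )
  {
    // Only move when the pool is quiet and already at the current target.
    final boolean atSteadyState = !provisioning && !terminating && validWorkers == target;
    if (atSteadyState && pendingTooLong && target < max) {
      return target + 1; // scale up one worker at a time
    }
    if (atSteadyState && anyLazy && target > min) {
      return target - 1; // scale down one worker at a time
    }
    return target;
  }

  public static void main(String[] args)
  {
    System.out.println(nextTarget(3, 3, false, false, true, false, 1, 5)); // 4
    System.out.println(nextTarget(3, 3, false, false, false, true, 1, 5)); // 2
  }
}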
@ -38,7 +38,7 @@ public class WorkerConfig

@JsonProperty
@Min(1)
private int capacity = Runtime.getRuntime().availableProcessors() - 1;
private int capacity = Math.max(1, Runtime.getRuntime().availableProcessors() - 1);

public String getIp()
{
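Note: the Math.max(1, ...) guard matters on single-core hosts, where the old default of availableProcessors() - 1 evaluated to 0 and tripped the @Min(1) validation. A quick check of the arithmetic:

public class CapacityDefaultDemo
{
  public static void main(String[] args)
  {
    final int procs = 1; // single-core host
    System.out.println(procs - 1);               // 0: rejected by @Min(1)
    System.out.println(Math.max(1, procs - 1));  // 1: always valid
  }
}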
@ -20,16 +20,19 @@
package io.druid.indexing.worker.executor;

import com.fasterxml.jackson.databind.ObjectMapper;
import com.google.api.client.repackaged.com.google.common.base.Preconditions;
import com.google.common.base.Function;
import com.google.common.base.Throwables;
import com.google.common.util.concurrent.Futures;
import com.google.common.util.concurrent.ListenableFuture;
import com.google.inject.Inject;
import com.metamx.common.ISE;
import com.metamx.common.lifecycle.LifecycleStart;
import com.metamx.common.lifecycle.LifecycleStop;
import com.metamx.emitter.EmittingLogger;
import io.druid.concurrent.Execs;
import io.druid.indexing.common.TaskStatus;
import io.druid.indexing.common.actions.TaskActionClientFactory;
import io.druid.indexing.common.task.Task;
import io.druid.indexing.overlord.TaskRunner;

@ -47,6 +50,7 @@ public class ExecutorLifecycle
private static final EmittingLogger log = new EmittingLogger(ExecutorLifecycle.class);

private final ExecutorLifecycleConfig config;
private final TaskActionClientFactory taskActionClientFactory;
private final TaskRunner taskRunner;
private final ObjectMapper jsonMapper;

@ -57,11 +61,13 @@ public class ExecutorLifecycle
@Inject
public ExecutorLifecycle(
ExecutorLifecycleConfig config,
TaskActionClientFactory taskActionClientFactory,
TaskRunner taskRunner,
ObjectMapper jsonMapper
)
{
this.config = config;
this.taskActionClientFactory = taskActionClientFactory;
this.taskRunner = taskRunner;
this.jsonMapper = jsonMapper;
}

@ -69,9 +75,9 @@ public class ExecutorLifecycle
@LifecycleStart
public void start()
{
final File taskFile = config.getTaskFile();
final File statusFile = config.getStatusFile();
final InputStream parentStream = config.getParentStream();
final File taskFile = Preconditions.checkNotNull(config.getTaskFile(), "taskFile");
final File statusFile = Preconditions.checkNotNull(config.getStatusFile(), "statusFile");
final InputStream parentStream = Preconditions.checkNotNull(config.getParentStream(), "parentStream");

final Task task;

@ -111,28 +117,41 @@ public class ExecutorLifecycle
}
);

statusFuture = Futures.transform(
taskRunner.run(task), new Function<TaskStatus, TaskStatus>()
{
@Override
public TaskStatus apply(TaskStatus taskStatus)
{
try {
log.info(
"Task completed with status: %s",
jsonMapper.writerWithDefaultPrettyPrinter().writeValueAsString(taskStatus)
);

statusFile.getParentFile().mkdirs();
jsonMapper.writeValue(statusFile, taskStatus);

return taskStatus;
}
catch (Exception e) {
throw Throwables.propagate(e);
}
// Won't hurt in remote mode, and is required for setting up locks in local mode:
try {
if (!task.isReady(taskActionClientFactory.create(task))) {
throw new ISE("Task is not ready to run yet!", task.getId());
}
} catch (Exception e) {
throw new ISE(e, "Failed to run isReady", task.getId());
}

statusFuture = Futures.transform(
taskRunner.run(task),
new Function<TaskStatus, TaskStatus>()
{
@Override
public TaskStatus apply(TaskStatus taskStatus)
{
try {
log.info(
"Task completed with status: %s",
jsonMapper.writerWithDefaultPrettyPrinter().writeValueAsString(taskStatus)
);

final File statusFileParent = statusFile.getParentFile();
if (statusFileParent != null) {
statusFileParent.mkdirs();
}
jsonMapper.writeValue(statusFile, taskStatus);

return taskStatus;
}
catch (Exception e) {
throw Throwables.propagate(e);
}
}
}
);
}

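Note: the status-file write still runs inside the Futures.transform callback; only the preconditions and the early isReady gate are new. A minimal sketch of the same two-argument transform pattern (assuming the era-appropriate Guava, where the overload without an executor still exists):

import com.google.common.base.Function;
import com.google.common.util.concurrent.Futures;
import com.google.common.util.concurrent.ListenableFuture;
import com.google.common.util.concurrent.SettableFuture;

public class TransformDemo
{
  public static void main(String[] args) throws Exception
  {
    final SettableFuture<String> status = SettableFuture.create();
    final ListenableFuture<Integer> length = Futures.transform(
        status,
        new Function<String, Integer>()
        {
          @Override
          public Integer apply(String input)
          {
            // Side effects such as writing a status file would happen here.
            return input.length();
          }
        }
    );
    status.set("SUCCESS");
    System.out.println(length.get()); // 7
  }
}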
@ -43,16 +43,24 @@
<div class="running_loading">Loading Running Tasks... this may take a few minutes</div>
<table id="runningTable"></table>

<h2>Pending Tasks</h2>
<h2>Pending Tasks - Tasks waiting to be assigned to a worker</h2>
<div class="pending_loading">Loading Pending Tasks... this may take a few minutes</div>
<table id="pendingTable"></table>

<h2>Workers</h2>
<h2>Waiting Tasks - Tasks waiting on locks</h2>
<div class="waiting_loading">Loading Waiting Tasks... this may take a few minutes</div>
<table id="waitingTable"></table>

<h2>Complete Tasks - Tasks recently completed</h2>
<div class="complete_loading">Loading Complete Tasks... this may take a few minutes</div>
<table id="completeTable"></table>

<h2>Remote Workers</h2>
<div class="workers_loading">Loading Workers... this may take a few minutes</div>
<table id="workerTable"></table>

<h2>Event Log</h2>
<div class="events_loading">Loading Event Log... this may take a few minutes</div>
<h2>Autoscaling Activity</h2>
<div class="events_loading">Loading Autoscaling Activities... this may take a few minutes</div>
<table id="eventTable"></table>
</div>
</body>
@ -3,14 +3,39 @@
var oTable = [];

$(document).ready(function() {
var augment = function(data) {
for (i = 0 ; i < data.length ; i++) {
var taskId = encodeURIComponent(data[i].id)
data[i].more =
'<a href="/druid/indexer/v1/task/' + taskId + '">payload</a>' +
'<a href="/druid/indexer/v1/task/' + taskId + '/status">status</a>' +
'<a href="/druid/indexer/v1/task/' + taskId + '/log">log (all)</a>' +
'<a href="/druid/indexer/v1/task/' + taskId + '/log?offset=-8192">log (last 8kb)</a>'
}
}

$.get('/druid/indexer/v1/runningTasks', function(data) {
$('.running_loading').hide();
buildTable(data, $('#runningTable'), ["segments"]);
augment(data);
buildTable(data, $('#runningTable'));
});

$.get('/druid/indexer/v1/pendingTasks', function(data) {
$('.pending_loading').hide();
buildTable(data, $('#pendingTable'), ["segments"]);
augment(data);
buildTable(data, $('#pendingTable'));
});

$.get('/druid/indexer/v1/waitingTasks', function(data) {
$('.waiting_loading').hide();
augment(data);
buildTable(data, $('#waitingTable'));
});

$.get('/druid/indexer/v1/completeTasks', function(data) {
$('.complete_loading').hide();
augment(data);
buildTable(data, $('#completeTable'));
});

$.get('/druid/indexer/v1/workers', function(data) {

@ -22,4 +47,4 @@ $(document).ready(function() {
$('.events_loading').hide();
buildTable(data, $('#eventTable'));
});
});
});
@ -44,7 +44,7 @@ public class TestMergeTask extends MergeTask
Lists.<DataSegment>newArrayList(
new DataSegment(
"dummyDs",
new Interval(new DateTime(), new DateTime()),
new Interval(new DateTime(), new DateTime().plus(1)),
new DateTime().toString(),
null,
null,
@ -32,7 +32,7 @@ import io.druid.timeline.partition.NoneShardSpec;
/**
*/
@JsonTypeName("test_realtime")
public class TestRealtimeTask extends RealtimeIndexTask implements TestTask
public class TestRealtimeTask extends RealtimeIndexTask
{
private final TaskStatus status;

@ -64,13 +64,6 @@ public class TestRealtimeTask extends RealtimeIndexTask implements TestTask
return "test_realtime";
}

@Override
@JsonProperty
public TaskStatus getStatus()
{
return status;
}

@Override
public TaskStatus run(TaskToolbox toolbox) throws Exception
{
@ -67,7 +67,7 @@ public class MergeTaskBaseTest
@Test
public void testInterval()
{
Assert.assertEquals(new Interval("2012-01-03/2012-01-07"), testMergeTaskBase.getImplicitLockInterval().get());
Assert.assertEquals(new Interval("2012-01-03/2012-01-07"), testMergeTaskBase.getInterval());
}

@Test
@ -19,14 +19,15 @@

package io.druid.indexing.common.task;

import com.fasterxml.jackson.databind.Module;
import com.fasterxml.jackson.databind.ObjectMapper;
import com.google.common.base.Optional;
import com.google.common.collect.ImmutableList;
import com.google.common.collect.ImmutableMap;
import com.metamx.common.Granularity;
import io.druid.data.input.impl.JSONDataSpec;
import io.druid.data.input.impl.TimestampSpec;
import io.druid.granularity.QueryGranularity;
import io.druid.guice.FirehoseModule;
import io.druid.indexer.HadoopDruidIndexerSchema;
import io.druid.indexer.granularity.UniformGranularitySpec;
import io.druid.indexer.rollup.DataRollupSpec;

@ -36,6 +37,7 @@ import io.druid.query.aggregation.CountAggregatorFactory;
import io.druid.query.aggregation.DoubleSumAggregatorFactory;
import io.druid.segment.IndexGranularity;
import io.druid.segment.realtime.Schema;
import io.druid.segment.realtime.firehose.LocalFirehoseFactory;
import io.druid.timeline.DataSegment;
import io.druid.timeline.partition.NoneShardSpec;
import junit.framework.Assert;

@ -43,12 +45,14 @@ import org.joda.time.Interval;
import org.joda.time.Period;
import org.junit.Test;

import java.io.File;

public class TaskSerdeTest
{
@Test
public void testIndexTaskSerde() throws Exception
{
final Task task = new IndexTask(
final IndexTask task = new IndexTask(
null,
"foo",
new UniformGranularitySpec(Granularity.DAY, ImmutableList.of(new Interval("2010-01-01/P2D"))),

@ -56,62 +60,34 @@ public class TaskSerdeTest
new AggregatorFactory[]{new DoubleSumAggregatorFactory("met", "met")},
QueryGranularity.NONE,
10000,
null,
new LocalFirehoseFactory(new File("lol"), "rofl", null),
-1
);

final ObjectMapper jsonMapper = new DefaultObjectMapper();
for (final Module jacksonModule : new FirehoseModule().getJacksonModules()) {
jsonMapper.registerModule(jacksonModule);
}
final String json = jsonMapper.writeValueAsString(task);

Thread.sleep(100); // Just want to run the clock a bit to make sure the task id doesn't change
final Task task2 = jsonMapper.readValue(json, Task.class);
final IndexTask task2 = (IndexTask) jsonMapper.readValue(json, Task.class);

Assert.assertEquals("foo", task.getDataSource());
Assert.assertEquals(Optional.of(new Interval("2010-01-01/P2D")), task.getImplicitLockInterval());
Assert.assertEquals(new Interval("2010-01-01/P2D"), task.getInterval());

Assert.assertEquals(task.getId(), task2.getId());
Assert.assertEquals(task.getGroupId(), task2.getGroupId());
Assert.assertEquals(task.getDataSource(), task2.getDataSource());
Assert.assertEquals(task.getImplicitLockInterval(), task2.getImplicitLockInterval());
}

@Test
public void testIndexGeneratorTaskSerde() throws Exception
{
final Task task = new IndexGeneratorTask(
null,
"foo",
new Interval("2010-01-01/P1D"),
null,
new Schema(
"foo",
null,
new AggregatorFactory[]{new DoubleSumAggregatorFactory("met", "met")},
QueryGranularity.NONE,
new NoneShardSpec()
),
-1
);

final ObjectMapper jsonMapper = new DefaultObjectMapper();
final String json = jsonMapper.writeValueAsString(task);

Thread.sleep(100); // Just want to run the clock a bit to make sure the task id doesn't change
final Task task2 = jsonMapper.readValue(json, Task.class);

Assert.assertEquals("foo", task.getDataSource());
Assert.assertEquals(Optional.of(new Interval("2010-01-01/P1D")), task.getImplicitLockInterval());

Assert.assertEquals(task.getId(), task2.getId());
Assert.assertEquals(task.getGroupId(), task2.getGroupId());
Assert.assertEquals(task.getDataSource(), task2.getDataSource());
Assert.assertEquals(task.getImplicitLockInterval(), task2.getImplicitLockInterval());
Assert.assertEquals(task.getInterval(), task2.getInterval());
Assert.assertTrue(task.getFirehoseFactory() instanceof LocalFirehoseFactory);
Assert.assertTrue(task2.getFirehoseFactory() instanceof LocalFirehoseFactory);
}

@Test
public void testMergeTaskSerde() throws Exception
{
final Task task = new MergeTask(
final MergeTask task = new MergeTask(
null,
"foo",
ImmutableList.<DataSegment>of(

@ -126,26 +102,26 @@ public class TaskSerdeTest
final String json = jsonMapper.writeValueAsString(task);

Thread.sleep(100); // Just want to run the clock a bit to make sure the task id doesn't change
final Task task2 = jsonMapper.readValue(json, Task.class);
final MergeTask task2 = (MergeTask) jsonMapper.readValue(json, Task.class);

Assert.assertEquals("foo", task.getDataSource());
Assert.assertEquals(Optional.of(new Interval("2010-01-01/P1D")), task.getImplicitLockInterval());
Assert.assertEquals(new Interval("2010-01-01/P1D"), task.getInterval());

Assert.assertEquals(task.getId(), task2.getId());
Assert.assertEquals(task.getGroupId(), task2.getGroupId());
Assert.assertEquals(task.getDataSource(), task2.getDataSource());
Assert.assertEquals(task.getImplicitLockInterval(), task2.getImplicitLockInterval());
Assert.assertEquals(((MergeTask) task).getSegments(), ((MergeTask) task2).getSegments());
Assert.assertEquals(task.getInterval(), task2.getInterval());
Assert.assertEquals(task.getSegments(), task2.getSegments());
Assert.assertEquals(
((MergeTask) task).getAggregators().get(0).getName(),
((MergeTask) task2).getAggregators().get(0).getName()
task.getAggregators().get(0).getName(),
task2.getAggregators().get(0).getName()
);
}

@Test
public void testKillTaskSerde() throws Exception
{
final Task task = new KillTask(
final KillTask task = new KillTask(
null,
"foo",
new Interval("2010-01-01/P1D")

@ -155,21 +131,21 @@ public class TaskSerdeTest
final String json = jsonMapper.writeValueAsString(task);

Thread.sleep(100); // Just want to run the clock a bit to make sure the task id doesn't change
final Task task2 = jsonMapper.readValue(json, Task.class);
final KillTask task2 = (KillTask) jsonMapper.readValue(json, Task.class);

Assert.assertEquals("foo", task.getDataSource());
Assert.assertEquals(Optional.of(new Interval("2010-01-01/P1D")), task.getImplicitLockInterval());
Assert.assertEquals(new Interval("2010-01-01/P1D"), task.getInterval());

Assert.assertEquals(task.getId(), task2.getId());
Assert.assertEquals(task.getGroupId(), task2.getGroupId());
Assert.assertEquals(task.getDataSource(), task2.getDataSource());
Assert.assertEquals(task.getImplicitLockInterval(), task2.getImplicitLockInterval());
Assert.assertEquals(task.getInterval(), task2.getInterval());
}

@Test
public void testVersionConverterTaskSerde() throws Exception
{
final Task task = VersionConverterTask.create(
final VersionConverterTask task = VersionConverterTask.create(
DataSegment.builder().dataSource("foo").interval(new Interval("2010-01-01/P1D")).version("1234").build()
);

@ -177,22 +153,22 @@ public class TaskSerdeTest
final String json = jsonMapper.writeValueAsString(task);

Thread.sleep(100); // Just want to run the clock a bit to make sure the task id doesn't change
final Task task2 = jsonMapper.readValue(json, Task.class);
final VersionConverterTask task2 = (VersionConverterTask) jsonMapper.readValue(json, Task.class);

Assert.assertEquals("foo", task.getDataSource());
Assert.assertEquals(Optional.of(new Interval("2010-01-01/P1D")), task.getImplicitLockInterval());
Assert.assertEquals(new Interval("2010-01-01/P1D"), task.getInterval());

Assert.assertEquals(task.getId(), task2.getId());
Assert.assertEquals(task.getGroupId(), task2.getGroupId());
Assert.assertEquals(task.getDataSource(), task2.getDataSource());
Assert.assertEquals(task.getImplicitLockInterval(), task2.getImplicitLockInterval());
Assert.assertEquals(((VersionConverterTask) task).getSegment(), ((VersionConverterTask) task).getSegment());
Assert.assertEquals(task.getInterval(), task2.getInterval());
Assert.assertEquals(task.getSegment(), task.getSegment());
}

@Test
public void testVersionConverterSubTaskSerde() throws Exception
{
final Task task = new VersionConverterTask.SubTask(
final VersionConverterTask.SubTask task = new VersionConverterTask.SubTask(
"myGroupId",
DataSegment.builder().dataSource("foo").interval(new Interval("2010-01-01/P1D")).version("1234").build()
);

@ -201,26 +177,21 @@ public class TaskSerdeTest
final String json = jsonMapper.writeValueAsString(task);

Thread.sleep(100); // Just want to run the clock a bit to make sure the task id doesn't change
final Task task2 = jsonMapper.readValue(json, Task.class);
final VersionConverterTask.SubTask task2 = (VersionConverterTask.SubTask) jsonMapper.readValue(json, Task.class);

Assert.assertEquals("foo", task.getDataSource());
Assert.assertEquals(Optional.of(new Interval("2010-01-01/P1D")), task.getImplicitLockInterval());
Assert.assertEquals("myGroupId", task.getGroupId());

Assert.assertEquals(task.getId(), task2.getId());
Assert.assertEquals(task.getGroupId(), task2.getGroupId());
Assert.assertEquals(task.getDataSource(), task2.getDataSource());
Assert.assertEquals(task.getImplicitLockInterval(), task2.getImplicitLockInterval());
Assert.assertEquals(
((VersionConverterTask.SubTask) task).getSegment(),
((VersionConverterTask.SubTask) task).getSegment()
);
Assert.assertEquals(task.getSegment(), task2.getSegment());
}

@Test
public void testRealtimeIndexTaskSerde() throws Exception
{
final Task task = new RealtimeIndexTask(
final RealtimeIndexTask task = new RealtimeIndexTask(
null,
new TaskResource("rofl", 2),
new Schema("foo", null, new AggregatorFactory[0], QueryGranularity.NONE, new NoneShardSpec()),

@ -235,32 +206,27 @@ public class TaskSerdeTest
final String json = jsonMapper.writeValueAsString(task);

Thread.sleep(100); // Just want to run the clock a bit to make sure the task id doesn't change
final Task task2 = jsonMapper.readValue(json, Task.class);
final RealtimeIndexTask task2 = (RealtimeIndexTask) jsonMapper.readValue(json, Task.class);

Assert.assertEquals("foo", task.getDataSource());
Assert.assertEquals(Optional.<Interval>absent(), task.getImplicitLockInterval());
Assert.assertEquals(2, task.getTaskResource().getRequiredCapacity());
Assert.assertEquals("rofl", task.getTaskResource().getAvailabilityGroup());
Assert.assertEquals(new Period("PT10M"), ((RealtimeIndexTask) task).getWindowPeriod());
Assert.assertEquals(IndexGranularity.HOUR, ((RealtimeIndexTask) task).getSegmentGranularity());
Assert.assertEquals(new Period("PT10M"), task.getWindowPeriod());
Assert.assertEquals(IndexGranularity.HOUR, task.getSegmentGranularity());

Assert.assertEquals(task.getId(), task2.getId());
Assert.assertEquals(task.getGroupId(), task2.getGroupId());
Assert.assertEquals(task.getDataSource(), task2.getDataSource());
Assert.assertEquals(task.getImplicitLockInterval(), task2.getImplicitLockInterval());
Assert.assertEquals(task.getTaskResource().getRequiredCapacity(), task2.getTaskResource().getRequiredCapacity());
Assert.assertEquals(task.getTaskResource().getAvailabilityGroup(), task2.getTaskResource().getAvailabilityGroup());
Assert.assertEquals(((RealtimeIndexTask) task).getWindowPeriod(), ((RealtimeIndexTask) task2).getWindowPeriod());
Assert.assertEquals(
((RealtimeIndexTask) task).getSegmentGranularity(),
((RealtimeIndexTask) task2).getSegmentGranularity()
);
Assert.assertEquals(task.getWindowPeriod(), task2.getWindowPeriod());
Assert.assertEquals(task.getSegmentGranularity(), task2.getSegmentGranularity());
}

@Test
public void testDeleteTaskSerde() throws Exception
{
final Task task = new DeleteTask(
final DeleteTask task = new DeleteTask(
null,
"foo",
new Interval("2010-01-01/P1D")

@ -270,46 +236,44 @@ public class TaskSerdeTest
final String json = jsonMapper.writeValueAsString(task);

Thread.sleep(100); // Just want to run the clock a bit to make sure the task id doesn't change
final Task task2 = jsonMapper.readValue(json, Task.class);
final DeleteTask task2 = (DeleteTask) jsonMapper.readValue(json, Task.class);

Assert.assertEquals("foo", task.getDataSource());
Assert.assertEquals(Optional.of(new Interval("2010-01-01/P1D")), task.getImplicitLockInterval());
Assert.assertEquals(new Interval("2010-01-01/P1D"), task.getInterval());

Assert.assertEquals(task.getId(), task2.getId());
Assert.assertEquals(task.getGroupId(), task2.getGroupId());
Assert.assertEquals(task.getDataSource(), task2.getDataSource());
Assert.assertEquals(task.getImplicitLockInterval(), task2.getImplicitLockInterval());
Assert.assertEquals(task.getImplicitLockInterval().get(), task2.getImplicitLockInterval().get());
Assert.assertEquals(task.getInterval(), task2.getInterval());
}

@Test
public void testDeleteTaskFromJson() throws Exception
{
final ObjectMapper jsonMapper = new DefaultObjectMapper();
final Task task = jsonMapper.readValue(
final DeleteTask task = (DeleteTask) jsonMapper.readValue(
"{\"type\":\"delete\",\"dataSource\":\"foo\",\"interval\":\"2010-01-01/P1D\"}",
Task.class
);
final String json = jsonMapper.writeValueAsString(task);

Thread.sleep(100); // Just want to run the clock a bit to make sure the task id doesn't change
final Task task2 = jsonMapper.readValue(json, Task.class);
final DeleteTask task2 = (DeleteTask) jsonMapper.readValue(json, Task.class);

Assert.assertNotNull(task.getId());
Assert.assertEquals("foo", task.getDataSource());
Assert.assertEquals(Optional.of(new Interval("2010-01-01/P1D")), task.getImplicitLockInterval());
Assert.assertEquals(new Interval("2010-01-01/P1D"), task.getInterval());

Assert.assertEquals(task.getId(), task2.getId());
Assert.assertEquals(task.getGroupId(), task2.getGroupId());
Assert.assertEquals(task.getDataSource(), task2.getDataSource());
Assert.assertEquals(task.getImplicitLockInterval(), task2.getImplicitLockInterval());
Assert.assertEquals(task.getImplicitLockInterval().get(), task2.getImplicitLockInterval().get());
Assert.assertEquals(task.getInterval(), task2.getInterval());
}

@Test
public void testAppendTaskSerde() throws Exception
{
final Task task = new AppendTask(
final AppendTask task = new AppendTask(
null,
"foo",
ImmutableList.of(

@ -321,17 +285,67 @@ public class TaskSerdeTest
final String json = jsonMapper.writeValueAsString(task);

Thread.sleep(100); // Just want to run the clock a bit to make sure the task id doesn't change
final Task task2 = jsonMapper.readValue(json, Task.class);
final AppendTask task2 = (AppendTask) jsonMapper.readValue(json, Task.class);

Assert.assertEquals("foo", task.getDataSource());
Assert.assertEquals(Optional.of(new Interval("2010-01-01/P1D")), task.getImplicitLockInterval());
Assert.assertEquals(new Interval("2010-01-01/P1D"), task.getInterval());

Assert.assertEquals(task.getId(), task2.getId());
Assert.assertEquals(task.getGroupId(), task2.getGroupId());
Assert.assertEquals(task.getDataSource(), task2.getDataSource());
Assert.assertEquals(task.getImplicitLockInterval(), task2.getImplicitLockInterval());
Assert.assertEquals(task.getImplicitLockInterval().get(), task2.getImplicitLockInterval().get());
Assert.assertEquals(((AppendTask) task).getSegments(), ((AppendTask) task2).getSegments());
Assert.assertEquals(task.getInterval(), task2.getInterval());
Assert.assertEquals(task.getSegments(), task2.getSegments());
}

@Test
public void testArchiveTaskSerde() throws Exception
{
final ArchiveTask task = new ArchiveTask(
null,
"foo",
new Interval("2010-01-01/P1D")
);

final ObjectMapper jsonMapper = new DefaultObjectMapper();
final String json = jsonMapper.writeValueAsString(task);

Thread.sleep(100); // Just want to run the clock a bit to make sure the task id doesn't change
final ArchiveTask task2 = (ArchiveTask) jsonMapper.readValue(json, Task.class);

Assert.assertEquals("foo", task.getDataSource());
Assert.assertEquals(new Interval("2010-01-01/P1D"), task.getInterval());

Assert.assertEquals(task.getId(), task2.getId());
Assert.assertEquals(task.getGroupId(), task2.getGroupId());
Assert.assertEquals(task.getDataSource(), task2.getDataSource());
Assert.assertEquals(task.getInterval(), task2.getInterval());
}

@Test
public void testMoveTaskSerde() throws Exception
{
final MoveTask task = new MoveTask(
null,
"foo",
new Interval("2010-01-01/P1D"),
ImmutableMap.<String, Object>of("bucket", "hey", "baseKey", "what")
);

final ObjectMapper jsonMapper = new DefaultObjectMapper();
final String json = jsonMapper.writeValueAsString(task);

Thread.sleep(100); // Just want to run the clock a bit to make sure the task id doesn't change
final MoveTask task2 = (MoveTask) jsonMapper.readValue(json, Task.class);

Assert.assertEquals("foo", task.getDataSource());
Assert.assertEquals(new Interval("2010-01-01/P1D"), task.getInterval());
Assert.assertEquals(ImmutableMap.<String, Object>of("bucket", "hey", "baseKey", "what"), task.getTargetLoadSpec());

Assert.assertEquals(task.getId(), task2.getId());
Assert.assertEquals(task.getGroupId(), task2.getGroupId());
Assert.assertEquals(task.getDataSource(), task2.getDataSource());
Assert.assertEquals(task.getInterval(), task2.getInterval());
Assert.assertEquals(task.getTargetLoadSpec(), task2.getTargetLoadSpec());
}

@Test

@ -364,14 +378,14 @@ public class TaskSerdeTest

final ObjectMapper jsonMapper = new DefaultObjectMapper();
final String json = jsonMapper.writeValueAsString(task);
final Task task2 = jsonMapper.readValue(json, Task.class);
final HadoopIndexTask task2 = (HadoopIndexTask) jsonMapper.readValue(json, Task.class);

Assert.assertEquals("foo", task.getDataSource());
Assert.assertEquals(Optional.of(new Interval("2010-01-01/P1D")), task.getImplicitLockInterval());
Assert.assertEquals(new Interval("2010-01-01/P1D"), task.getInterval());

Assert.assertEquals(task.getId(), task2.getId());
Assert.assertEquals(task.getGroupId(), task2.getGroupId());
Assert.assertEquals(task.getDataSource(), task2.getDataSource());
Assert.assertEquals(task.getImplicitLockInterval(), task2.getImplicitLockInterval());
Assert.assertEquals(task.getInterval(), task2.getInterval());
}
}
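Note: every test above follows the same round-trip shape: serialize, deserialize via the polymorphic Task base type, then compare typed getters. A condensed, self-contained sketch of that pattern with a stand-in type (DemoTask is a placeholder, not a real Druid task):

import com.fasterxml.jackson.annotation.JsonCreator;
import com.fasterxml.jackson.annotation.JsonProperty;
import com.fasterxml.jackson.annotation.JsonTypeInfo;
import com.fasterxml.jackson.annotation.JsonTypeName;
import com.fasterxml.jackson.databind.ObjectMapper;

public class SerdeRoundTripDemo
{
  @JsonTypeInfo(use = JsonTypeInfo.Id.NAME, property = "type")
  @JsonTypeName("demo")
  static class DemoTask
  {
    @JsonProperty
    final String dataSource;

    @JsonCreator
    DemoTask(@JsonProperty("dataSource") String dataSource)
    {
      this.dataSource = dataSource;
    }
  }

  public static void main(String[] args) throws Exception
  {
    final ObjectMapper mapper = new ObjectMapper();
    final String json = mapper.writeValueAsString(new DemoTask("foo"));
    final DemoTask back = mapper.readValue(json, DemoTask.class);
    // The real tests cast from Task.class and compare id, groupId,
    // dataSource, interval, and type-specific fields.
    System.out.println(json + " -> " + back.dataSource);
  }
}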
@ -28,6 +28,7 @@ import io.druid.indexing.common.actions.LockAcquireAction;
import io.druid.indexing.common.actions.LockListAction;
import io.druid.indexing.common.actions.LockReleaseAction;
import io.druid.indexing.common.actions.SegmentInsertAction;
import io.druid.indexing.common.actions.TaskActionClient;
import io.druid.indexing.common.task.AbstractTask;
import io.druid.indexing.common.task.TaskResource;
import io.druid.timeline.DataSegment;

@ -42,12 +43,12 @@ public class RealtimeishTask extends AbstractTask
{
public RealtimeishTask()
{
super("rt1", "rt", new TaskResource("rt1", 1), "foo", null);
super("rt1", "rt", new TaskResource("rt1", 1), "foo");
}

public RealtimeishTask(String id, String groupId, TaskResource taskResource, String dataSource, Interval interval)
public RealtimeishTask(String id, String groupId, TaskResource taskResource, String dataSource)
{
super(id, groupId, taskResource, dataSource, interval);
super(id, groupId, taskResource, dataSource);
}

@Override

@ -56,6 +57,12 @@ public class RealtimeishTask extends AbstractTask
return "realtime_test";
}

@Override
public boolean isReady(TaskActionClient taskActionClient) throws Exception
{
return true;
}

@Override
public TaskStatus run(TaskToolbox toolbox) throws Exception
{
RemoteTaskRunnerTest.java

@@ -23,8 +23,8 @@ import com.fasterxml.jackson.databind.ObjectMapper;
import com.google.api.client.repackaged.com.google.common.base.Throwables;
import com.google.common.base.Function;
import com.google.common.base.Joiner;
import com.google.common.collect.ImmutableSet;
import com.google.common.collect.Iterables;
import com.google.common.collect.Lists;
import com.google.common.collect.Sets;
import com.google.common.util.concurrent.ListenableFuture;
import com.metamx.emitter.EmittingLogger;

@@ -55,7 +55,6 @@ import org.junit.Assert;
import org.junit.Before;
import org.junit.Test;

import java.util.Arrays;
import java.util.Set;
import java.util.concurrent.Future;
import java.util.concurrent.atomic.AtomicReference;

@@ -219,7 +218,7 @@ public class RemoteTaskRunnerTest
      )
    );

    Assert.assertTrue(remoteTaskRunner.getPendingTasks().iterator().next().getTask().getId().equals("rt2"));
    Assert.assertTrue(remoteTaskRunner.getPendingTasks().iterator().next().getTaskId().equals("rt2"));
  }

  @Test

@@ -266,7 +265,7 @@ public class RemoteTaskRunnerTest
      )
    );

    Assert.assertTrue(remoteTaskRunner.getPendingTasks().iterator().next().getTask().getId().equals("rt2"));
    Assert.assertTrue(remoteTaskRunner.getPendingTasks().iterator().next().getTaskId().equals("rt2"));
  }

  @Test

@@ -280,7 +279,7 @@ public class RemoteTaskRunnerTest

    Assert.assertTrue(workerRunningTask(task.getId()));

    Assert.assertTrue(remoteTaskRunner.getRunningTasks().iterator().next().getTask().getId().equals("task"));
    Assert.assertTrue(remoteTaskRunner.getRunningTasks().iterator().next().getTaskId().equals("task"));

    cf.delete().forPath(joiner.join(statusPath, task.getId()));

@@ -303,18 +302,13 @@ public class RemoteTaskRunnerTest
    doSetup();

    Set<String> existingTasks = Sets.newHashSet();
    final Set<String> existingTasks = Sets.newHashSet();
    for (ZkWorker zkWorker : remoteTaskRunner.getWorkers()) {
      existingTasks.addAll(zkWorker.getRunningTasks().keySet());
    }
    Assert.assertEquals("existingTasks", ImmutableSet.of("first", "second"), existingTasks);

    Assert.assertTrue(existingTasks.size() == 2);
    Assert.assertTrue(existingTasks.contains("first"));
    Assert.assertTrue(existingTasks.contains("second"));

    remoteTaskRunner.bootstrap(Arrays.<Task>asList(TestMergeTask.createDummyTask("second")));

    Set<String> runningTasks = Sets.newHashSet(
    final Set<String> runningTasks = Sets.newHashSet(
        Iterables.transform(
            remoteTaskRunner.getRunningTasks(),
            new Function<RemoteTaskRunnerWorkItem, String>()

@@ -322,15 +316,12 @@ public class RemoteTaskRunnerTest
              @Override
              public String apply(RemoteTaskRunnerWorkItem input)
              {
                return input.getTask().getId();
                return input.getTaskId();
              }
            }
        )
    );

    Assert.assertTrue(runningTasks.size() == 1);
    Assert.assertTrue(runningTasks.contains("second"));
    Assert.assertFalse(runningTasks.contains("first"));
    Assert.assertEquals("runningTasks", ImmutableSet.of("first", "second"), runningTasks);
  }

  @Test

@@ -343,8 +334,6 @@ public class RemoteTaskRunnerTest
    doSetup();

    remoteTaskRunner.bootstrap(Arrays.<Task>asList(task));

    ListenableFuture<TaskStatus> future = remoteTaskRunner.run(task);

    TaskStatus status = future.get();

@@ -356,7 +345,6 @@ public class RemoteTaskRunnerTest
  public void testWorkerRemoved() throws Exception
  {
    doSetup();
    remoteTaskRunner.bootstrap(Lists.<Task>newArrayList());
    Future<TaskStatus> future = remoteTaskRunner.run(task);

    Assert.assertTrue(taskAnnounced(task.getId()));
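
The test above collects running-task IDs with Guava's Iterables.transform before comparing whole sets in one assertion. A minimal standalone sketch of that idiom follows; the TransformIdiom class and its WorkItem stand-in are illustrative, not Druid API — only the Guava calls are real.

import com.google.common.base.Function;
import com.google.common.collect.ImmutableSet;
import com.google.common.collect.Iterables;
import com.google.common.collect.Sets;

import java.util.Set;

public class TransformIdiom
{
  // Illustrative stand-in for RemoteTaskRunnerWorkItem.
  static class WorkItem
  {
    private final String taskId;

    WorkItem(String taskId) { this.taskId = taskId; }

    public String getTaskId() { return taskId; }
  }

  public static void main(String[] args)
  {
    final Set<WorkItem> items = ImmutableSet.of(new WorkItem("first"), new WorkItem("second"));

    // Project each work item to its task id, then materialize a set:
    // the same shape as the runningTasks assertion above.
    final Set<String> ids = Sets.newHashSet(
        Iterables.transform(
            items,
            new Function<WorkItem, String>()
            {
              @Override
              public String apply(WorkItem input)
              {
                return input.getTaskId();
              }
            }
        )
    );

    System.out.println(ids.equals(ImmutableSet.of("first", "second"))); // true
  }
}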
TaskLifecycleTest.java

@@ -19,6 +19,7 @@

package io.druid.indexing.overlord;

import com.google.api.client.repackaged.com.google.common.base.Preconditions;
import com.google.common.base.Optional;
import com.google.common.base.Throwables;
import com.google.common.collect.ImmutableList;

@@ -53,16 +54,19 @@ import io.druid.indexing.common.actions.SegmentInsertAction;
import io.druid.indexing.common.actions.TaskActionClientFactory;
import io.druid.indexing.common.actions.TaskActionToolbox;
import io.druid.indexing.common.config.TaskConfig;
import io.druid.indexing.common.task.AbstractTask;
import io.druid.indexing.common.config.TaskStorageConfig;
import io.druid.indexing.common.task.AbstractFixedIntervalTask;
import io.druid.indexing.common.task.IndexTask;
import io.druid.indexing.common.task.KillTask;
import io.druid.indexing.common.task.Task;
import io.druid.indexing.common.task.TaskResource;
import io.druid.indexing.overlord.exec.TaskConsumer;
import io.druid.indexing.overlord.config.TaskQueueConfig;
import io.druid.jackson.DefaultObjectMapper;
import io.druid.query.aggregation.AggregatorFactory;
import io.druid.query.aggregation.DoubleSumAggregatorFactory;
import io.druid.segment.loading.DataSegmentArchiver;
import io.druid.segment.loading.DataSegmentKiller;
import io.druid.segment.loading.DataSegmentMover;
import io.druid.segment.loading.DataSegmentPuller;
import io.druid.segment.loading.DataSegmentPusher;
import io.druid.segment.loading.LocalDataSegmentPuller;

@@ -84,6 +88,7 @@ import java.io.File;
import java.io.IOException;
import java.util.Iterator;
import java.util.List;
import java.util.Map;
import java.util.Set;

public class TaskLifecycleTest

@@ -96,7 +101,6 @@ public class TaskLifecycleTest
  private MockIndexerDBCoordinator mdc = null;
  private TaskActionClientFactory tac = null;
  private TaskToolboxFactory tb = null;
  private TaskConsumer tc = null;
  TaskStorageQueryAdapter tsqa = null;

  private static final Ordering<DataSegment> byIntervalOrdering = new Ordering<DataSegment>()

@@ -109,18 +113,26 @@ public class TaskLifecycleTest
  };

  @Before
  public void setUp()
  public void setUp() throws Exception
  {
    EmittingLogger.registerEmitter(EasyMock.createMock(ServiceEmitter.class));
    final ServiceEmitter emitter = EasyMock.createMock(ServiceEmitter.class);
    EmittingLogger.registerEmitter(emitter);

    tmp = Files.createTempDir();

    ts = new HeapMemoryTaskStorage();
    final TaskQueueConfig tqc = new DefaultObjectMapper().readValue(
        "{\"startDelay\":\"PT0S\", \"restartDelay\":\"PT1S\"}",
        TaskQueueConfig.class
    );
    ts = new HeapMemoryTaskStorage(
        new TaskStorageConfig()
        {
        }
    );
    tsqa = new TaskStorageQueryAdapter(ts);
    tl = new TaskLockbox(ts);
    tq = new TaskQueue(ts, tl);
    mdc = newMockMDC();
    tac = new LocalTaskActionClientFactory(ts, new TaskActionToolbox(tq, tl, mdc, newMockEmitter()));

    tac = new LocalTaskActionClientFactory(ts, new TaskActionToolbox(tl, mdc, newMockEmitter()));
    tb = new TaskToolboxFactory(
        new TaskConfig(tmp.toString(), null, null, 50000),
        tac,

@@ -147,6 +159,22 @@ public class TaskLifecycleTest

          }
        },
        new DataSegmentMover()
        {
          @Override
          public DataSegment move(DataSegment dataSegment, Map<String, Object> targetLoadSpec) throws SegmentLoadingException
          {
            return dataSegment;
          }
        },
        new DataSegmentArchiver()
        {
          @Override
          public DataSegment archive(DataSegment segment) throws SegmentLoadingException
          {
            return segment;
          }
        },
        null, // segment announcer
        null, // new segment server view
        null, // query runner factory conglomerate corporation unionized collective

@@ -171,14 +199,9 @@ public class TaskLifecycleTest
        ),
        new DefaultObjectMapper()
    );

    tr = new ThreadPoolTaskRunner(tb);

    tc = new TaskConsumer(tq, tr, tac, newMockEmitter());
    tsqa = new TaskStorageQueryAdapter(ts);

    tq = new TaskQueue(tqc, ts, tr, tac, tl, emitter);
    tq.start();
    tc.start();
  }

  @After

@@ -190,7 +213,6 @@ public class TaskLifecycleTest
    catch (Exception e) {
      // suppress
    }
    tc.stop();
    tq.stop();
  }

@@ -216,13 +238,13 @@ public class TaskLifecycleTest
        -1
    );

    final Optional<TaskStatus> preRunTaskStatus = tsqa.getSameGroupMergedStatus(indexTask.getId());
    final Optional<TaskStatus> preRunTaskStatus = tsqa.getStatus(indexTask.getId());
    Assert.assertTrue("pre run task status not present", !preRunTaskStatus.isPresent());

    final TaskStatus mergedStatus = runTask(indexTask);
    final TaskStatus status = ts.getStatus(indexTask.getId()).get();
    final List<DataSegment> publishedSegments = byIntervalOrdering.sortedCopy(mdc.getPublished());
    final List<DataSegment> loggedSegments = byIntervalOrdering.sortedCopy(tsqa.getSameGroupNewSegments(indexTask.getId()));
    final List<DataSegment> loggedSegments = byIntervalOrdering.sortedCopy(tsqa.getInsertedSegments(indexTask.getId()));

    Assert.assertEquals("statusCode", TaskStatus.Status.SUCCESS, status.getStatusCode());
    Assert.assertEquals("merged statusCode", TaskStatus.Status.SUCCESS, mergedStatus.getStatusCode());

@@ -264,11 +286,9 @@ public class TaskLifecycleTest
        -1
    );

    final TaskStatus mergedStatus = runTask(indexTask);
    final TaskStatus status = ts.getStatus(indexTask.getId()).get();
    final TaskStatus status = runTask(indexTask);

    Assert.assertEquals("statusCode", TaskStatus.Status.SUCCESS, status.getStatusCode());
    Assert.assertEquals("merged statusCode", TaskStatus.Status.FAILED, mergedStatus.getStatusCode());
    Assert.assertEquals("statusCode", TaskStatus.Status.FAILED, status.getStatusCode());
    Assert.assertEquals("num segments published", 0, mdc.getPublished().size());
    Assert.assertEquals("num segments nuked", 0, mdc.getNuked().size());
  }

@@ -297,10 +317,44 @@ public class TaskLifecycleTest
    Assert.assertEquals("num segments nuked", 0, mdc.getNuked().size());
  }

  @Test
  public void testNoopTask() throws Exception
  {
    final Task noopTask = new DefaultObjectMapper().readValue(
        "{\"type\":\"noop\", \"runTime\":\"100\"}\"",
        Task.class
    );
    final TaskStatus status = runTask(noopTask);

    Assert.assertEquals("statusCode", TaskStatus.Status.SUCCESS, status.getStatusCode());
    Assert.assertEquals("num segments published", 0, mdc.getPublished().size());
    Assert.assertEquals("num segments nuked", 0, mdc.getNuked().size());
  }

  @Test
  public void testNeverReadyTask() throws Exception
  {
    final Task neverReadyTask = new DefaultObjectMapper().readValue(
        "{\"type\":\"noop\", \"isReadyResult\":\"exception\"}\"",
        Task.class
    );
    final TaskStatus status = runTask(neverReadyTask);

    Assert.assertEquals("statusCode", TaskStatus.Status.FAILED, status.getStatusCode());
    Assert.assertEquals("num segments published", 0, mdc.getPublished().size());
    Assert.assertEquals("num segments nuked", 0, mdc.getNuked().size());
  }

  @Test
  public void testSimple() throws Exception
  {
    final Task task = new AbstractTask("id1", "id1", new TaskResource("id1", 1), "ds", new Interval("2012-01-01/P1D"))
    final Task task = new AbstractFixedIntervalTask(
        "id1",
        "id1",
        new TaskResource("id1", 1),
        "ds",
        new Interval("2012-01-01/P1D")
    )
    {
      @Override
      public String getType()

@@ -337,7 +391,7 @@ public class TaskLifecycleTest
  @Test
  public void testBadInterval() throws Exception
  {
    final Task task = new AbstractTask("id1", "id1", "ds", new Interval("2012-01-01/P1D"))
    final Task task = new AbstractFixedIntervalTask("id1", "id1", "ds", new Interval("2012-01-01/P1D"))
    {
      @Override
      public String getType()

@@ -371,7 +425,7 @@ public class TaskLifecycleTest
  @Test
  public void testBadVersion() throws Exception
  {
    final Task task = new AbstractTask("id1", "id1", "ds", new Interval("2012-01-01/P1D"))
    final Task task = new AbstractFixedIntervalTask("id1", "id1", "ds", new Interval("2012-01-01/P1D"))
    {
      @Override
      public String getType()

@@ -402,28 +456,41 @@ public class TaskLifecycleTest
    Assert.assertEquals("segments nuked", 0, mdc.getNuked().size());
  }

  private TaskStatus runTask(Task task)
  private TaskStatus runTask(final Task task) throws Exception
  {
    final Task dummyTask = new DefaultObjectMapper().readValue(
        "{\"type\":\"noop\", \"isReadyResult\":\"exception\"}\"",
        Task.class
    );
    final long startTime = System.currentTimeMillis();

    Preconditions.checkArgument(!task.getId().equals(dummyTask.getId()));

    tq.add(dummyTask);
    tq.add(task);

    TaskStatus status;
    TaskStatus retVal = null;

    try {
      while ((status = tsqa.getSameGroupMergedStatus(task.getId()).get()).isRunnable()) {
        if (System.currentTimeMillis() > startTime + 10 * 1000) {
          throw new ISE("Where did the task go?!: %s", task.getId());
    for (final String taskId : ImmutableList.of(dummyTask.getId(), task.getId())) {
      try {
        TaskStatus status;
        while ((status = tsqa.getStatus(taskId).get()).isRunnable()) {
          if (System.currentTimeMillis() > startTime + 10 * 1000) {
            throw new ISE("Where did the task go?!: %s", task.getId());
          }

          Thread.sleep(100);
        }

        Thread.sleep(100);
        if (taskId.equals(task.getId())) {
          retVal = status;
        }
      }
      catch (Exception e) {
        throw Throwables.propagate(e);
      }
    }
    catch (Exception e) {
      throw Throwables.propagate(e);
    }

    return status;
    return retVal;
  }

  private static class MockIndexerDBCoordinator extends IndexerDBCoordinator
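
The runTask helper above busy-polls a task's status every 100 ms until it stops being runnable, bailing out after ten seconds. A self-contained sketch of that wait loop, with a status supplier and a Guava Predicate standing in for tsqa.getStatus — the StatusWaiter class and its names are illustrative, not Druid code:

import com.google.common.base.Predicate;

import java.util.concurrent.Callable;

public final class StatusWaiter
{
  private StatusWaiter() {}

  // Poll statusCall until done.apply(status) is true, or fail after timeoutMillis.
  public static <T> T waitFor(Callable<T> statusCall, Predicate<T> done, long timeoutMillis) throws Exception
  {
    final long deadline = System.currentTimeMillis() + timeoutMillis;
    while (true) {
      final T status = statusCall.call();
      if (done.apply(status)) {
        return status;
      }
      if (System.currentTimeMillis() > deadline) {
        throw new IllegalStateException("Where did the task go?!");
      }
      Thread.sleep(100);
    }
  }
}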
TaskQueueTest.java

@@ -1,438 +0,0 @@
/*
 * Druid - a distributed column store.
 * Copyright (C) 2012, 2013  Metamarkets Group Inc.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version 2
 * of the License, or (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
 */

package io.druid.indexing.overlord;

import com.google.common.collect.ImmutableList;
import com.google.common.collect.ImmutableMap;
import com.google.common.collect.Iterables;
import com.google.common.collect.Lists;
import com.google.common.collect.Sets;
import io.druid.indexing.common.SegmentLoaderFactory;
import io.druid.indexing.common.TaskLock;
import io.druid.indexing.common.TaskStatus;
import io.druid.indexing.common.TaskToolbox;
import io.druid.indexing.common.TaskToolboxFactory;
import io.druid.indexing.common.actions.LocalTaskActionClientFactory;
import io.druid.indexing.common.actions.SpawnTasksAction;
import io.druid.indexing.common.actions.TaskActionToolbox;
import io.druid.indexing.common.config.TaskConfig;
import io.druid.indexing.common.task.AbstractTask;
import io.druid.indexing.common.task.Task;
import io.druid.segment.loading.DataSegmentPuller;
import io.druid.segment.loading.LocalDataSegmentPuller;
import io.druid.segment.loading.OmniSegmentLoader;
import io.druid.segment.loading.SegmentLoaderConfig;
import io.druid.segment.loading.StorageLocationConfig;
import org.joda.time.Interval;
import org.junit.Assert;
import org.junit.Test;

import java.util.List;
import java.util.Set;

public class TaskQueueTest
{
  @Test
  public void testEmptyQueue() throws Exception
  {
    final TaskStorage ts = new HeapMemoryTaskStorage();
    final TaskLockbox tl = new TaskLockbox(ts);
    final TaskQueue tq = newTaskQueue(ts, tl);

    // get task status for nonexistent task
    Assert.assertFalse("getStatus", ts.getStatus("foo").isPresent());

    // poll on empty queue
    Assert.assertNull("poll", tq.poll());
  }

  public static TaskQueue newTaskQueue(TaskStorage storage, TaskLockbox lockbox)
  {
    final TaskQueue tq = new TaskQueue(storage, lockbox);
    tq.bootstrap();
    tq.start();
    return tq;
  }

  @Test
  public void testAddRemove() throws Exception
  {
    final TaskStorage ts = new HeapMemoryTaskStorage();
    final TaskLockbox tl = new TaskLockbox(ts);
    final TaskQueue tq = newTaskQueue(ts, tl);

    final Task[] tasks = {
        newTask("T0", "G0", "bar", new Interval("2011/P1Y")),
        newTask("T1", "G1", "bar", new Interval("2011-03-01/P1D")),
        newTask("T2", "G2", "foo", new Interval("2011-03-01/P1D")),
        newTask("T3", "G3", "foo", new Interval("2011/P1Y")),
        newTask("T4", "G4", "foo", new Interval("2012-01-02/P1D")),
        newTask("T5", "G5", "foo", new Interval("2012-02-01/PT1H"))
    };

    Throwable thrown;

    for (Task task : tasks) {
      tq.add(task);
    }

    // get task status for in-progress task
    Assert.assertEquals(
        "T2 status (before finishing)",
        TaskStatus.Status.RUNNING,
        ts.getStatus(tasks[2].getId()).get().getStatusCode()
    );

    // Can't add tasks with the same id
    thrown = null;
    try {
      tq.add(newTask("T5", "G5", "baz", new Interval("2013-02-01/PT1H")));
    }
    catch (TaskExistsException e) {
      thrown = e;
    }

    Assert.assertNotNull("Exception on duplicate task id", thrown);

    // take max number of tasks
    final List<Task> taken = Lists.newArrayList();
    while (true) {
      final Task task = tq.poll();
      if (task != null) {
        taken.add(task);
      } else {
        break;
      }
    }

    // check them
    Assert.assertEquals(
        "Taken tasks (round 1)",
        Lists.newArrayList(
            tasks[0], tasks[2], tasks[4], tasks[5]
        ),
        taken
    );

    // mark one done
    tq.notify(tasks[2], tasks[2].run(null));

    // get its status back
    Assert.assertEquals(
        "T2 status (after finishing)",
        TaskStatus.Status.SUCCESS,
        ts.getStatus(tasks[2].getId()).get().getStatusCode()
    );

    // We should be able to get one more task now
    taken.clear();
    while (true) {
      final Task task = tq.poll();
      if (task != null) {
        taken.add(task);
      } else {
        break;
      }
    }

    // check it
    Assert.assertEquals(
        "Taken tasks (round 2)",
        Lists.newArrayList(
            tasks[3]
        ),
        taken
    );

    // there should be no more tasks to get
    Assert.assertNull("poll queue with no tasks available", tq.poll());
  }

  @Test
  public void testContinues() throws Exception
  {
    final TaskStorage ts = new HeapMemoryTaskStorage();
    final TaskLockbox tl = new TaskLockbox(ts);
    final TaskQueue tq = newTaskQueue(ts, tl);
    final TaskToolboxFactory tb = new TaskToolboxFactory(
        new TaskConfig(null, null, null, null),
        new LocalTaskActionClientFactory(ts, new TaskActionToolbox(tq, tl, null, null)),
        null,
        null,
        null,
        null,
        null,
        null,
        null,
        null,
        new SegmentLoaderFactory(
            new OmniSegmentLoader(
                ImmutableMap.<String, DataSegmentPuller>of(
                    "local",
                    new LocalDataSegmentPuller()
                ),
                null,
                new SegmentLoaderConfig()
                {
                  @Override
                  public List<StorageLocationConfig> getLocations()
                  {
                    return Lists.newArrayList();
                  }
                }
            )
        ),
        null
    );

    final Task t0 = newTask("T0", "G0", "bar", new Interval("2011/P1Y"));
    final Task t1 = newContinuedTask("T1", "G1", "bar", new Interval("2013/P1Y"), Lists.newArrayList(t0));
    tq.add(t1);

    Assert.assertTrue("T0 isPresent (#1)", !ts.getStatus("T0").isPresent());
    Assert.assertTrue("T1 isPresent (#1)", ts.getStatus("T1").isPresent());
    Assert.assertTrue("T1 isRunnable (#1)", ts.getStatus("T1").get().isRunnable());
    Assert.assertTrue("T1 isComplete (#1)", !ts.getStatus("T1").get().isComplete());

    // should be able to get t1 out
    Assert.assertEquals("poll #1", "T1", tq.poll().getId());
    Assert.assertNull("poll #2", tq.poll());

    // report T1 done. Should cause T0 to be created
    tq.notify(t1, t1.run(tb.build(t1)));

    Assert.assertTrue("T0 isPresent (#2)", ts.getStatus("T0").isPresent());
    Assert.assertTrue("T0 isRunnable (#2)", ts.getStatus("T0").get().isRunnable());
    Assert.assertTrue("T0 isComplete (#2)", !ts.getStatus("T0").get().isComplete());
    Assert.assertTrue("T1 isPresent (#2)", ts.getStatus("T1").isPresent());
    Assert.assertTrue("T1 isRunnable (#2)", !ts.getStatus("T1").get().isRunnable());
    Assert.assertTrue("T1 isComplete (#2)", ts.getStatus("T1").get().isComplete());

    // should be able to get t0 out
    Assert.assertEquals("poll #3", "T0", tq.poll().getId());
    Assert.assertNull("poll #4", tq.poll());

    // report T0 done. Should cause T0, T1 to be marked complete
    tq.notify(t0, t0.run(tb.build(t0)));

    Assert.assertTrue("T0 isPresent (#3)", ts.getStatus("T0").isPresent());
    Assert.assertTrue("T0 isRunnable (#3)", !ts.getStatus("T0").get().isRunnable());
    Assert.assertTrue("T0 isComplete (#3)", ts.getStatus("T0").get().isComplete());
    Assert.assertTrue("T1 isPresent (#3)", ts.getStatus("T1").isPresent());
    Assert.assertTrue("T1 isRunnable (#3)", !ts.getStatus("T1").get().isRunnable());
    Assert.assertTrue("T1 isComplete (#3)", ts.getStatus("T1").get().isComplete());

    // should be no more events available for polling
    Assert.assertNull("poll #5", tq.poll());
  }

  @Test
  public void testConcurrency() throws Exception
  {
    final TaskStorage ts = new HeapMemoryTaskStorage();
    final TaskLockbox tl = new TaskLockbox(ts);
    final TaskQueue tq = newTaskQueue(ts, tl);
    final TaskToolboxFactory tb = new TaskToolboxFactory(
        new TaskConfig(null, null, null, null),
        new LocalTaskActionClientFactory(ts, new TaskActionToolbox(tq, tl, null, null)),
        null,
        null,
        null,
        null,
        null,
        null,
        null,
        null,
        new SegmentLoaderFactory(
            new OmniSegmentLoader(
                ImmutableMap.<String, DataSegmentPuller>of(
                    "local",
                    new LocalDataSegmentPuller()
                ),
                null,
                new SegmentLoaderConfig()
                {
                  @Override
                  public List<StorageLocationConfig> getLocations()
                  {
                    return Lists.newArrayList();
                  }
                }
            )
        ),
        null
    );

    // Imagine a larger task that splits itself up into pieces
    final Task t1 = newTask("T1", "G0", "bar", new Interval("2011-01-01/P1D"));
    final Task t2 = newTask("T2", "G1", "bar", new Interval("2011-01-02/P1D")); // Task group different from original
    final Task t3 = newTask("T3", "G0", "bar", new Interval("2011-01-03/P1D"));
    final Task t4 = newTask("T4", "G0", "bar", new Interval("2011-01-02/P5D")); // Interval wider than original
    final Task t0 = newContinuedTask(
        "T0",
        "G0",
        "bar",
        new Interval("2011-01-01/P3D"),
        ImmutableList.of(t1, t2, t3, t4)
    );

    tq.add(t0);

    final Task wt0 = tq.poll();
    final TaskLock wt0Lock = Iterables.getOnlyElement(tl.findLocksForTask(wt0));
    Assert.assertEquals("wt0 task id", "T0", wt0.getId());
    Assert.assertNull("null poll #1", tq.poll());

    // Sleep a bit to avoid false test passes
    Thread.sleep(5);

    // Finish t0
    tq.notify(t0, t0.run(tb.build(t0)));

    // take max number of tasks
    final Set<String> taken = Sets.newHashSet();
    while (true) {

      // Sleep a bit to avoid false test passes
      Thread.sleep(5);

      final Task task = tq.poll();

      if (task != null) {
        final TaskLock taskLock = Iterables.getOnlyElement(tl.findLocksForTask(task));
        Assert.assertEquals(
            String.format("%s version", task.getId()),
            wt0Lock.getVersion(),
            taskLock.getVersion()
        );
        taken.add(task.getId());
      } else {
        break;
      }

    }

    Assert.assertEquals("taken", Sets.newHashSet("T1", "T3"), taken);

    // Finish t1
    tq.notify(t1, t1.run(null));
    Assert.assertNull("null poll #2", tq.poll());

    // Finish t3
    tq.notify(t3, t3.run(tb.build(t3)));

    // We should be able to get t2 now
    final Task wt2 = tq.poll();
    final TaskLock wt2Lock = Iterables.getOnlyElement(tl.findLocksForTask(wt2));
    Assert.assertEquals("wt2 task id", "T2", wt2.getId());
    Assert.assertEquals("wt2 group id", "G1", wt2.getGroupId());
    Assert.assertNotSame("wt2 version", wt0Lock.getVersion(), wt2Lock.getVersion());
    Assert.assertNull("null poll #3", tq.poll());

    // Finish t2
    tq.notify(t2, t2.run(tb.build(t2)));

    // We should be able to get t4
    // And it should be in group G0, but that group should have a different version than last time
    // (Since the previous transaction named "G0" has ended and transaction names are not necessarily tied to
    // one version if they end and are re-started)
    final Task wt4 = tq.poll();
    final TaskLock wt4Lock = Iterables.getOnlyElement(tl.findLocksForTask(wt4));
    Assert.assertEquals("wt4 task id", "T4", wt4.getId());
    Assert.assertEquals("wt4 group id", "G0", wt4.getGroupId());
    Assert.assertNotSame("wt4 version", wt0Lock.getVersion(), wt4Lock.getVersion());
    Assert.assertNotSame("wt4 version", wt2Lock.getVersion(), wt4Lock.getVersion());

    // Kind of done testing at this point, but let's finish t4 anyway
    tq.notify(t4, t4.run(tb.build(t4)));
    Assert.assertNull("null poll #4", tq.poll());
  }

  @Test
  public void testBootstrap() throws Exception
  {
    final TaskStorage storage = new HeapMemoryTaskStorage();
    final TaskLockbox lockbox = new TaskLockbox(storage);

    storage.insert(newTask("T1", "G1", "bar", new Interval("2011-01-01/P1D")), TaskStatus.running("T1"));
    storage.insert(newTask("T2", "G2", "bar", new Interval("2011-02-01/P1D")), TaskStatus.running("T2"));
    storage.addLock("T1", new TaskLock("G1", "bar", new Interval("2011-01-01/P1D"), "1234"));

    final TaskQueue tq = newTaskQueue(storage, lockbox);

    final Task vt1 = tq.poll();
    final TaskLock vt1Lock = Iterables.getOnlyElement(lockbox.findLocksForTask(vt1));
    Assert.assertEquals("vt1 id", "T1", vt1.getId());
    Assert.assertEquals("vt1 version", "1234", vt1Lock.getVersion());

    tq.notify(vt1, TaskStatus.success("T1"));

    // re-bootstrap
    tq.stop();
    storage.setStatus(TaskStatus.failure("T2"));
    tq.bootstrap();
    tq.start();

    Assert.assertNull("null poll", tq.poll());
  }

  private static Task newTask(final String id, final String groupId, final String dataSource, final Interval interval)
  {
    return new AbstractTask(id, groupId, dataSource, interval)
    {
      @Override
      public TaskStatus run(TaskToolbox toolbox) throws Exception
      {
        return TaskStatus.success(id);
      }

      @Override
      public String getType()
      {
        return "null";
      }
    };
  }

  private static Task newContinuedTask(
      final String id,
      final String groupId,
      final String dataSource,
      final Interval interval,
      final List<Task> nextTasks
  )
  {
    return new AbstractTask(id, groupId, dataSource, interval)
    {
      @Override
      public String getType()
      {
        return "null";
      }

      @Override
      public TaskStatus run(TaskToolbox toolbox) throws Exception
      {
        toolbox.getTaskActionClient().submit(new SpawnTasksAction(nextTasks));
        return TaskStatus.success(id);
      }
    };
  }
}
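
The removed test drained the queue with a poll-until-null loop in two places. The same loop, written against a plain java.util.Queue for reference — the QueueDrain class is an illustrative name, not Druid code:

import java.util.ArrayDeque;
import java.util.ArrayList;
import java.util.List;
import java.util.Queue;

public final class QueueDrain
{
  private QueueDrain() {}

  // Take everything currently available: poll() returns null once the queue is empty.
  public static <T> List<T> drain(Queue<T> queue)
  {
    final List<T> taken = new ArrayList<T>();
    T item;
    while ((item = queue.poll()) != null) {
      taken.add(item);
    }
    return taken;
  }

  public static void main(String[] args)
  {
    final Queue<String> q = new ArrayDeque<String>();
    q.add("T0");
    q.add("T2");
    System.out.println(drain(q)); // [T0, T2]
  }
}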
EC2AutoScalingStrategyTest.java

@@ -126,13 +126,11 @@ public class EC2AutoScalingStrategyTest
    AutoScalingData created = strategy.provision();

    Assert.assertEquals(created.getNodeIds().size(), 1);
    Assert.assertEquals(created.getNodes().size(), 1);
    Assert.assertEquals("theInstance", created.getNodeIds().get(0));

    AutoScalingData deleted = strategy.terminate(Arrays.asList("dummyIP"));

    Assert.assertEquals(deleted.getNodeIds().size(), 1);
    Assert.assertEquals(deleted.getNodes().size(), 1);
    Assert.assertEquals(String.format("%s:8080", IP), deleted.getNodeIds().get(0));
  }
}
SimpleResourceManagementStrategyTest.java

@@ -19,6 +19,7 @@

package io.druid.indexing.overlord.scaling;

import com.google.common.collect.ImmutableList;
import com.google.common.collect.ImmutableMap;
import com.google.common.collect.Lists;
import com.google.common.collect.Maps;

@@ -28,6 +29,7 @@ import com.metamx.emitter.service.ServiceEventBuilder;
import io.druid.common.guava.DSuppliers;
import io.druid.indexing.common.TestMergeTask;
import io.druid.indexing.common.TaskStatus;
import io.druid.indexing.common.task.NoopTask;
import io.druid.indexing.common.task.Task;
import io.druid.indexing.overlord.RemoteTaskRunnerWorkItem;
import io.druid.indexing.overlord.ZkWorker;

@@ -63,7 +65,7 @@ public class SimpleResourceManagementStrategyTest
  public void setUp() throws Exception
  {
    autoScalingStrategy = EasyMock.createMock(AutoScalingStrategy.class);
    workerSetupData = new AtomicReference<WorkerSetupData>(
    workerSetupData = new AtomicReference<>(
        new WorkerSetupData(
            "0", 0, 2, null, null, null
        )

@@ -105,13 +107,13 @@ public class SimpleResourceManagementStrategyTest
    EasyMock.expect(autoScalingStrategy.ipToIdLookup(EasyMock.<List<String>>anyObject()))
            .andReturn(Lists.<String>newArrayList());
    EasyMock.expect(autoScalingStrategy.provision()).andReturn(
        new AutoScalingData(Lists.<String>newArrayList(), Lists.newArrayList())
        new AutoScalingData(Lists.<String>newArrayList("aNode"))
    );
    EasyMock.replay(autoScalingStrategy);

    boolean provisionedSomething = simpleResourceManagementStrategy.doProvision(
        Arrays.<RemoteTaskRunnerWorkItem>asList(
            new RemoteTaskRunnerWorkItem(testTask, null, null).withQueueInsertionTime(new DateTime())
            new RemoteTaskRunnerWorkItem(testTask.getId(), null, null).withQueueInsertionTime(new DateTime())
        ),
        Arrays.<ZkWorker>asList(
            new TestZkWorker(testTask)

@@ -133,13 +135,13 @@ public class SimpleResourceManagementStrategyTest
    EasyMock.expect(autoScalingStrategy.ipToIdLookup(EasyMock.<List<String>>anyObject()))
            .andReturn(Lists.<String>newArrayList()).times(2);
    EasyMock.expect(autoScalingStrategy.provision()).andReturn(
        new AutoScalingData(Lists.<String>newArrayList("fake"), Lists.newArrayList("faker"))
        new AutoScalingData(Lists.<String>newArrayList("fake"))
    );
    EasyMock.replay(autoScalingStrategy);

    boolean provisionedSomething = simpleResourceManagementStrategy.doProvision(
        Arrays.<RemoteTaskRunnerWorkItem>asList(
            new RemoteTaskRunnerWorkItem(testTask, null, null).withQueueInsertionTime(new DateTime())
            new RemoteTaskRunnerWorkItem(testTask.getId(), null, null).withQueueInsertionTime(new DateTime())
        ),
        Arrays.<ZkWorker>asList(
            new TestZkWorker(testTask)

@@ -155,7 +157,7 @@ public class SimpleResourceManagementStrategyTest

    provisionedSomething = simpleResourceManagementStrategy.doProvision(
        Arrays.<RemoteTaskRunnerWorkItem>asList(
            new RemoteTaskRunnerWorkItem(testTask, null, null).withQueueInsertionTime(new DateTime())
            new RemoteTaskRunnerWorkItem(testTask.getId(), null, null).withQueueInsertionTime(new DateTime())
        ),
        Arrays.<ZkWorker>asList(
            new TestZkWorker(testTask)

@@ -190,13 +192,13 @@ public class SimpleResourceManagementStrategyTest
    EasyMock.expect(autoScalingStrategy.terminate(EasyMock.<List<String>>anyObject()))
            .andReturn(null);
    EasyMock.expect(autoScalingStrategy.provision()).andReturn(
        new AutoScalingData(Lists.<String>newArrayList("fake"), Lists.newArrayList("faker"))
        new AutoScalingData(Lists.<String>newArrayList("fake"))
    );
    EasyMock.replay(autoScalingStrategy);

    boolean provisionedSomething = simpleResourceManagementStrategy.doProvision(
        Arrays.<RemoteTaskRunnerWorkItem>asList(
            new RemoteTaskRunnerWorkItem(testTask, null, null).withQueueInsertionTime(new DateTime())
            new RemoteTaskRunnerWorkItem(testTask.getId(), null, null).withQueueInsertionTime(new DateTime())
        ),
        Arrays.<ZkWorker>asList(
            new TestZkWorker(testTask)

@@ -214,7 +216,7 @@ public class SimpleResourceManagementStrategyTest

    provisionedSomething = simpleResourceManagementStrategy.doProvision(
        Arrays.<RemoteTaskRunnerWorkItem>asList(
            new RemoteTaskRunnerWorkItem(testTask, null, null).withQueueInsertionTime(new DateTime())
            new RemoteTaskRunnerWorkItem(testTask.getId(), null, null).withQueueInsertionTime(new DateTime())
        ),
        Arrays.<ZkWorker>asList(
            new TestZkWorker(testTask)

@@ -242,13 +244,13 @@ public class SimpleResourceManagementStrategyTest
    EasyMock.expect(autoScalingStrategy.ipToIdLookup(EasyMock.<List<String>>anyObject()))
            .andReturn(Lists.<String>newArrayList());
    EasyMock.expect(autoScalingStrategy.terminate(EasyMock.<List<String>>anyObject())).andReturn(
        new AutoScalingData(Lists.<String>newArrayList(), Lists.newArrayList())
        new AutoScalingData(Lists.<String>newArrayList())
    );
    EasyMock.replay(autoScalingStrategy);

    boolean terminatedSomething = simpleResourceManagementStrategy.doTerminate(
        Arrays.<RemoteTaskRunnerWorkItem>asList(
            new RemoteTaskRunnerWorkItem(testTask, null, null).withQueueInsertionTime(new DateTime())
            new RemoteTaskRunnerWorkItem(testTask.getId(), null, null).withQueueInsertionTime(new DateTime())
        ),
        Arrays.<ZkWorker>asList(
            new TestZkWorker(null)

@@ -272,13 +274,13 @@ public class SimpleResourceManagementStrategyTest
    EasyMock.expect(autoScalingStrategy.ipToIdLookup(EasyMock.<List<String>>anyObject()))
            .andReturn(Lists.<String>newArrayList("ip")).times(2);
    EasyMock.expect(autoScalingStrategy.terminate(EasyMock.<List<String>>anyObject())).andReturn(
        new AutoScalingData(Lists.<String>newArrayList("ip"), Lists.newArrayList("ip"))
        new AutoScalingData(Lists.<String>newArrayList("ip"))
    );
    EasyMock.replay(autoScalingStrategy);

    boolean terminatedSomething = simpleResourceManagementStrategy.doTerminate(
        Arrays.<RemoteTaskRunnerWorkItem>asList(
            new RemoteTaskRunnerWorkItem(testTask, null, null).withQueueInsertionTime(new DateTime())
            new RemoteTaskRunnerWorkItem(testTask.getId(), null, null).withQueueInsertionTime(new DateTime())
        ),
        Arrays.<ZkWorker>asList(
            new TestZkWorker(null)

@@ -293,7 +295,7 @@ public class SimpleResourceManagementStrategyTest

    terminatedSomething = simpleResourceManagementStrategy.doTerminate(
        Arrays.<RemoteTaskRunnerWorkItem>asList(
            new RemoteTaskRunnerWorkItem(testTask, null, null).withQueueInsertionTime(new DateTime())
            new RemoteTaskRunnerWorkItem(testTask.getId(), null, null).withQueueInsertionTime(new DateTime())
        ),
        Arrays.<ZkWorker>asList(
            new TestZkWorker(null)

@@ -309,15 +311,174 @@ public class SimpleResourceManagementStrategyTest
    EasyMock.verify(autoScalingStrategy);
  }

  @Test
  public void testNoActionNeeded() throws Exception
  {
    EasyMock.reset(autoScalingStrategy);
    EasyMock.expect(autoScalingStrategy.ipToIdLookup(EasyMock.<List<String>>anyObject()))
            .andReturn(Lists.<String>newArrayList("ip"));
    EasyMock.replay(autoScalingStrategy);

    boolean terminatedSomething = simpleResourceManagementStrategy.doTerminate(
        Arrays.<RemoteTaskRunnerWorkItem>asList(
            new RemoteTaskRunnerWorkItem(testTask.getId(), null, null).withQueueInsertionTime(new DateTime())
        ),
        Arrays.<ZkWorker>asList(
            new TestZkWorker(NoopTask.create()),
            new TestZkWorker(NoopTask.create())
        )
    );

    Assert.assertFalse(terminatedSomething);
    EasyMock.verify(autoScalingStrategy);

    EasyMock.reset(autoScalingStrategy);
    EasyMock.expect(autoScalingStrategy.ipToIdLookup(EasyMock.<List<String>>anyObject()))
            .andReturn(Lists.<String>newArrayList("ip"));
    EasyMock.replay(autoScalingStrategy);

    boolean provisionedSomething = simpleResourceManagementStrategy.doProvision(
        Arrays.<RemoteTaskRunnerWorkItem>asList(
            new RemoteTaskRunnerWorkItem(testTask.getId(), null, null).withQueueInsertionTime(new DateTime())
        ),
        Arrays.<ZkWorker>asList(
            new TestZkWorker(NoopTask.create()),
            new TestZkWorker(NoopTask.create())
        )
    );

    Assert.assertFalse(provisionedSomething);
    EasyMock.verify(autoScalingStrategy);
  }

  @Test
  public void testMinVersionIncrease() throws Exception
  {
    // Don't terminate anything
    EasyMock.reset(autoScalingStrategy);
    EasyMock.expect(autoScalingStrategy.ipToIdLookup(EasyMock.<List<String>>anyObject()))
            .andReturn(Lists.<String>newArrayList("ip"));
    EasyMock.replay(autoScalingStrategy);
    boolean terminatedSomething = simpleResourceManagementStrategy.doTerminate(
        Arrays.<RemoteTaskRunnerWorkItem>asList(),
        Arrays.<ZkWorker>asList(
            new TestZkWorker(NoopTask.create(), "h1", "i1", "0"),
            new TestZkWorker(NoopTask.create(), "h1", "i2", "0")
        )
    );
    Assert.assertFalse(terminatedSomething);
    EasyMock.verify(autoScalingStrategy);

    // Don't provision anything
    EasyMock.reset(autoScalingStrategy);
    EasyMock.expect(autoScalingStrategy.ipToIdLookup(EasyMock.<List<String>>anyObject()))
            .andReturn(Lists.<String>newArrayList("ip"));
    EasyMock.replay(autoScalingStrategy);
    boolean provisionedSomething = simpleResourceManagementStrategy.doProvision(
        Arrays.<RemoteTaskRunnerWorkItem>asList(),
        Arrays.<ZkWorker>asList(
            new TestZkWorker(NoopTask.create()),
            new TestZkWorker(NoopTask.create())
        )
    );
    Assert.assertFalse(provisionedSomething);
    EasyMock.verify(autoScalingStrategy);

    // Increase minVersion
    workerSetupData.set(new WorkerSetupData("1", 0, 2, null, null, null));

    // Provision two new workers
    EasyMock.reset(autoScalingStrategy);
    EasyMock.expect(autoScalingStrategy.ipToIdLookup(EasyMock.<List<String>>anyObject()))
            .andReturn(Lists.<String>newArrayList("ip"));
    EasyMock.expect(autoScalingStrategy.provision()).andReturn(
        new AutoScalingData(Lists.<String>newArrayList("h3"))
    );
    EasyMock.expect(autoScalingStrategy.provision()).andReturn(
        new AutoScalingData(Lists.<String>newArrayList("h4"))
    );
    EasyMock.replay(autoScalingStrategy);
    provisionedSomething = simpleResourceManagementStrategy.doProvision(
        Arrays.<RemoteTaskRunnerWorkItem>asList(),
        Arrays.<ZkWorker>asList(
            new TestZkWorker(NoopTask.create(), "h1", "i1", "0"),
            new TestZkWorker(NoopTask.create(), "h2", "i2", "0")
        )
    );
    Assert.assertTrue(provisionedSomething);
    EasyMock.verify(autoScalingStrategy);

    // Terminate old workers
    EasyMock.reset(autoScalingStrategy);
    EasyMock.expect(autoScalingStrategy.ipToIdLookup(ImmutableList.of("i1", "i2", "i3", "i4"))).andReturn(
        ImmutableList.of("h1", "h2", "h3", "h4")
    );
    EasyMock.expect(autoScalingStrategy.terminate(ImmutableList.of("i1", "i2"))).andReturn(
        new AutoScalingData(ImmutableList.of("h1", "h2"))
    );
    EasyMock.replay(autoScalingStrategy);
    terminatedSomething = simpleResourceManagementStrategy.doTerminate(
        Arrays.<RemoteTaskRunnerWorkItem>asList(),
        Arrays.<ZkWorker>asList(
            new TestZkWorker(null, "h1", "i1", "0"),
            new TestZkWorker(null, "h2", "i2", "0"),
            new TestZkWorker(NoopTask.create(), "h3", "i3", "1"),
            new TestZkWorker(NoopTask.create(), "h4", "i4", "1")
        )
    );
    Assert.assertTrue(terminatedSomething);
    EasyMock.verify(autoScalingStrategy);
  }

  @Test
  public void testNullWorkerSetupData() throws Exception
  {
    workerSetupData.set(null);
    EasyMock.replay(autoScalingStrategy);

    boolean terminatedSomething = simpleResourceManagementStrategy.doTerminate(
        Arrays.<RemoteTaskRunnerWorkItem>asList(
            new RemoteTaskRunnerWorkItem(testTask.getId(), null, null).withQueueInsertionTime(new DateTime())
        ),
        Arrays.<ZkWorker>asList(
            new TestZkWorker(null)
        )
    );

    boolean provisionedSomething = simpleResourceManagementStrategy.doProvision(
        Arrays.<RemoteTaskRunnerWorkItem>asList(
            new RemoteTaskRunnerWorkItem(testTask.getId(), null, null).withQueueInsertionTime(new DateTime())
        ),
        Arrays.<ZkWorker>asList(
            new TestZkWorker(null)
        )
    );

    Assert.assertFalse(terminatedSomething);
    Assert.assertFalse(provisionedSomething);

    EasyMock.verify(autoScalingStrategy);
  }

  private static class TestZkWorker extends ZkWorker
  {
    private final Task testTask;

    private TestZkWorker(
    public TestZkWorker(
        Task testTask
    )
    {
      super(new Worker("host", "ip", 3, "version"), null, new DefaultObjectMapper());
      this(testTask, "host", "ip", "0");
    }

    public TestZkWorker(
        Task testTask,
        String host,
        String ip,
        String version
    )
    {
      super(new Worker(host, ip, 3, version), null, new DefaultObjectMapper());

      this.testTask = testTask;
    }
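
Every scenario above walks the same EasyMock lifecycle: reset the mock, record expectations, switch to replay mode, exercise the strategy, then verify. A minimal sketch of that cycle against a made-up collaborator — the Greeter interface is illustrative; only the EasyMock calls are real API:

import org.easymock.EasyMock;

public class EasyMockCycle
{
  // Illustrative collaborator; not part of Druid.
  interface Greeter
  {
    String greet(String name);
  }

  public static void main(String[] args)
  {
    final Greeter greeter = EasyMock.createMock(Greeter.class);

    // Record: calling greet("druid") once should return "hello druid".
    EasyMock.expect(greeter.greet("druid")).andReturn("hello druid");
    EasyMock.replay(greeter);   // switch from recording to replaying

    System.out.println(greeter.greet("druid"));

    EasyMock.verify(greeter);   // fail if any recorded expectation went unused
    EasyMock.reset(greeter);    // back to recording mode for the next scenario
  }
}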
WorkerTaskMonitorTest.java

@@ -122,7 +122,7 @@ public class WorkerTaskMonitorTest
        new ThreadPoolTaskRunner(
            new TaskToolboxFactory(
                new TaskConfig(tmp.toString(), null, null, 0),
                null, null, null, null, null, null, null, null, null, new SegmentLoaderFactory(
                null, null, null, null, null, null, null, null, null, null, null, new SegmentLoaderFactory(
                new OmniSegmentLoader(
                    ImmutableMap.<String, DataSegmentPuller>of(
                        "local",

@@ -209,4 +209,4 @@ public class WorkerTaskMonitorTest
    Assert.assertEquals(task.getId(), taskAnnouncement.getTaskStatus().getId());
    Assert.assertEquals(TaskStatus.Status.RUNNING, taskAnnouncement.getTaskStatus().getStatusCode());
  }
}
pom.xml

@@ -28,12 +28,10 @@
  <parent>
    <groupId>io.druid</groupId>
    <artifactId>druid</artifactId>
    <version>0.6.27-SNAPSHOT</version>
    <version>0.6.48-SNAPSHOT</version>
  </parent>

  <dependencies>

    <dependency>
      <groupId>io.druid</groupId>
      <artifactId>druid-api</artifactId>

@@ -41,7 +39,7 @@
    <dependency>
      <groupId>org.apache.kafka</groupId>
      <artifactId>kafka_2.9.2</artifactId>
      <version>0.8.0-beta1</version>
      <version>0.8.0</version>
      <exclusions>
        <exclusion>
          <groupId>log4j</groupId>

@@ -54,61 +52,6 @@
      </exclusions>
    </dependency>

    <!-- kafka_2.9.2 0.8.0-beta1 is bad, it is not correctly pulling dependencies, do it manually here -->
    <dependency>
      <groupId>org.scala-lang</groupId>
      <artifactId>scala-library</artifactId>
      <version>2.9.2</version>
    </dependency>
    <dependency>
      <groupId>net.sf.jopt-simple</groupId>
      <artifactId>jopt-simple</artifactId>
      <version>3.2</version>
    </dependency>
    <dependency>
      <groupId>org.slf4j</groupId>
      <artifactId>slf4j-simple</artifactId>
      <version>1.6.4</version>
    </dependency>
    <dependency>
      <groupId>org.scala-lang</groupId>
      <artifactId>scala-compiler</artifactId>
      <version>2.9.2</version>
    </dependency>
    <dependency>
      <groupId>com.101tec</groupId>
      <artifactId>zkclient</artifactId>
      <version>0.3</version>
    </dependency>
    <dependency>
      <groupId>org.xerial.snappy</groupId>
      <artifactId>snappy-java</artifactId>
      <version>1.0.4.1</version>
    </dependency>
    <dependency>
      <groupId>com.yammer.metrics</groupId>
      <artifactId>metrics-core</artifactId>
      <version>2.2.0</version>
    </dependency>
    <dependency>
      <groupId>com.yammer.metrics</groupId>
      <artifactId>metrics-annotation</artifactId>
      <version>2.2.0</version>
    </dependency>
    <dependency>
      <groupId>org.easymock</groupId>
      <artifactId>easymock</artifactId>
      <version>3.0</version>
      <scope>test</scope>
    </dependency>
    <dependency>
      <groupId>org.scalatest</groupId>
      <artifactId>scalatest_2.9.2</artifactId>
      <version>1.8</version>
      <scope>test</scope>
    </dependency>

    <!-- Tests -->
    <dependency>
      <groupId>junit</groupId>

@@ -116,4 +59,20 @@
      <scope>test</scope>
    </dependency>
  </dependencies>

  <build>
    <plugins>
      <plugin>
        <artifactId>maven-jar-plugin</artifactId>
        <configuration>
          <archive>
            <manifest>
              <addDefaultImplementationEntries>true</addDefaultImplementationEntries>
              <addDefaultSpecificationEntries>true</addDefaultSpecificationEntries>
            </manifest>
          </archive>
        </configuration>
      </plugin>
    </plugins>
  </build>
</project>
KafkaEightFirehoseFactory.java

@@ -107,7 +107,15 @@ public class KafkaEightFirehoseFactory implements FirehoseFactory
          return null;
        }

        return parser.parse(ByteBuffer.wrap(message));
        try {
          return parser.parse(ByteBuffer.wrap(message));
        }
        catch (Exception e) {
          throw new FormattedException.Builder()
              .withErrorCode(FormattedException.ErrorCode.UNPARSABLE_ROW)
              .withMessage(String.format("Error parsing[%s], got [%s]", ByteBuffer.wrap(message), e.toString()))
              .build();
        }
      }

      @Override
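
Both firehose changes wrap the parser call so that any parse failure surfaces as one typed, row-level error carrying the offending input, instead of an arbitrary exception. A sketch of that wrap-and-rethrow shape, with IllegalArgumentException standing in for Druid's FormattedException (the SafeParse class and UTF-8 decoding are illustrative assumptions):

import java.nio.ByteBuffer;
import java.nio.charset.StandardCharsets;

public class SafeParse
{
  // Decode a raw message, converting any failure into a single exception
  // type whose message includes the bad input, mirroring the pattern above.
  public static String parseUtf8(ByteBuffer raw)
  {
    try {
      return StandardCharsets.UTF_8.decode(raw.duplicate()).toString();
    }
    catch (Exception e) {
      throw new IllegalArgumentException(
          String.format("Error parsing[%s], got [%s]", raw, e.toString()),
          e
      );
    }
  }

  public static void main(String[] args)
  {
    System.out.println(parseUtf8(ByteBuffer.wrap("{\"type\":\"noop\"}".getBytes(StandardCharsets.UTF_8))));
  }
}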
pom.xml

@@ -28,7 +28,7 @@
  <parent>
    <groupId>io.druid</groupId>
    <artifactId>druid</artifactId>
    <version>0.6.27-SNAPSHOT</version>
    <version>0.6.48-SNAPSHOT</version>
  </parent>

  <dependencies>

@@ -55,4 +55,19 @@
      <scope>test</scope>
    </dependency>
  </dependencies>
  <build>
    <plugins>
      <plugin>
        <artifactId>maven-jar-plugin</artifactId>
        <configuration>
          <archive>
            <manifest>
              <addDefaultImplementationEntries>true</addDefaultImplementationEntries>
              <addDefaultSpecificationEntries>true</addDefaultSpecificationEntries>
            </manifest>
          </archive>
        </configuration>
      </plugin>
    </plugins>
  </build>
</project>
KafkaSevenFirehoseFactory.java

@@ -120,7 +120,15 @@ public class KafkaSevenFirehoseFactory implements FirehoseFactory

      public InputRow parseMessage(Message message) throws FormattedException
      {
        return parser.parse(message.payload());
        try {
          return parser.parse(message.payload());
        }
        catch (Exception e) {
          throw new FormattedException.Builder()
              .withErrorCode(FormattedException.ErrorCode.UNPARSABLE_ROW)
              .withMessage(String.format("Error parsing[%s], got [%s]", message.payload(), e.toString()))
              .build();
        }
      }

      @Override
Some files were not shown because too many files have changed in this diff.