diff --git a/cassandra-storage/pom.xml b/cassandra-storage/pom.xml
index cb2de923923..754e227b4c1 100644
--- a/cassandra-storage/pom.xml
+++ b/cassandra-storage/pom.xml
@@ -28,7 +28,7 @@
io.druid
druid
- 0.6.117-SNAPSHOT
+ 0.6.121-SNAPSHOT
diff --git a/common/pom.xml b/common/pom.xml
index 4d05afd6443..e76f3945f2e 100644
--- a/common/pom.xml
+++ b/common/pom.xml
@@ -28,7 +28,7 @@
io.druid
druid
- 0.6.117-SNAPSHOT
+ 0.6.121-SNAPSHOT
diff --git a/docs/content/Examples.md b/docs/content/Examples.md
index ca1e15fa078..268f655bc05 100644
--- a/docs/content/Examples.md
+++ b/docs/content/Examples.md
@@ -19,13 +19,13 @@ Clone Druid and build it:
git clone https://github.com/metamx/druid.git druid
cd druid
git fetch --tags
-git checkout druid-0.6.116
+git checkout druid-0.6.119
./build.sh
```
### Downloading the DSK (Druid Standalone Kit)
-[Download](http://static.druid.io/artifacts/releases/druid-services-0.6.116-bin.tar.gz) a stand-alone tarball and run it:
+[Download](http://static.druid.io/artifacts/releases/druid-services-0.6.119-bin.tar.gz) a stand-alone tarball and run it:
``` bash
tar -xzf druid-services-0.X.X-bin.tar.gz
diff --git a/docs/content/Performance-FAQ.md b/docs/content/Performance-FAQ.md
index 8bc696d6825..c4bd7363a87 100644
--- a/docs/content/Performance-FAQ.md
+++ b/docs/content/Performance-FAQ.md
@@ -5,9 +5,9 @@ layout: doc_page
## What should I set my JVM heap?
The size of the JVM heap really depends on the type of Druid node you are running. Below are a few considerations.
-[Broker nodes](Broker.html) can use the JVM heap as a query cache and thus, the size of the heap will affect on the number of results that can be cached. Broker nodes do not require off-heap memory and generally, heap sizes can be set to be close to the maximum memory on the machine (leaving some room for JVM overhead). The heap is used to merge results from different real-time and historical nodes, along with other computational processing.
+[Broker nodes](Broker.html) use the JVM heap mainly to merge results from historical and real-time nodes. Brokers also use off-heap memory and processing threads for groupBy queries.
-[Historical nodes](Historical.html) use off-heap memory to store intermediate results, and by default, all segments are memory mapped before they can be queried. The more off-heap memory is available, the more segments can be served without the possibility of data being paged onto disk. On historicals, the JVM heap is used for [GroupBy queries](GroupByQuery.html), some data structures used for intermediate computation, and general processing.
+[Historical nodes](Historical.html) use off-heap memory to store intermediate results, and by default, all segments are memory mapped before they can be queried. Typically, the more memory that is available on a historical node, the more segments can be served without the possibility of data being paged onto disk. On historicals, the JVM heap is used for [GroupBy queries](GroupByQuery.html), some data structures used for intermediate computation, and general processing. One way to calculate how much space there is for segments is: memory_for_segments = total_memory - heap - direct_memory - jvm_overhead.
[Coordinator nodes](Coordinator.html) do not require off-heap memory and the heap is used for loading information about all segments to determine what segments need to be loaded, dropped, moved, or replicated.
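To make the sizing formula above concrete, here is a minimal standalone sketch of the memory_for_segments arithmetic for a historical node; the numbers are hypothetical, not recommendations:

```java
public class SegmentMemoryEstimate
{
  public static void main(String[] args)
  {
    // Hypothetical machine: 64 GB total RAM, 12 GB heap, 6 GB direct memory, ~1 GB JVM overhead.
    long gb = 1024L * 1024 * 1024;
    long totalMemory = 64 * gb;
    long heap = 12 * gb;
    long directMemory = 6 * gb;
    long jvmOverhead = 1 * gb;

    // memory_for_segments = total_memory - heap - direct_memory - jvm_overhead
    long memoryForSegments = totalMemory - heap - directMemory - jvmOverhead;
    System.out.println("Memory left for memory-mapped segments: " + (memoryForSegments / gb) + " GB");
  }
}
```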
diff --git a/docs/content/Production-Cluster-Configuration.md b/docs/content/Production-Cluster-Configuration.md
index 21dda2c3a92..413cfd597c4 100644
--- a/docs/content/Production-Cluster-Configuration.md
+++ b/docs/content/Production-Cluster-Configuration.md
@@ -55,7 +55,7 @@ druid.host=#{IP_ADDR}:8080
druid.port=8080
druid.service=druid/prod/overlord
-druid.extensions.coordinates=["io.druid.extensions:druid-s3-extensions:0.6.116"]
+druid.extensions.coordinates=["io.druid.extensions:druid-s3-extensions:0.6.119"]
druid.zk.service.host=#{ZK_IPs}
druid.zk.paths.base=/druid/prod
@@ -137,7 +137,7 @@ druid.host=#{IP_ADDR}:8080
druid.port=8080
druid.service=druid/prod/middlemanager
-druid.extensions.coordinates=["io.druid.extensions:druid-s3-extensions:0.6.116","io.druid.extensions:druid-kafka-seven:0.6.116"]
+druid.extensions.coordinates=["io.druid.extensions:druid-s3-extensions:0.6.119","io.druid.extensions:druid-kafka-seven:0.6.119"]
druid.zk.service.host=#{ZK_IPs}
druid.zk.paths.base=/druid/prod
@@ -285,7 +285,7 @@ druid.host=#{IP_ADDR}:8080
druid.port=8080
druid.service=druid/prod/historical
-druid.extensions.coordinates=["io.druid.extensions:druid-s3-extensions:0.6.116"]
+druid.extensions.coordinates=["io.druid.extensions:druid-s3-extensions:0.6.119"]
druid.zk.service.host=#{ZK_IPs}
druid.zk.paths.base=/druid/prod
diff --git a/docs/content/Realtime-Config.md b/docs/content/Realtime-Config.md
index c252ed13da5..62dfcf4a21f 100644
--- a/docs/content/Realtime-Config.md
+++ b/docs/content/Realtime-Config.md
@@ -27,7 +27,7 @@ druid.host=localhost
druid.service=realtime
druid.port=8083
-druid.extensions.coordinates=["io.druid.extensions:druid-kafka-seven:0.6.116"]
+druid.extensions.coordinates=["io.druid.extensions:druid-kafka-seven:0.6.119"]
druid.zk.service.host=localhost
@@ -76,7 +76,7 @@ druid.host=#{IP_ADDR}:8080
druid.port=8080
druid.service=druid/prod/realtime
-druid.extensions.coordinates=["io.druid.extensions:druid-s3-extensions:0.6.116","io.druid.extensions:druid-kafka-seven:0.6.116"]
+druid.extensions.coordinates=["io.druid.extensions:druid-s3-extensions:0.6.119","io.druid.extensions:druid-kafka-seven:0.6.119"]
druid.zk.service.host=#{ZK_IPs}
druid.zk.paths.base=/druid/prod
diff --git a/docs/content/Simple-Cluster-Configuration.md b/docs/content/Simple-Cluster-Configuration.md
index f941031861c..3ea9f9b20a7 100644
--- a/docs/content/Simple-Cluster-Configuration.md
+++ b/docs/content/Simple-Cluster-Configuration.md
@@ -28,7 +28,7 @@ Configuration:
-Ddruid.zk.service.host=localhost
--Ddruid.extensions.coordinates=["io.druid.extensions:druid-kafka-seven:0.6.116"]
+-Ddruid.extensions.coordinates=["io.druid.extensions:druid-kafka-seven:0.6.119"]
-Ddruid.db.connector.connectURI=jdbc:mysql://localhost:3306/druid
-Ddruid.db.connector.user=druid
diff --git a/docs/content/Tutorial:-A-First-Look-at-Druid.md b/docs/content/Tutorial:-A-First-Look-at-Druid.md
index 970e504c25b..a27d7277e2d 100644
--- a/docs/content/Tutorial:-A-First-Look-at-Druid.md
+++ b/docs/content/Tutorial:-A-First-Look-at-Druid.md
@@ -49,7 +49,7 @@ There are two ways to setup Druid: download a tarball, or [Build From Source](Bu
### Download a Tarball
-We've built a tarball that contains everything you'll need. You'll find it [here](http://static.druid.io/artifacts/releases/druid-services-0.6.116-bin.tar.gz). Download this file to a directory of your choosing.
+We've built a tarball that contains everything you'll need. You'll find it [here](http://static.druid.io/artifacts/releases/druid-services-0.6.119-bin.tar.gz). Download this file to a directory of your choosing.
You can extract the awesomeness within by issuing:
@@ -60,7 +60,7 @@ tar -zxvf druid-services-*-bin.tar.gz
Not too lost so far right? That's great! If you cd into the directory:
```
-cd druid-services-0.6.116
+cd druid-services-0.6.119
```
You should see a bunch of files:
diff --git a/docs/content/Tutorial:-Loading-Your-Data-Part-1.md b/docs/content/Tutorial:-Loading-Your-Data-Part-1.md
index 0d06e94aa8b..0427550eeed 100644
--- a/docs/content/Tutorial:-Loading-Your-Data-Part-1.md
+++ b/docs/content/Tutorial:-Loading-Your-Data-Part-1.md
@@ -96,7 +96,7 @@ The configurations for the overlord node are as follows:
-Ddruid.zk.service.host=localhost
--Ddruid.extensions.coordinates=["io.druid.extensions:druid-kafka-seven:0.6.116"]
+-Ddruid.extensions.coordinates=["io.druid.extensions:druid-kafka-seven:0.6.119"]
-Ddruid.db.connector.connectURI=jdbc:mysql://localhost:3306/druid
-Ddruid.db.connector.user=druid
diff --git a/docs/content/Tutorial:-The-Druid-Cluster.md b/docs/content/Tutorial:-The-Druid-Cluster.md
index 23edc085c57..d63f7d70e6f 100644
--- a/docs/content/Tutorial:-The-Druid-Cluster.md
+++ b/docs/content/Tutorial:-The-Druid-Cluster.md
@@ -13,7 +13,7 @@ In this tutorial, we will set up other types of Druid nodes and external depende
If you followed the first tutorial, you should already have Druid downloaded. If not, let's go back and do that first.
-You can download the latest version of druid [here](http://static.druid.io/artifacts/releases/druid-services-0.6.116-bin.tar.gz)
+You can download the latest version of druid [here](http://static.druid.io/artifacts/releases/druid-services-0.6.119-bin.tar.gz)
and untar the contents within by issuing:
@@ -149,7 +149,7 @@ druid.port=8081
druid.zk.service.host=localhost
-druid.extensions.coordinates=["io.druid.extensions:druid-s3-extensions:0.6.116"]
+druid.extensions.coordinates=["io.druid.extensions:druid-s3-extensions:0.6.119"]
# Dummy read only AWS account (used to download example data)
druid.s3.secretKey=QyyfVZ7llSiRg6Qcrql1eEUG7buFpAK6T6engr1b
@@ -240,7 +240,7 @@ druid.port=8083
druid.zk.service.host=localhost
-druid.extensions.coordinates=["io.druid.extensions:druid-examples:0.6.116","io.druid.extensions:druid-kafka-seven:0.6.116"]
+druid.extensions.coordinates=["io.druid.extensions:druid-examples:0.6.119","io.druid.extensions:druid-kafka-seven:0.6.119"]
# Change this config to db to hand off to the rest of the Druid cluster
druid.publish.type=noop
diff --git a/docs/content/Tutorial:-Webstream.md b/docs/content/Tutorial:-Webstream.md
index 18257ab78b4..abd0edda56f 100644
--- a/docs/content/Tutorial:-Webstream.md
+++ b/docs/content/Tutorial:-Webstream.md
@@ -37,7 +37,7 @@ There are two ways to setup Druid: download a tarball, or [Build From Source](Bu
h3. Download a Tarball
-We've built a tarball that contains everything you'll need. You'll find it [here](http://static.druid.io/artifacts/releases/druid-services-0.6.116-bin.tar.gz)
+We've built a tarball that contains everything you'll need. You'll find it [here](http://static.druid.io/artifacts/releases/druid-services-0.6.119-bin.tar.gz)
Download this file to a directory of your choosing.
You can extract the awesomeness within by issuing:
@@ -48,7 +48,7 @@ tar zxvf druid-services-*-bin.tar.gz
Not too lost so far right? That's great! If you cd into the directory:
```
-cd druid-services-0.6.116
+cd druid-services-0.6.119
```
You should see a bunch of files:
diff --git a/docs/content/Twitter-Tutorial.md b/docs/content/Twitter-Tutorial.md
index e3b1bdddd9d..857c32f6256 100644
--- a/docs/content/Twitter-Tutorial.md
+++ b/docs/content/Twitter-Tutorial.md
@@ -9,7 +9,7 @@ There are two ways to setup Druid: download a tarball, or build it from source.
# Download a Tarball
-We've built a tarball that contains everything you'll need. You'll find it [here](http://static.druid.io/artifacts/releases/druid-services-0.6.116-bin.tar.gz).
+We've built a tarball that contains everything you'll need. You'll find it [here](http://static.druid.io/artifacts/releases/druid-services-0.6.119-bin.tar.gz).
Download this bad boy to a directory of your choosing.
You can extract the awesomeness within by issuing:
diff --git a/examples/config/historical/runtime.properties b/examples/config/historical/runtime.properties
index 51d19e89af6..f86944fd020 100644
--- a/examples/config/historical/runtime.properties
+++ b/examples/config/historical/runtime.properties
@@ -4,7 +4,7 @@ druid.port=8081
druid.zk.service.host=localhost
-druid.extensions.coordinates=["io.druid.extensions:druid-s3-extensions:0.6.116"]
+druid.extensions.coordinates=["io.druid.extensions:druid-s3-extensions:0.6.119"]
# Dummy read only AWS account (used to download example data)
druid.s3.secretKey=QyyfVZ7llSiRg6Qcrql1eEUG7buFpAK6T6engr1b
diff --git a/examples/config/overlord/runtime.properties b/examples/config/overlord/runtime.properties
index 24745c3147f..7a8582ed4d3 100644
--- a/examples/config/overlord/runtime.properties
+++ b/examples/config/overlord/runtime.properties
@@ -9,7 +9,7 @@
-Ddruid.zk.service.host=localhost
--Ddruid.extensions.coordinates=["io.druid.extensions:druid-kafka-seven:0.6.116"]
+-Ddruid.extensions.coordinates=["io.druid.extensions:druid-kafka-seven:0.6.119"]
-Ddruid.db.connector.connectURI=jdbc:mysql://localhost:3306/druid
-Ddruid.db.connector.user=druid
diff --git a/examples/config/realtime/runtime.properties b/examples/config/realtime/runtime.properties
index b78547fe8ab..43e96fbe086 100644
--- a/examples/config/realtime/runtime.properties
+++ b/examples/config/realtime/runtime.properties
@@ -4,7 +4,7 @@ druid.port=8083
druid.zk.service.host=localhost
-druid.extensions.coordinates=["io.druid.extensions:druid-examples:0.6.116","io.druid.extensions:druid-kafka-seven:0.6.116","io.druid.extensions:druid-rabbitmq:0.6.116"]
+druid.extensions.coordinates=["io.druid.extensions:druid-examples:0.6.119","io.druid.extensions:druid-kafka-seven:0.6.119","io.druid.extensions:druid-rabbitmq:0.6.119"]
# Change this config to db to hand off to the rest of the Druid cluster
druid.publish.type=noop
diff --git a/examples/pom.xml b/examples/pom.xml
index a7d6f87c1d0..7a240100744 100644
--- a/examples/pom.xml
+++ b/examples/pom.xml
@@ -28,7 +28,7 @@
io.druid
druid
- 0.6.117-SNAPSHOT
+ 0.6.121-SNAPSHOT
diff --git a/hdfs-storage/pom.xml b/hdfs-storage/pom.xml
index 23a1294395d..9de4f7a5b97 100644
--- a/hdfs-storage/pom.xml
+++ b/hdfs-storage/pom.xml
@@ -28,7 +28,7 @@
io.druid
druid
- 0.6.117-SNAPSHOT
+ 0.6.121-SNAPSHOT
diff --git a/indexing-hadoop/pom.xml b/indexing-hadoop/pom.xml
index 81c4f32d85f..7b04b613762 100644
--- a/indexing-hadoop/pom.xml
+++ b/indexing-hadoop/pom.xml
@@ -28,7 +28,7 @@
io.druid
druid
- 0.6.117-SNAPSHOT
+ 0.6.121-SNAPSHOT
diff --git a/indexing-hadoop/src/main/java/io/druid/indexer/DetermineHashedPartitionsJob.java b/indexing-hadoop/src/main/java/io/druid/indexer/DetermineHashedPartitionsJob.java
index 82643065123..4bc9cb9f35a 100644
--- a/indexing-hadoop/src/main/java/io/druid/indexer/DetermineHashedPartitionsJob.java
+++ b/indexing-hadoop/src/main/java/io/druid/indexer/DetermineHashedPartitionsJob.java
@@ -33,8 +33,8 @@ import com.metamx.common.logger.Logger;
import io.druid.data.input.InputRow;
import io.druid.data.input.Rows;
import io.druid.granularity.QueryGranularity;
-import io.druid.segment.indexing.granularity.UniformGranularitySpec;
import io.druid.query.aggregation.hyperloglog.HyperLogLogCollector;
+import io.druid.segment.indexing.granularity.UniformGranularitySpec;
import io.druid.timeline.partition.HashBasedNumberedShardSpec;
import io.druid.timeline.partition.NoneShardSpec;
import org.apache.hadoop.conf.Configurable;
@@ -48,8 +48,8 @@ import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.Partitioner;
import org.apache.hadoop.mapreduce.Reducer;
-import org.apache.hadoop.mapreduce.lib.input.TextInputFormat;
import org.apache.hadoop.mapreduce.lib.input.CombineTextInputFormat;
+import org.apache.hadoop.mapreduce.lib.input.TextInputFormat;
import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;
import org.apache.hadoop.mapreduce.lib.output.SequenceFileOutputFormat;
import org.joda.time.DateTime;
@@ -92,7 +92,11 @@ public class DetermineHashedPartitionsJob implements Jobby
);
JobHelper.injectSystemProperties(groupByJob);
- groupByJob.setInputFormatClass(CombineTextInputFormat.class);
+ if (config.isCombineText()) {
+ groupByJob.setInputFormatClass(CombineTextInputFormat.class);
+ } else {
+ groupByJob.setInputFormatClass(TextInputFormat.class);
+ }
groupByJob.setMapperClass(DetermineCardinalityMapper.class);
groupByJob.setMapOutputKeyClass(LongWritable.class);
groupByJob.setMapOutputValueClass(BytesWritable.class);
diff --git a/indexing-hadoop/src/main/java/io/druid/indexer/HadoopDruidIndexerConfig.java b/indexing-hadoop/src/main/java/io/druid/indexer/HadoopDruidIndexerConfig.java
index 5956287f5d0..0a863b78c13 100644
--- a/indexing-hadoop/src/main/java/io/druid/indexer/HadoopDruidIndexerConfig.java
+++ b/indexing-hadoop/src/main/java/io/druid/indexer/HadoopDruidIndexerConfig.java
@@ -37,7 +37,6 @@ import com.google.inject.Module;
import com.metamx.common.ISE;
import com.metamx.common.guava.FunctionalIterable;
import com.metamx.common.logger.Logger;
-import com.metamx.common.parsers.TimestampParser;
import io.druid.common.utils.JodaUtils;
import io.druid.data.input.InputRow;
import io.druid.data.input.impl.StringInputRowParser;
@@ -111,7 +110,17 @@ public class HadoopDruidIndexerConfig
public static HadoopDruidIndexerConfig fromMap(Map argSpec)
{
- return HadoopDruidIndexerConfig.jsonMapper.convertValue(argSpec, HadoopDruidIndexerConfig.class);
+ //backwards compatibility
+ if (argSpec.containsKey("schema")) {
+ return HadoopDruidIndexerConfig.jsonMapper.convertValue(argSpec, HadoopDruidIndexerConfig.class);
+ } else {
+ return new HadoopDruidIndexerConfig(
+ HadoopDruidIndexerConfig.jsonMapper.convertValue(
+ argSpec,
+ HadoopIngestionSpec.class
+ )
+ );
+ }
}
@SuppressWarnings("unchecked")
@@ -246,6 +255,11 @@ public class HadoopDruidIndexerConfig
return (schema.getIOConfig().getMetadataUpdateSpec() != null);
}
+ public boolean isCombineText()
+ {
+ return schema.getTuningConfig().isCombineText();
+ }
+
public StringInputRowParser getParser()
{
return (StringInputRowParser) schema.getDataSchema().getParser();
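As a rough illustration of the backwards-compatibility branch added to `fromMap` above: a caller can pass either the new nested form (spec under a "schema" key) or the legacy flat `HadoopIngestionSpec` form. The map contents below are placeholders only; a real spec would need dataSchema, ioConfig, and tuningConfig entries.

```java
import com.google.common.collect.ImmutableMap;

import java.util.Map;

public class FromMapArgumentShapes
{
  public static void main(String[] args)
  {
    // New-style argument: the ingestion spec sits under a "schema" key, so
    // HadoopDruidIndexerConfig.fromMap() deserializes the map directly as a
    // HadoopDruidIndexerConfig.
    Map<String, Object> newStyle = ImmutableMap.<String, Object>of(
        "schema", ImmutableMap.of() // real dataSchema/ioConfig/tuningConfig fields go here
    );

    // Legacy argument: no "schema" key, so fromMap() reads the whole map as a
    // HadoopIngestionSpec and wraps it in a new HadoopDruidIndexerConfig.
    Map<String, Object> legacyStyle = ImmutableMap.of(); // flat legacy spec fields go here

    System.out.println("new style keys: " + newStyle.keySet());
    System.out.println("legacy style keys: " + legacyStyle.keySet());
  }
}
```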
diff --git a/indexing-hadoop/src/main/java/io/druid/indexer/HadoopIngestionSpec.java b/indexing-hadoop/src/main/java/io/druid/indexer/HadoopIngestionSpec.java
index 2ebd1b73b6b..b9947e81fe9 100644
--- a/indexing-hadoop/src/main/java/io/druid/indexer/HadoopIngestionSpec.java
+++ b/indexing-hadoop/src/main/java/io/druid/indexer/HadoopIngestionSpec.java
@@ -73,6 +73,7 @@ public class HadoopIngestionSpec extends IngestionSpec jobProperties,
+ final @JsonProperty("combineText") boolean combineText,
// These fields are deprecated and will be removed in the future
final @JsonProperty("timestampColumn") String timestampColumn,
final @JsonProperty("timestampFormat") String timestampFormat,
@@ -138,7 +139,7 @@ public class HadoopIngestionSpec extends IngestionSpec jobProperties;
+ private final boolean combineText;
@JsonCreator
public HadoopTuningConfig(
@@ -78,7 +80,8 @@ public class HadoopTuningConfig implements TuningConfig
final @JsonProperty("cleanupOnFailure") Boolean cleanupOnFailure,
final @JsonProperty("overwriteFiles") boolean overwriteFiles,
final @JsonProperty("ignoreInvalidRows") boolean ignoreInvalidRows,
- final @JsonProperty("jobProperties") Map jobProperties
+ final @JsonProperty("jobProperties") Map jobProperties,
+ final @JsonProperty("combineText") boolean combineText
)
{
this.workingPath = workingPath == null ? null : workingPath;
@@ -93,6 +96,7 @@ public class HadoopTuningConfig implements TuningConfig
this.jobProperties = (jobProperties == null
? ImmutableMap.of()
: ImmutableMap.copyOf(jobProperties));
+ this.combineText = combineText;
}
@JsonProperty
@@ -155,6 +159,12 @@ public class HadoopTuningConfig implements TuningConfig
return jobProperties;
}
+ @JsonProperty
+ public boolean isCombineText()
+ {
+ return combineText;
+ }
+
public HadoopTuningConfig withWorkingPath(String path)
{
return new HadoopTuningConfig(
@@ -167,7 +177,8 @@ public class HadoopTuningConfig implements TuningConfig
cleanupOnFailure,
overwriteFiles,
ignoreInvalidRows,
- jobProperties
+ jobProperties,
+ combineText
);
}
@@ -183,7 +194,8 @@ public class HadoopTuningConfig implements TuningConfig
cleanupOnFailure,
overwriteFiles,
ignoreInvalidRows,
- jobProperties
+ jobProperties,
+ combineText
);
}
@@ -199,7 +211,8 @@ public class HadoopTuningConfig implements TuningConfig
cleanupOnFailure,
overwriteFiles,
ignoreInvalidRows,
- jobProperties
+ jobProperties,
+ combineText
);
}
}
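For reference, the new property is read from the ingestion spec's tuningConfig via the `@JsonProperty("combineText")` annotation above. A minimal, abbreviated sketch of enabling it, with all other tuningConfig fields and the rest of the spec omitted:

```json
{
  "tuningConfig": {
    "combineText": true
  }
}
```

When the flag is absent it defaults to false, so jobs fall back to the plain TextInputFormat rather than CombineTextInputFormat.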
diff --git a/indexing-hadoop/src/main/java/io/druid/indexer/IndexGeneratorJob.java b/indexing-hadoop/src/main/java/io/druid/indexer/IndexGeneratorJob.java
index db0c9568dac..d867dc8fe9a 100644
--- a/indexing-hadoop/src/main/java/io/druid/indexer/IndexGeneratorJob.java
+++ b/indexing-hadoop/src/main/java/io/druid/indexer/IndexGeneratorJob.java
@@ -62,6 +62,7 @@ import org.apache.hadoop.mapreduce.JobContext;
import org.apache.hadoop.mapreduce.Partitioner;
import org.apache.hadoop.mapreduce.Reducer;
import org.apache.hadoop.mapreduce.lib.input.CombineTextInputFormat;
+import org.apache.hadoop.mapreduce.lib.input.TextInputFormat;
import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;
import org.apache.hadoop.mapreduce.lib.output.TextOutputFormat;
import org.joda.time.DateTime;
@@ -146,7 +147,11 @@ public class IndexGeneratorJob implements Jobby
JobHelper.injectSystemProperties(job);
- job.setInputFormatClass(CombineTextInputFormat.class);
+ if (config.isCombineText()) {
+ job.setInputFormatClass(CombineTextInputFormat.class);
+ } else {
+ job.setInputFormatClass(TextInputFormat.class);
+ }
job.setMapperClass(IndexGeneratorMapper.class);
job.setMapOutputValueClass(Text.class);
diff --git a/indexing-service/pom.xml b/indexing-service/pom.xml
index 810d82e3e63..5b83d487159 100644
--- a/indexing-service/pom.xml
+++ b/indexing-service/pom.xml
@@ -28,7 +28,7 @@
io.druid
druid
- 0.6.117-SNAPSHOT
+ 0.6.121-SNAPSHOT
diff --git a/indexing-service/src/test/java/io/druid/indexing/common/task/TaskSerdeTest.java b/indexing-service/src/test/java/io/druid/indexing/common/task/TaskSerdeTest.java
index f0feae9490a..00407381afc 100644
--- a/indexing-service/src/test/java/io/druid/indexing/common/task/TaskSerdeTest.java
+++ b/indexing-service/src/test/java/io/druid/indexing/common/task/TaskSerdeTest.java
@@ -422,6 +422,7 @@ public class TaskSerdeTest
null,
false,
ImmutableMap.of("foo", "bar"),
+ false,
null,
null,
null,
diff --git a/kafka-eight/pom.xml b/kafka-eight/pom.xml
index c1eef6906a3..9ed51e530bd 100644
--- a/kafka-eight/pom.xml
+++ b/kafka-eight/pom.xml
@@ -28,7 +28,7 @@
io.druid
druid
- 0.6.117-SNAPSHOT
+ 0.6.121-SNAPSHOT
diff --git a/kafka-seven/pom.xml b/kafka-seven/pom.xml
index 699f6e0a3fd..dff8aada79d 100644
--- a/kafka-seven/pom.xml
+++ b/kafka-seven/pom.xml
@@ -28,7 +28,7 @@
io.druid
druid
- 0.6.117-SNAPSHOT
+ 0.6.121-SNAPSHOT
diff --git a/pom.xml b/pom.xml
index fd02b96820e..12bfd8df636 100644
--- a/pom.xml
+++ b/pom.xml
@@ -23,7 +23,7 @@
io.druid
druid
pom
- 0.6.117-SNAPSHOT
+ 0.6.121-SNAPSHOT
druid
druid
diff --git a/processing/pom.xml b/processing/pom.xml
index 7a105c1d656..e714babd8cf 100644
--- a/processing/pom.xml
+++ b/processing/pom.xml
@@ -28,7 +28,7 @@
io.druid
druid
- 0.6.117-SNAPSHOT
+ 0.6.121-SNAPSHOT
diff --git a/rabbitmq/pom.xml b/rabbitmq/pom.xml
index e1195c91435..8345cfc1007 100644
--- a/rabbitmq/pom.xml
+++ b/rabbitmq/pom.xml
@@ -9,7 +9,7 @@
io.druid
druid
- 0.6.117-SNAPSHOT
+ 0.6.121-SNAPSHOT
diff --git a/s3-extensions/pom.xml b/s3-extensions/pom.xml
index 36ae75e9398..967c9f70422 100644
--- a/s3-extensions/pom.xml
+++ b/s3-extensions/pom.xml
@@ -28,7 +28,7 @@
io.druid
druid
- 0.6.117-SNAPSHOT
+ 0.6.121-SNAPSHOT
diff --git a/server/pom.xml b/server/pom.xml
index 36251f38664..e097487ea02 100644
--- a/server/pom.xml
+++ b/server/pom.xml
@@ -28,7 +28,7 @@
io.druid
druid
- 0.6.117-SNAPSHOT
+ 0.6.121-SNAPSHOT
diff --git a/server/src/main/java/io/druid/client/BatchServerInventoryView.java b/server/src/main/java/io/druid/client/BatchServerInventoryView.java
index 509c4a5c793..a2a3c422aa1 100644
--- a/server/src/main/java/io/druid/client/BatchServerInventoryView.java
+++ b/server/src/main/java/io/druid/client/BatchServerInventoryView.java
@@ -24,6 +24,7 @@ import com.fasterxml.jackson.databind.ObjectMapper;
import com.google.api.client.repackaged.com.google.common.base.Preconditions;
import com.google.common.base.Predicate;
import com.google.common.base.Predicates;
+import com.google.common.collect.Iterables;
import com.google.common.collect.MapMaker;
import com.google.common.collect.Sets;
import com.google.inject.Inject;
@@ -79,7 +80,8 @@ public class BatchServerInventoryView extends ServerInventoryView predicate = Predicates.or(defaultFilter, Predicates.or(segmentPredicates.values()));
- Set filteredInventory = Sets.filter(inventory, predicate);
+ // make a copy of the set and not just a filtered view, in order to not keep all the segment data in memory
+ Set filteredInventory = Sets.newHashSet(Iterables.filter(inventory, predicate));
zNodes.put(inventoryKey, filteredInventory);
for (DataSegment segment : filteredInventory) {
@@ -94,7 +96,8 @@ public class BatchServerInventoryView extends ServerInventoryView predicate = Predicates.or(defaultFilter, Predicates.or(segmentPredicates.values()));
- Set filteredInventory = Sets.filter(inventory, predicate);
+ // make a copy of the set and not just a filtered view, in order to not keep all the segment data in memory
+ Set filteredInventory = Sets.newHashSet(Iterables.filter(inventory, predicate));
Set existing = zNodes.get(inventoryKey);
if (existing == null) {
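For context on the change above: Guava's `Sets.filter` returns a live view that keeps a reference to the entire backing set, while copying the filtered elements into a new `HashSet` retains only the matching segments. A minimal standalone sketch of the difference (not Druid code):

```java
import com.google.common.base.Predicate;
import com.google.common.collect.Iterables;
import com.google.common.collect.Sets;

import java.util.Set;

public class FilteredViewVsCopy
{
  public static void main(String[] args)
  {
    Set<String> inventory = Sets.newHashSet("segment-1", "segment-2", "other-1");

    Predicate<String> isSegment = new Predicate<String>()
    {
      @Override
      public boolean apply(String input)
      {
        return input != null && input.startsWith("segment");
      }
    };

    // View: evaluates the predicate lazily and holds a reference to 'inventory',
    // so the full backing set stays reachable as long as the view does.
    Set<String> view = Sets.filter(inventory, isSegment);

    // Copy: materializes only the matching elements; 'inventory' can be garbage
    // collected once nothing else references it.
    Set<String> copy = Sets.newHashSet(Iterables.filter(inventory, isSegment));

    System.out.println(view); // both print only the two matching segments
    System.out.println(copy);
  }
}
```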
diff --git a/services/pom.xml b/services/pom.xml
index 749acffb75c..3787e7a6b96 100644
--- a/services/pom.xml
+++ b/services/pom.xml
@@ -27,7 +27,7 @@
io.druid
druid
- 0.6.117-SNAPSHOT
+ 0.6.121-SNAPSHOT
diff --git a/services/src/main/java/io/druid/cli/CliInternalHadoopIndexer.java b/services/src/main/java/io/druid/cli/CliInternalHadoopIndexer.java
index 7e88cabb2a5..007b56fc076 100644
--- a/services/src/main/java/io/druid/cli/CliInternalHadoopIndexer.java
+++ b/services/src/main/java/io/druid/cli/CliInternalHadoopIndexer.java
@@ -38,7 +38,7 @@ import java.util.List;
*/
@Command(
name = "hadoop-indexer",
- description = "Runs the batch Hadoop Druid Indexer, see https://github.com/metamx/druid/wiki/Batch-ingestion for a description."
+ description = "Runs the batch Hadoop Druid Indexer, see http://druid.io/docs/latest/Batch-ingestion.html for a description."
)
public class CliInternalHadoopIndexer implements Runnable
{
@@ -67,14 +67,13 @@ public class CliInternalHadoopIndexer implements Runnable
try {
HadoopIngestionSpec spec;
if (argumentSpec.startsWith("{")) {
- spec = HadoopDruidIndexerConfig.jsonMapper.readValue(argumentSpec, HadoopIngestionSpec.class);
+ return HadoopDruidIndexerConfig.fromString(argumentSpec);
} else {
- spec = HadoopDruidIndexerConfig.jsonMapper.readValue(new File(argumentSpec), HadoopIngestionSpec.class);
+ return HadoopDruidIndexerConfig.fromFile(new File(argumentSpec));
}
- return HadoopDruidIndexerConfig.fromSchema(spec);
}
catch (Exception e) {
throw Throwables.propagate(e);
}
}
-}
\ No newline at end of file
+}