diff --git a/client/pom.xml b/client/pom.xml
index 93dad68d81f..c5b28662317 100644
--- a/client/pom.xml
+++ b/client/pom.xml
@@ -28,7 +28,7 @@
<groupId>com.metamx</groupId>
<artifactId>druid</artifactId>
- <version>0.4.13-SNAPSHOT</version>
+ <version>0.4.15-SNAPSHOT</version>
diff --git a/common/pom.xml b/common/pom.xml
index 9758c0822df..2fa1c0d21bd 100644
--- a/common/pom.xml
+++ b/common/pom.xml
@@ -28,7 +28,7 @@
<groupId>com.metamx</groupId>
<artifactId>druid</artifactId>
- <version>0.4.13-SNAPSHOT</version>
+ <version>0.4.15-SNAPSHOT</version>
diff --git a/common/src/main/java/com/metamx/druid/db/DbConnector.java b/common/src/main/java/com/metamx/druid/db/DbConnector.java
index 1e73b731353..cb202b1a8f2 100644
--- a/common/src/main/java/com/metamx/druid/db/DbConnector.java
+++ b/common/src/main/java/com/metamx/druid/db/DbConnector.java
@@ -181,7 +181,7 @@ public class DbConnector
dataSource.setPassword(config.getDatabasePassword());
dataSource.setUrl(config.getDatabaseConnectURI());
- if (config.isValidationQuery()) {
+ if (config.useValidationQuery()) {
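+ // When enabled, the pool runs the validation query on every connection it hands out (testOnBorrow).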
dataSource.setValidationQuery(config.getValidationQuery());
dataSource.setTestOnBorrow(true);
}
diff --git a/common/src/main/java/com/metamx/druid/db/DbConnectorConfig.java b/common/src/main/java/com/metamx/druid/db/DbConnectorConfig.java
index fb7d1a99916..e17302a3183 100644
--- a/common/src/main/java/com/metamx/druid/db/DbConnectorConfig.java
+++ b/common/src/main/java/com/metamx/druid/db/DbConnectorConfig.java
@@ -44,7 +44,7 @@ public abstract class DbConnectorConfig
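+ // Renamed from isValidationQuery so the accessor matches the "useValidationQuery" JSON property it maps to.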
@JsonProperty("useValidationQuery")
@Config("druid.database.validation")
- public boolean isValidationQuery() {
+ public boolean useValidationQuery() {
return false;
}
diff --git a/examples/pom.xml b/examples/pom.xml
index cb3070afda0..4ec32589df5 100644
--- a/examples/pom.xml
+++ b/examples/pom.xml
@@ -9,7 +9,7 @@
<groupId>com.metamx</groupId>
<artifactId>druid</artifactId>
- <version>0.4.13-SNAPSHOT</version>
+ <version>0.4.15-SNAPSHOT</version>
diff --git a/index-common/pom.xml b/index-common/pom.xml
index a0723e20af0..eb8cbbe5b4c 100644
--- a/index-common/pom.xml
+++ b/index-common/pom.xml
@@ -28,7 +28,7 @@
<groupId>com.metamx</groupId>
<artifactId>druid</artifactId>
- <version>0.4.13-SNAPSHOT</version>
+ <version>0.4.15-SNAPSHOT</version>
diff --git a/indexer/pom.xml b/indexer/pom.xml
index 546a7eb21ff..917babbf034 100644
--- a/indexer/pom.xml
+++ b/indexer/pom.xml
@@ -28,7 +28,7 @@
<groupId>com.metamx</groupId>
<artifactId>druid</artifactId>
- <version>0.4.13-SNAPSHOT</version>
+ <version>0.4.15-SNAPSHOT</version>
diff --git a/indexer/src/main/java/com/metamx/druid/indexer/DeterminePartitionsJob.java b/indexer/src/main/java/com/metamx/druid/indexer/DeterminePartitionsJob.java
index f34ff2988f2..425b33cedff 100644
--- a/indexer/src/main/java/com/metamx/druid/indexer/DeterminePartitionsJob.java
+++ b/indexer/src/main/java/com/metamx/druid/indexer/DeterminePartitionsJob.java
@@ -504,7 +504,6 @@ public class DeterminePartitionsJob implements Jobby
public static class DeterminePartitionsDimSelectionReducer extends DeterminePartitionsDimSelectionBaseReducer
{
private static final double SHARD_COMBINE_THRESHOLD = 0.25;
- private static final double SHARD_OVERSIZE_THRESHOLD = 1.5;
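+ // The fixed 1.5x oversize factor moves to PartitionsSpec, where it becomes the default for the new configurable maxPartitionSize.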
private static final int HIGH_CARDINALITY_THRESHOLD = 3000000;
@Override
@@ -672,7 +671,7 @@ public class DeterminePartitionsJob implements Jobby
// Make sure none of these shards are oversized
boolean oversized = false;
for(final DimPartition partition : dimPartitions.partitions) {
- if(partition.rows > config.getTargetPartitionSize() * SHARD_OVERSIZE_THRESHOLD) {
+ if(partition.rows > config.getMaxPartitionSize()) {
log.info("Dimension[%s] has an oversized shard: %s", dimPartitions.dim, partition.shardSpec);
oversized = true;
}
diff --git a/indexer/src/main/java/com/metamx/druid/indexer/HadoopDruidIndexerConfig.java b/indexer/src/main/java/com/metamx/druid/indexer/HadoopDruidIndexerConfig.java
index 1dfad9de181..364b880518c 100644
--- a/indexer/src/main/java/com/metamx/druid/indexer/HadoopDruidIndexerConfig.java
+++ b/indexer/src/main/java/com/metamx/druid/indexer/HadoopDruidIndexerConfig.java
@@ -236,7 +236,7 @@ public class HadoopDruidIndexerConfig
this.partitionsSpec = partitionsSpec;
} else {
// Backwards compatibility
- this.partitionsSpec = new PartitionsSpec(partitionDimension, targetPartitionSize, false);
+ this.partitionsSpec = new PartitionsSpec(partitionDimension, targetPartitionSize, null, false);
}
if(granularitySpec != null) {
@@ -431,6 +431,11 @@ public class HadoopDruidIndexerConfig
return partitionsSpec.getTargetPartitionSize();
}
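+ // Delegates to PartitionsSpec; defaults to 1.5x the target partition size when not set explicitly.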
+ public long getMaxPartitionSize()
+ {
+ return partitionsSpec.getMaxPartitionSize();
+ }
+
public boolean isUpdaterJobSpecSet()
{
return (updaterJobSpec != null);
diff --git a/indexer/src/main/java/com/metamx/druid/indexer/partitions/PartitionsSpec.java b/indexer/src/main/java/com/metamx/druid/indexer/partitions/PartitionsSpec.java
index e30bad393f6..5571422585c 100644
--- a/indexer/src/main/java/com/metamx/druid/indexer/partitions/PartitionsSpec.java
+++ b/indexer/src/main/java/com/metamx/druid/indexer/partitions/PartitionsSpec.java
@@ -8,22 +8,30 @@ import javax.annotation.Nullable;
public class PartitionsSpec
{
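+ // When maxPartitionSize is omitted, a shard may grow to 1.5x targetPartitionSize before it counts as oversized.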
+ private static final double DEFAULT_OVERSIZE_THRESHOLD = 1.5;
+
@Nullable
private final String partitionDimension;
private final long targetPartitionSize;
+ private final long maxPartitionSize;
+
private final boolean assumeGrouped;
@JsonCreator
public PartitionsSpec(
@JsonProperty("partitionDimension") @Nullable String partitionDimension,
@JsonProperty("targetPartitionSize") @Nullable Long targetPartitionSize,
+ @JsonProperty("maxPartitionSize") @Nullable Long maxPartitionSize,
@JsonProperty("assumeGrouped") @Nullable Boolean assumeGrouped
)
{
this.partitionDimension = partitionDimension;
this.targetPartitionSize = targetPartitionSize == null ? -1 : targetPartitionSize;
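+ // No explicit max given: fall back to DEFAULT_OVERSIZE_THRESHOLD times the target size.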
+ this.maxPartitionSize = maxPartitionSize == null
+ ? (long) (this.targetPartitionSize * DEFAULT_OVERSIZE_THRESHOLD)
+ : maxPartitionSize;
this.assumeGrouped = assumeGrouped == null ? false : assumeGrouped;
}
@@ -46,6 +54,12 @@ public class PartitionsSpec
return targetPartitionSize;
}
+ @JsonProperty
+ public long getMaxPartitionSize()
+ {
+ return maxPartitionSize;
+ }
+
@JsonProperty
public boolean isAssumeGrouped()
{
diff --git a/indexer/src/test/java/com/metamx/druid/indexer/HadoopDruidIndexerConfigTest.java b/indexer/src/test/java/com/metamx/druid/indexer/HadoopDruidIndexerConfigTest.java
index 5fdff8ce8b8..73dcd252055 100644
--- a/indexer/src/test/java/com/metamx/druid/indexer/HadoopDruidIndexerConfigTest.java
+++ b/indexer/src/test/java/com/metamx/druid/indexer/HadoopDruidIndexerConfigTest.java
@@ -24,6 +24,7 @@ import com.google.common.base.Throwables;
import com.google.common.collect.Lists;
import com.metamx.druid.indexer.granularity.UniformGranularitySpec;
import com.metamx.druid.indexer.partitions.PartitionsSpec;
+import com.metamx.druid.indexer.updater.DbUpdaterJobSpec;
import com.metamx.druid.jackson.DefaultObjectMapper;
import org.joda.time.Interval;
@@ -39,8 +40,8 @@ public class HadoopDruidIndexerConfigTest
final HadoopDruidIndexerConfig cfg;
try {
- cfg = jsonMapper.readValue(
- "{"
+ cfg = jsonReadWriteRead(
+ "{"
+ " \"granularitySpec\":{"
+ " \"type\":\"uniform\","
+ " \"gran\":\"hour\","
@@ -74,7 +75,7 @@ public class HadoopDruidIndexerConfigTest
final HadoopDruidIndexerConfig cfg;
try {
- cfg = jsonMapper.readValue(
+ cfg = jsonReadWriteRead(
"{"
+ "\"segmentGranularity\":\"day\","
+ "\"intervals\":[\"2012-02-01/P1D\"]"
@@ -137,7 +138,7 @@ public class HadoopDruidIndexerConfigTest
public void testInvalidGranularityCombination() {
boolean thrown = false;
try {
- final HadoopDruidIndexerConfig cfg = jsonMapper.readValue(
+ final HadoopDruidIndexerConfig cfg = jsonReadWriteRead(
"{"
+ "\"segmentGranularity\":\"day\","
+ "\"intervals\":[\"2012-02-01/P1D\"],"
@@ -161,7 +162,7 @@ public class HadoopDruidIndexerConfigTest
final HadoopDruidIndexerConfig cfg;
try {
- cfg = jsonMapper.readValue(
+ cfg = jsonReadWriteRead(
"{}",
HadoopDruidIndexerConfig.class
);
@@ -183,7 +184,7 @@ public class HadoopDruidIndexerConfigTest
final HadoopDruidIndexerConfig cfg;
try {
- cfg = jsonMapper.readValue(
+ cfg = jsonReadWriteRead(
"{"
+ "\"partitionsSpec\":{"
+ " \"targetPartitionSize\":100"
@@ -221,7 +222,7 @@ public class HadoopDruidIndexerConfigTest
final HadoopDruidIndexerConfig cfg;
try {
- cfg = jsonMapper.readValue(
+ cfg = jsonReadWriteRead(
"{"
+ "\"partitionsSpec\":{"
+ " \"targetPartitionSize\":100,"
@@ -248,6 +249,12 @@ public class HadoopDruidIndexerConfigTest
100
);
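+ // This spec sets no maxPartitionSize, so expect the 1.5x default: 100 * 1.5 = 150.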
+ Assert.assertEquals(
+ "getMaxPartitionSize",
+ partitionsSpec.getMaxPartitionSize(),
+ 150
+ );
+
Assert.assertEquals(
"getPartitionDimension",
partitionsSpec.getPartitionDimension(),
@@ -260,7 +267,7 @@ public class HadoopDruidIndexerConfigTest
final HadoopDruidIndexerConfig cfg;
try {
- cfg = jsonMapper.readValue(
+ cfg = jsonReadWriteRead(
"{"
+ "\"targetPartitionSize\":100,"
+ "\"partitionDimension\":\"foo\""
@@ -285,6 +292,58 @@ public class HadoopDruidIndexerConfigTest
100
);
+ Assert.assertEquals(
+ "getMaxPartitionSize",
+ partitionsSpec.getMaxPartitionSize(),
+ 150
+ );
+
+ Assert.assertEquals(
+ "getPartitionDimension",
+ partitionsSpec.getPartitionDimension(),
+ "foo"
+ );
+ }
+
+ @Test
+ public void testPartitionsSpecMaxPartitionSize() {
+ final HadoopDruidIndexerConfig cfg;
+
+ try {
+ cfg = jsonReadWriteRead(
+ "{"
+ + "\"partitionsSpec\":{"
+ + " \"targetPartitionSize\":100,"
+ + " \"maxPartitionSize\":200,"
+ + " \"partitionDimension\":\"foo\""
+ + " }"
+ + "}",
+ HadoopDruidIndexerConfig.class
+ );
+ } catch(Exception e) {
+ throw Throwables.propagate(e);
+ }
+
+ final PartitionsSpec partitionsSpec = cfg.getPartitionsSpec();
+
+ Assert.assertEquals(
+ "isDeterminingPartitions",
+ partitionsSpec.isDeterminingPartitions(),
+ true
+ );
+
+ Assert.assertEquals(
+ "getTargetPartitionSize",
+ partitionsSpec.getTargetPartitionSize(),
+ 100
+ );
+
+ Assert.assertEquals(
+ "getMaxPartitionSize",
+ partitionsSpec.getMaxPartitionSize(),
+ 200
+ );
+
Assert.assertEquals(
"getPartitionDimension",
partitionsSpec.getPartitionDimension(),
@@ -296,7 +355,7 @@ public class HadoopDruidIndexerConfigTest
public void testInvalidPartitionsCombination() {
boolean thrown = false;
try {
- final HadoopDruidIndexerConfig cfg = jsonMapper.readValue(
+ final HadoopDruidIndexerConfig cfg = jsonReadWriteRead(
"{"
+ "\"targetPartitionSize\":100,"
+ "\"partitionsSpec\":{"
@@ -311,4 +370,40 @@ public class HadoopDruidIndexerConfigTest
Assert.assertTrue("Exception thrown", thrown);
}
+
+ @Test
+ public void testDbUpdaterJobSpec() throws Exception
+ {
+ final HadoopDruidIndexerConfig cfg;
+
+ cfg = jsonReadWriteRead(
+ "{"
+ + "\"updaterJobSpec\":{\n"
+ + " \"type\" : \"db\",\n"
+ + " \"connectURI\" : \"jdbc:mysql://localhost/druid\",\n"
+ + " \"user\" : \"rofl\",\n"
+ + " \"password\" : \"p4ssw0rd\",\n"
+ + " \"segmentTable\" : \"segments\"\n"
+ + " }"
+ + "}",
+ HadoopDruidIndexerConfig.class
+ );
+
+ final DbUpdaterJobSpec spec = (DbUpdaterJobSpec) cfg.getUpdaterJobSpec();
+ Assert.assertEquals("segments", spec.getSegmentTable());
+ Assert.assertEquals("jdbc:mysql://localhost/druid", spec.getDatabaseConnectURI());
+ Assert.assertEquals("rofl", spec.getDatabaseUser());
+ Assert.assertEquals("p4ssw0rd", spec.getDatabasePassword());
+ Assert.assertEquals(false, spec.useValidationQuery());
+ }
+
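+ // Round-trips the config through Jackson (read, write, read again) so each test exercises serialization as well as deserialization.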
+ private <T> T jsonReadWriteRead(String s, Class<T> klass)
+ {
+ try {
+ return jsonMapper.readValue(jsonMapper.writeValueAsBytes(jsonMapper.readValue(s, klass)), klass);
+ }
+ catch (Exception e) {
+ throw Throwables.propagate(e);
+ }
+ }
}
diff --git a/merger/pom.xml b/merger/pom.xml
index fcbd7f29f3e..48b843a3cb6 100644
--- a/merger/pom.xml
+++ b/merger/pom.xml
@@ -28,7 +28,7 @@
<groupId>com.metamx</groupId>
<artifactId>druid</artifactId>
- <version>0.4.13-SNAPSHOT</version>
+ <version>0.4.15-SNAPSHOT</version>
diff --git a/pom.xml b/pom.xml
index 00b7f403c1c..27afe122406 100644
--- a/pom.xml
+++ b/pom.xml
@@ -23,7 +23,7 @@
<groupId>com.metamx</groupId>
<artifactId>druid</artifactId>
<packaging>pom</packaging>
- <version>0.4.13-SNAPSHOT</version>
+ <version>0.4.15-SNAPSHOT</version>
<name>druid</name>
<description>druid</description>
diff --git a/realtime/pom.xml b/realtime/pom.xml
index c12f4bd0d84..146b338c413 100644
--- a/realtime/pom.xml
+++ b/realtime/pom.xml
@@ -28,7 +28,7 @@
<groupId>com.metamx</groupId>
<artifactId>druid</artifactId>
- <version>0.4.13-SNAPSHOT</version>
+ <version>0.4.15-SNAPSHOT</version>
diff --git a/server/pom.xml b/server/pom.xml
index 194991b7c56..fa78634de9c 100644
--- a/server/pom.xml
+++ b/server/pom.xml
@@ -28,7 +28,7 @@
<groupId>com.metamx</groupId>
<artifactId>druid</artifactId>
- <version>0.4.13-SNAPSHOT</version>
+ <version>0.4.15-SNAPSHOT</version>
diff --git a/services/pom.xml b/services/pom.xml
index 0c2c9d7dc66..e09722ff99d 100644
--- a/services/pom.xml
+++ b/services/pom.xml
@@ -24,11 +24,11 @@
<artifactId>druid-services</artifactId>
<name>druid-services</name>
<description>druid-services</description>
- <version>0.4.13-SNAPSHOT</version>
+ <version>0.4.15-SNAPSHOT</version>
<parent>
<groupId>com.metamx</groupId>
<artifactId>druid</artifactId>
- <version>0.4.13-SNAPSHOT</version>
+ <version>0.4.15-SNAPSHOT</version>