diff --git a/indexer/src/main/java/com/metamx/druid/indexer/HadoopDruidIndexerConfig.java b/indexer/src/main/java/com/metamx/druid/indexer/HadoopDruidIndexerConfig.java
index 364b880518c..e25da4a27d5 100644
--- a/indexer/src/main/java/com/metamx/druid/indexer/HadoopDruidIndexerConfig.java
+++ b/indexer/src/main/java/com/metamx/druid/indexer/HadoopDruidIndexerConfig.java
@@ -199,7 +199,7 @@ public class HadoopDruidIndexerConfig
       final @JsonProperty("targetPartitionSize") Long targetPartitionSize,
       final @JsonProperty("partitionsSpec") PartitionsSpec partitionsSpec,
       final @JsonProperty("leaveIntermediate") boolean leaveIntermediate,
-      final @JsonProperty("cleanupOnFailure") boolean cleanupOnFailure,
+      final @JsonProperty("cleanupOnFailure") Boolean cleanupOnFailure,
       final @JsonProperty("shardSpecs") Map<DateTime, List<HadoopyShardSpec>> shardSpecs,
       final @JsonProperty("overwriteFiles") boolean overwriteFiles,
       final @JsonProperty("rollupSpec") DataRollupSpec rollupSpec,
@@ -219,7 +219,7 @@ public class HadoopDruidIndexerConfig
     this.version = version == null ? new DateTime().toString() : version;
     this.partitionsSpec = partitionsSpec;
    this.leaveIntermediate = leaveIntermediate;
-    this.cleanupOnFailure = cleanupOnFailure;
+    this.cleanupOnFailure = (cleanupOnFailure == null ? true : cleanupOnFailure);
     this.shardSpecs = shardSpecs;
     this.overwriteFiles = overwriteFiles;
     this.rollupSpec = rollupSpec;
diff --git a/indexer/src/test/java/com/metamx/druid/indexer/HadoopDruidIndexerConfigTest.java b/indexer/src/test/java/com/metamx/druid/indexer/HadoopDruidIndexerConfigTest.java
index 73dcd252055..dcabe168e67 100644
--- a/indexer/src/test/java/com/metamx/druid/indexer/HadoopDruidIndexerConfigTest.java
+++ b/indexer/src/test/java/com/metamx/druid/indexer/HadoopDruidIndexerConfigTest.java
@@ -157,28 +157,6 @@ public class HadoopDruidIndexerConfigTest
     Assert.assertTrue("Exception thrown", thrown);
   }
 
-  @Test
-  public void testPartitionsSpecNoPartitioning() {
-    final HadoopDruidIndexerConfig cfg;
-
-    try {
-      cfg = jsonReadWriteRead(
-          "{}",
-          HadoopDruidIndexerConfig.class
-      );
-    } catch(Exception e) {
-      throw Throwables.propagate(e);
-    }
-
-    final PartitionsSpec partitionsSpec = cfg.getPartitionsSpec();
-
-    Assert.assertEquals(
-        "isDeterminingPartitions",
-        partitionsSpec.isDeterminingPartitions(),
-        false
-    );
-  }
-
   @Test
   public void testPartitionsSpecAutoDimension() {
     final HadoopDruidIndexerConfig cfg;
@@ -397,6 +375,58 @@ public class HadoopDruidIndexerConfigTest
     Assert.assertEquals(false, spec.useValidationQuery());
   }
 
+  @Test
+  public void testDefaultSettings() {
+    final HadoopDruidIndexerConfig cfg;
+
+    try {
+      cfg = jsonReadWriteRead(
+          "{}",
+          HadoopDruidIndexerConfig.class
+      );
+    } catch(Exception e) {
+      throw Throwables.propagate(e);
+    }
+
+    Assert.assertEquals(
+        "cleanupOnFailure",
+        cfg.isCleanupOnFailure(),
+        true
+    );
+
+    Assert.assertEquals(
+        "overwriteFiles",
+        cfg.isOverwriteFiles(),
+        false
+    );
+
+    Assert.assertEquals(
+        "isDeterminingPartitions",
+        cfg.getPartitionsSpec().isDeterminingPartitions(),
+        false
+    );
+  }
+
+  @Test
+  public void testNoCleanupOnFailure() {
+    final HadoopDruidIndexerConfig cfg;
+
+    try {
+      cfg = jsonReadWriteRead(
+          "{\"cleanupOnFailure\":false}",
+          HadoopDruidIndexerConfig.class
+      );
+    } catch(Exception e) {
+      throw Throwables.propagate(e);
+    }
+
+    Assert.assertEquals(
+        "cleanupOnFailure",
+        cfg.isCleanupOnFailure(),
+        false
+    );
+  }
+
   private <T> T jsonReadWriteRead(String s, Class<T> klass)
   {
     try {