HadoopDruidIndexerConfig: Fix default cleanupOnFailure (should have been true)

Gian Merlino 2013-05-15 01:36:49 -07:00
parent 06e09a1eb5
commit 384e9085f5
2 changed files with 54 additions and 24 deletions
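The underlying issue: when a constructor parameter is a primitive boolean, Jackson fills in false for a missing JSON property, so an intended default of true can never take effect. Switching the parameter to the boxed Boolean lets the constructor observe null for an absent property and substitute true. A minimal, self-contained sketch of that pattern (assuming Jackson 2's com.fasterxml ObjectMapper and a hypothetical Config class, not the actual Druid code):

import com.fasterxml.jackson.annotation.JsonCreator;
import com.fasterxml.jackson.annotation.JsonProperty;
import com.fasterxml.jackson.databind.ObjectMapper;

public class CleanupDefaultSketch
{
  static class Config
  {
    private final boolean cleanupOnFailure;

    @JsonCreator
    public Config(@JsonProperty("cleanupOnFailure") Boolean cleanupOnFailure)
    {
      // A missing property arrives here as null; a primitive boolean
      // parameter would have silently coerced it to false, which is why
      // the old default could never be true.
      this.cleanupOnFailure = (cleanupOnFailure == null ? true : cleanupOnFailure);
    }

    public boolean isCleanupOnFailure()
    {
      return cleanupOnFailure;
    }
  }

  public static void main(String[] args) throws Exception
  {
    final ObjectMapper mapper = new ObjectMapper();

    // Absent key: constructor sees null and applies the default, true.
    System.out.println(mapper.readValue("{}", Config.class).isCleanupOnFailure());

    // Explicit false still overrides the default.
    System.out.println(
        mapper.readValue("{\"cleanupOnFailure\":false}", Config.class).isCleanupOnFailure()
    );
  }
}

Run standalone, this prints true for {} and false for {"cleanupOnFailure":false}, mirroring the two tests added in this commit.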

HadoopDruidIndexerConfig.java

@@ -199,7 +199,7 @@ public class HadoopDruidIndexerConfig
       final @JsonProperty("targetPartitionSize") Long targetPartitionSize,
       final @JsonProperty("partitionsSpec") PartitionsSpec partitionsSpec,
       final @JsonProperty("leaveIntermediate") boolean leaveIntermediate,
-      final @JsonProperty("cleanupOnFailure") boolean cleanupOnFailure,
+      final @JsonProperty("cleanupOnFailure") Boolean cleanupOnFailure,
       final @JsonProperty("shardSpecs") Map<DateTime, List<HadoopyShardSpec>> shardSpecs,
       final @JsonProperty("overwriteFiles") boolean overwriteFiles,
       final @JsonProperty("rollupSpec") DataRollupSpec rollupSpec,
@@ -219,7 +219,7 @@ public class HadoopDruidIndexerConfig
     this.version = version == null ? new DateTime().toString() : version;
     this.partitionsSpec = partitionsSpec;
     this.leaveIntermediate = leaveIntermediate;
-    this.cleanupOnFailure = cleanupOnFailure;
+    this.cleanupOnFailure = (cleanupOnFailure == null ? true : cleanupOnFailure);
     this.shardSpecs = shardSpecs;
     this.overwriteFiles = overwriteFiles;
     this.rollupSpec = rollupSpec;

HadoopDruidIndexerConfigTest.java

@@ -157,28 +157,6 @@ public class HadoopDruidIndexerConfigTest
     Assert.assertTrue("Exception thrown", thrown);
   }

-  @Test
-  public void testPartitionsSpecNoPartitioning() {
-    final HadoopDruidIndexerConfig cfg;
-
-    try {
-      cfg = jsonReadWriteRead(
-          "{}",
-          HadoopDruidIndexerConfig.class
-      );
-    } catch(Exception e) {
-      throw Throwables.propagate(e);
-    }
-
-    final PartitionsSpec partitionsSpec = cfg.getPartitionsSpec();
-
-    Assert.assertEquals(
-        "isDeterminingPartitions",
-        partitionsSpec.isDeterminingPartitions(),
-        false
-    );
-  }
-
   @Test
   public void testPartitionsSpecAutoDimension() {
     final HadoopDruidIndexerConfig cfg;
@@ -397,6 +375,58 @@ public class HadoopDruidIndexerConfigTest
     Assert.assertEquals(false, spec.useValidationQuery());
   }

+  @Test
+  public void testDefaultSettings() {
+    final HadoopDruidIndexerConfig cfg;
+
+    try {
+      cfg = jsonReadWriteRead(
+          "{}",
+          HadoopDruidIndexerConfig.class
+      );
+    } catch(Exception e) {
+      throw Throwables.propagate(e);
+    }
+
+    Assert.assertEquals(
+        "cleanupOnFailure",
+        cfg.isCleanupOnFailure(),
+        true
+    );
+
+    Assert.assertEquals(
+        "overwriteFiles",
+        cfg.isOverwriteFiles(),
+        false
+    );
+
+    Assert.assertEquals(
+        "isDeterminingPartitions",
+        cfg.getPartitionsSpec().isDeterminingPartitions(),
+        false
+    );
+  }
+
+  @Test
+  public void testNoCleanupOnFailure() {
+    final HadoopDruidIndexerConfig cfg;
+
+    try {
+      cfg = jsonReadWriteRead(
+          "{\"cleanupOnFailure\":false}",
+          HadoopDruidIndexerConfig.class
+      );
+    } catch(Exception e) {
+      throw Throwables.propagate(e);
+    }
+
+    Assert.assertEquals(
+        "cleanupOnFailure",
+        cfg.isCleanupOnFailure(),
+        false
+    );
+  }
+
   private <T> T jsonReadWriteRead(String s, Class<T> klass)
   {
     try {