Merge branch 'master' into refactor-indexing

fjy 2013-05-15 15:37:25 -07:00
commit 1d232ee930
3 changed files with 58 additions and 26 deletions

View File

@@ -199,7 +199,7 @@ public class HadoopDruidIndexerConfig
final @JsonProperty("targetPartitionSize") Long targetPartitionSize,
final @JsonProperty("partitionsSpec") PartitionsSpec partitionsSpec,
final @JsonProperty("leaveIntermediate") boolean leaveIntermediate,
-final @JsonProperty("cleanupOnFailure") boolean cleanupOnFailure,
+final @JsonProperty("cleanupOnFailure") Boolean cleanupOnFailure,
final @JsonProperty("shardSpecs") Map<DateTime, List<HadoopyShardSpec>> shardSpecs,
final @JsonProperty("overwriteFiles") boolean overwriteFiles,
final @JsonProperty("rollupSpec") DataRollupSpec rollupSpec,
@@ -219,7 +219,7 @@ public class HadoopDruidIndexerConfig
this.version = version == null ? new DateTime().toString() : version;
this.partitionsSpec = partitionsSpec;
this.leaveIntermediate = leaveIntermediate;
-this.cleanupOnFailure = cleanupOnFailure;
+this.cleanupOnFailure = (cleanupOnFailure == null ? true : cleanupOnFailure);
this.shardSpecs = shardSpecs;
this.overwriteFiles = overwriteFiles;
this.rollupSpec = rollupSpec;

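The change above boxes cleanupOnFailure as a Boolean so that JSON deserialization can distinguish an omitted property from an explicit false, and the constructor then defaults the absent case to true. With a primitive boolean, an omitted property would silently bind as false, disabling cleanup by default. A minimal, self-contained sketch of the same pattern (hypothetical class name, assuming the Jackson 2 API; the original code may bind through a different Jackson package):

import com.fasterxml.jackson.annotation.JsonCreator;
import com.fasterxml.jackson.annotation.JsonProperty;
import com.fasterxml.jackson.databind.ObjectMapper;

// Hypothetical stand-in for HadoopDruidIndexerConfig, illustration only.
public class CleanupConfigSketch
{
  private final boolean cleanupOnFailure;

  @JsonCreator
  public CleanupConfigSketch(
      final @JsonProperty("cleanupOnFailure") Boolean cleanupOnFailure
  )
  {
    // null means the key was absent from the JSON, so fall back to true
    this.cleanupOnFailure = (cleanupOnFailure == null ? true : cleanupOnFailure);
  }

  public boolean isCleanupOnFailure()
  {
    return cleanupOnFailure;
  }

  public static void main(String[] args) throws Exception
  {
    final ObjectMapper mapper = new ObjectMapper();
    // omitted property -> defaults to true
    System.out.println(mapper.readValue("{}", CleanupConfigSketch.class).isCleanupOnFailure());
    // explicit false -> honored
    System.out.println(mapper.readValue("{\"cleanupOnFailure\":false}", CleanupConfigSketch.class).isCleanupOnFailure());
  }
}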
View File

@@ -102,6 +102,10 @@ public class HadoopDruidIndexerJob implements Jobby
}
}
+if (failedMessage == null) {
+publishedSegments = IndexGeneratorJob.getPublishedSegments(config);
+}
if (!config.isLeaveIntermediate()) {
if (failedMessage == null || config.isCleanupOnFailure()) {
Path workingPath = config.makeIntermediatePath();
@@ -119,8 +123,6 @@
throw new ISE(failedMessage);
}
-publishedSegments = IndexGeneratorJob.getPublishedSegments(config);
return true;
}

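Read together, the two hunks above move the getPublishedSegments lookup ahead of the failure handling: published segments are now resolved only when every sub-job succeeded, rather than unconditionally after a potential failure. A hedged sketch of the resulting control flow, with stand-in types where the real Druid and Hadoop classes are not shown in the diff:

// Stand-ins for the real config and segment types; only the control
// flow mirrors the hunks above.
class IndexerJobSketch
{
  interface Config
  {
    boolean isLeaveIntermediate();
    boolean isCleanupOnFailure();
    String makeIntermediatePath();
  }

  static boolean run(final Config config, final String failedMessage)
  {
    Object publishedSegments = null;

    if (failedMessage == null) {
      // resolve published segments only after all sub-jobs succeeded;
      // stands in for IndexGeneratorJob.getPublishedSegments(config)
      publishedSegments = new Object();
    }

    if (!config.isLeaveIntermediate()) {
      // clean intermediates on success, and after a failure only when
      // cleanupOnFailure is set
      if (failedMessage == null || config.isCleanupOnFailure()) {
        final String workingPath = config.makeIntermediatePath();
        // ... delete workingPath here ...
      }
    }

    if (failedMessage != null) {
      // the diff throws ISE here; IllegalStateException stands in
      throw new IllegalStateException(failedMessage);
    }
    return true;
  }
}

The reordering means a failed run no longer attempts to read publish metadata that was never written.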
View File

@@ -157,28 +157,6 @@ public class HadoopDruidIndexerConfigTest
Assert.assertTrue("Exception thrown", thrown);
}
-@Test
-public void testPartitionsSpecNoPartitioning() {
-final HadoopDruidIndexerConfig cfg;
-try {
-cfg = jsonReadWriteRead(
-"{}",
-HadoopDruidIndexerConfig.class
-);
-} catch(Exception e) {
-throw Throwables.propagate(e);
-}
-final PartitionsSpec partitionsSpec = cfg.getPartitionsSpec();
-Assert.assertEquals(
-"isDeterminingPartitions",
-partitionsSpec.isDeterminingPartitions(),
-false
-);
-}
@Test
public void testPartitionsSpecAutoDimension() {
final HadoopDruidIndexerConfig cfg;
@@ -397,6 +375,58 @@ public class HadoopDruidIndexerConfigTest
Assert.assertEquals(false, spec.useValidationQuery());
}
+@Test
+public void testDefaultSettings() {
+final HadoopDruidIndexerConfig cfg;
+try {
+cfg = jsonReadWriteRead(
+"{}",
+HadoopDruidIndexerConfig.class
+);
+} catch(Exception e) {
+throw Throwables.propagate(e);
+}
+Assert.assertEquals(
+"cleanupOnFailure",
+cfg.isCleanupOnFailure(),
+true
+);
+Assert.assertEquals(
+"overwriteFiles",
+cfg.isOverwriteFiles(),
+false
+);
+Assert.assertEquals(
+"isDeterminingPartitions",
+cfg.getPartitionsSpec().isDeterminingPartitions(),
+false
+);
+}
+@Test
+public void testNoCleanupOnFailure() {
+final HadoopDruidIndexerConfig cfg;
+try {
+cfg = jsonReadWriteRead(
+"{\"cleanupOnFailure\":false}",
+HadoopDruidIndexerConfig.class
+);
+} catch(Exception e) {
+throw Throwables.propagate(e);
+}
+Assert.assertEquals(
+"cleanupOnFailure",
+cfg.isCleanupOnFailure(),
+false
+);
+}
private <T> T jsonReadWriteRead(String s, Class<T> klass)
{
try {
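For reference, a round-trip helper of this shape typically parses the JSON, re-serializes it, and parses the result again, so each test exercises both deserialization and serialization. A sketch under that assumption (the shared jsonMapper field is an assumption, not shown in the diff):

import com.fasterxml.jackson.databind.ObjectMapper;
import com.google.common.base.Throwables;

class RoundTripSketch
{
  // assumed shared mapper; the real test class may configure its own
  private static final ObjectMapper jsonMapper = new ObjectMapper();

  static <T> T jsonReadWriteRead(final String s, final Class<T> klass)
  {
    try {
      // parse -> serialize -> parse, so defaults survive a full round trip
      return jsonMapper.readValue(jsonMapper.writeValueAsBytes(jsonMapper.readValue(s, klass)), klass);
    }
    catch (Exception e) {
      throw Throwables.propagate(e);
    }
  }
}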