mirror of https://github.com/apache/druid.git
Merge pull request #1823 from himanshug/datasource_pathspec_querygranularity
For the dataSource inputSpec in Hadoop batch ingestion, use the configured query granularity when reading existing segments instead of NONE
This commit is contained in:
commit 75061dad67
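In brief, the behavior change: when a Hadoop batch ingestion job re-reads existing Druid segments through the dataSource inputSpec, rows were previously read with NONE granularity, i.e. full-precision timestamps; with this patch the job's configured query granularity is applied instead, so re-read rows are truncated the same way freshly ingested rows would be. A minimal, self-contained sketch of that difference, using a hypothetical QueryGranularity stand-in rather than Druid's actual class:

import java.util.concurrent.TimeUnit;

public class GranularityChangeSketch
{
  // Hypothetical stand-in for Druid's QueryGranularity: truncates a timestamp.
  interface QueryGranularity
  {
    long truncate(long timestampMillis);
  }

  static final QueryGranularity NONE = ts -> ts; // old behavior: no truncation
  static final QueryGranularity HOUR =           // e.g. a configured granularity
      ts -> ts - (ts % TimeUnit.HOURS.toMillis(1));

  public static void main(String[] args)
  {
    long ts = 1_444_000_123_456L;
    // Before this patch: segments were re-read with full-precision timestamps.
    System.out.println("NONE: " + NONE.truncate(ts));
    // After this patch: timestamps are truncated to the configured granularity.
    System.out.println("HOUR: " + HOUR.truncate(ts));
  }
}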
@@ -102,6 +102,11 @@ public class DatasourceIngestionSpec
     return new DatasourceIngestionSpec(dataSource, interval, filter, granularity, dimensions, metrics);
   }
 
+  public DatasourceIngestionSpec withQueryGranularity(QueryGranularity granularity)
+  {
+    return new DatasourceIngestionSpec(dataSource, interval, filter, granularity, dimensions, metrics);
+  }
+
   @Override
   public boolean equals(Object o)
   {
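The new withQueryGranularity method follows the same immutable "wither" pattern as withMetrics (used in the second hunk below): rather than mutating the spec, it returns a copy with one field replaced. A minimal sketch of the pattern, with illustrative names in place of Druid's real fields:

public final class SpecSketch
{
  private final String dataSource;
  private final String granularity; // stands in for QueryGranularity

  public SpecSketch(String dataSource, String granularity)
  {
    this.dataSource = dataSource;
    this.granularity = granularity;
  }

  // Mirrors withQueryGranularity(...) from the diff above: the receiver is
  // untouched; callers get a copy carrying the new granularity.
  public SpecSketch withQueryGranularity(String granularity)
  {
    return new SpecSketch(dataSource, granularity);
  }

  public static void main(String[] args)
  {
    SpecSketch original = new SpecSketch("wikipedia", "NONE");
    SpecSketch updated = original.withQueryGranularity("HOUR");
    // original still reads NONE; updated carries the configured granularity.
    System.out.println(original.granularity + " -> " + updated.granularity);
  }
}

Because the spec is never mutated, DatasourcePathSpec can layer overrides (metrics, and now query granularity) one call at a time, as the next hunk does.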
@@ -147,6 +147,8 @@ public class DatasourcePathSpec implements PathSpec
       updatedIngestionSpec = updatedIngestionSpec.withMetrics(Lists.newArrayList(metrics));
     }
 
+    updatedIngestionSpec = updatedIngestionSpec.withQueryGranularity(config.getGranularitySpec().getQueryGranularity());
+
     job.getConfiguration().set(DatasourceInputFormat.CONF_DRUID_SCHEMA, mapper.writeValueAsString(updatedIngestionSpec));
     job.getConfiguration().set(DatasourceInputFormat.CONF_INPUT_SEGMENTS, mapper.writeValueAsString(segments));
     job.getConfiguration().set(DatasourceInputFormat.CONF_MAX_SPLIT_SIZE, String.valueOf(maxSplitSize));
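This hunk threads the configured granularity into the spec and then hands the whole spec to the Hadoop job: the driver JSON-serializes it into the job configuration under CONF_DRUID_SCHEMA, and DatasourceInputFormat, which owns these keys, presumably reads the value back and deserializes it on the task side. A minimal sketch of that serialize/deserialize handoff, using Jackson and a HashMap as a stand-in for Hadoop's Configuration; the Spec POJO and key string here are illustrative, not Druid's:

import com.fasterxml.jackson.databind.ObjectMapper;
import java.util.HashMap;
import java.util.Map;

public class ConfHandoffSketch
{
  static final String CONF_DRUID_SCHEMA = "druid.datasource.input.schema"; // illustrative key

  public static class Spec
  {
    public String dataSource;
    public String queryGranularity;

    public Spec() {} // Jackson needs a no-arg constructor

    public Spec(String dataSource, String queryGranularity)
    {
      this.dataSource = dataSource;
      this.queryGranularity = queryGranularity;
    }
  }

  public static void main(String[] args) throws Exception
  {
    ObjectMapper mapper = new ObjectMapper();
    Map<String, String> jobConf = new HashMap<>(); // stand-in for Hadoop's Configuration

    // Driver side, as in the diff:
    // job.getConfiguration().set(KEY, mapper.writeValueAsString(updatedIngestionSpec))
    Spec updated = new Spec("wikipedia", "HOUR");
    jobConf.put(CONF_DRUID_SCHEMA, mapper.writeValueAsString(updated));

    // Task side: read the same key back and deserialize the spec.
    Spec readBack = mapper.readValue(jobConf.get(CONF_DRUID_SCHEMA), Spec.class);
    System.out.println(readBack.dataSource + " @ " + readBack.queryGranularity);
  }
}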