mirror of https://github.com/apache/druid.git
Allow index job to utilize hadoop cluster information from job config. (#4626)
* Allow index job to utilize hadoop cluster information from job config. * Add new method that injects system configuration and then job configuration. * Make changes to use HadoopDruidIndexerConfig.addJobProperties method. * refactor code for overloaded addJobProperties.
This commit is contained in:
parent
8dddccc687
commit
d6179126ed
|
@@ -561,8 +561,11 @@ public class HadoopDruidIndexerConfig
|
|||
|
||||
public void addJobProperties(Job job)
|
||||
{
|
||||
Configuration conf = job.getConfiguration();
|
||||
addJobProperties(job.getConfiguration());
|
||||
}
|
||||
|
||||
public void addJobProperties(Configuration conf)
|
||||
{
|
||||
for (final Map.Entry<String, String> entry : schema.getTuningConfig().getJobProperties().entrySet()) {
|
||||
conf.set(entry.getKey(), entry.getValue());
|
||||
}
|
||||
|
|
|
@@ -101,6 +101,8 @@ public class IndexGeneratorJob implements Jobby
|
|||
public static List<DataSegment> getPublishedSegments(HadoopDruidIndexerConfig config)
|
||||
{
|
||||
final Configuration conf = JobHelper.injectSystemProperties(new Configuration());
|
||||
config.addJobProperties(conf);
|
||||
|
||||
final ObjectMapper jsonMapper = HadoopDruidIndexerConfig.JSON_MAPPER;
|
||||
|
||||
ImmutableList.Builder<DataSegment> publishedSegmentsBuilder = ImmutableList.builder();
|
||||
|
|
|
@@ -376,7 +376,9 @@ public class JobHelper
|
|||
Path workingPath = config.makeIntermediatePath();
|
||||
log.info("Deleting path[%s]", workingPath);
|
||||
try {
|
||||
workingPath.getFileSystem(injectSystemProperties(new Configuration())).delete(workingPath, true);
|
||||
Configuration conf = injectSystemProperties(new Configuration());
|
||||
config.addJobProperties(conf);
|
||||
workingPath.getFileSystem(conf).delete(workingPath, true);
|
||||
}
|
||||
catch (IOException e) {
|
||||
log.error(e, "Failed to cleanup path[%s]", workingPath);
|
||||
|
|
Loading…
Reference in New Issue