optionally add extensions to explicitly specified hadoopContainerClassPath (#4230)

* optionally add extensions to explicitly specified hadoopContainerClassPath

* note extensions always pushed in hadoop container when druid.extensions.hadoopContainerDruidClasspath is not provided explicitly
This commit is contained in:
Himanshu 2017-05-08 14:24:14 -05:00 committed by Parag Jain
parent bba31e0c8b
commit 462f6482df
3 changed files with 26 additions and 8 deletions

View File

@ -23,9 +23,10 @@ Many of Druid's external dependencies can be plugged in as modules. Extensions c
|--------|-----------|-------|
|`druid.extensions.directory`|The root extension directory where user can put extensions related files. Druid will load extensions stored under this directory.|`extensions` (This is a relative path to Druid's working directory)|
|`druid.extensions.hadoopDependenciesDir`|The root hadoop dependencies directory where the user can put hadoop related dependencies files. Druid will load the dependencies based on the hadoop coordinate specified in the hadoop index task.|`hadoop-dependencies` (This is a relative path to Druid's working directory)|
|`druid.extensions.hadoopContainerDruidClasspath`|Hadoop Indexing launches hadoop jobs and this configuration provides a way to explicitly set the user classpath for the hadoop job. By default this is computed automatically by druid based on the druid process classpath and set of extensions. However, sometimes you might want to be explicit to resolve dependency conflicts between druid and hadoop.|null|
|`druid.extensions.loadList`|A JSON array of extensions to load from extension directories by Druid. If it is not specified, its value will be `null` and Druid will load all the extensions under `druid.extensions.directory`. If its value is the empty list `[]`, then no extensions will be loaded at all. It is also allowed to specify absolute paths of other custom extensions not stored in the common extensions directory.|null|
|`druid.extensions.searchCurrentClassloader`|This is a boolean flag that determines if Druid will search the main classloader for extensions. It defaults to true but can be turned off if you have reason to not automatically add all modules on the classpath.|true|
|`druid.extensions.hadoopContainerDruidClasspath`|Hadoop Indexing launches hadoop jobs and this configuration provides a way to explicitly set the user classpath for the hadoop job. By default this is computed automatically by druid based on the druid process classpath and set of extensions. However, sometimes you might want to be explicit to resolve dependency conflicts between druid and hadoop.|null|
|`druid.extensions.addExtensionsToHadoopContainer`|Only applicable if `druid.extensions.hadoopContainerDruidClasspath` is provided. If set to true, then extensions specified in the loadList are added to hadoop container classpath. Note that when `druid.extensions.hadoopContainerDruidClasspath` is not provided then extensions are always added to hadoop container classpath.|false|
### Zookeeper
We recommend just setting the base ZK path and the ZK service host, but all ZK paths that Druid uses can be overwritten to absolute paths.

View File

@ -25,7 +25,6 @@ import com.google.common.base.Throwables;
import com.google.common.collect.ImmutableList;
import com.google.common.collect.Lists;
import com.google.inject.Injector;
import io.druid.guice.ExtensionsConfig;
import io.druid.guice.GuiceInjectors;
import io.druid.indexing.common.TaskToolbox;
@ -142,11 +141,14 @@ public abstract class HadoopTask extends AbstractTask
Arrays.asList(((URLClassLoader) HadoopIndexTask.class.getClassLoader()).getURLs())
);
final List<URL> extensionURLs = Lists.newArrayList();
for (final File extension : Initialization.getExtensionFilesToLoad(extensionsConfig)) {
final ClassLoader extensionLoader = Initialization.getClassLoaderForExtension(extension);
jobURLs.addAll(Arrays.asList(((URLClassLoader) extensionLoader).getURLs()));
extensionURLs.addAll(Arrays.asList(((URLClassLoader) extensionLoader).getURLs()));
}
jobURLs.addAll(extensionURLs);
final List<URL> localClassLoaderURLs = new ArrayList<>(jobURLs);
// hadoop dependencies come before druid classes because some extensions depend on them
@ -169,11 +171,16 @@ public abstract class HadoopTask extends AbstractTask
hadoopContainerDruidClasspathJars = Joiner.on(File.pathSeparator).join(jobURLs);
} else {
hadoopContainerDruidClasspathJars =
Joiner.on(File.pathSeparator)
.join(
Initialization.getURLsForClasspath(extensionsConfig.getHadoopContainerDruidClasspath())
);
List<URL> hadoopContainerURLs = Lists.newArrayList(
Initialization.getURLsForClasspath(extensionsConfig.getHadoopContainerDruidClasspath())
);
if (extensionsConfig.getAddExtensionsToHadoopContainer()) {
hadoopContainerURLs.addAll(extensionURLs);
}
hadoopContainerDruidClasspathJars = Joiner.on(File.pathSeparator)
.join(hadoopContainerURLs);
}
log.info("Hadoop Container Druid Classpath is set to [%s]", hadoopContainerDruidClasspathJars);

View File

@ -41,6 +41,10 @@ public class ExtensionsConfig
@JsonProperty
private String hadoopContainerDruidClasspath = null;
//Only applicable when hadoopContainerDruidClasspath is explicitly specified.
@JsonProperty
private boolean addExtensionsToHadoopContainer = false;
@JsonProperty
private List<String> loadList;
@ -64,6 +68,11 @@ public class ExtensionsConfig
return hadoopContainerDruidClasspath;
}
/**
 * Returns whether extensions from the loadList should also be pushed onto the
 * hadoop container classpath. Only consulted when
 * {@code hadoopContainerDruidClasspath} is explicitly specified; when that
 * classpath is not provided, extensions are always added (see HadoopTask).
 * Defaults to {@code false}.
 */
public boolean getAddExtensionsToHadoopContainer()
{
return addExtensionsToHadoopContainer;
}
/**
 * Returns the configured list of extensions to load, or {@code null} when not
 * specified (in which case all extensions under the extensions directory are
 * loaded — see the {@code druid.extensions.loadList} documentation).
 */
public List<String> getLoadList()
{
return loadList;
@ -77,6 +86,7 @@ public class ExtensionsConfig
", directory='" + directory + '\'' +
", hadoopDependenciesDir='" + hadoopDependenciesDir + '\'' +
", hadoopContainerDruidClasspath='" + hadoopContainerDruidClasspath + '\'' +
", addExtensionsToHadoopContainer=" + addExtensionsToHadoopContainer +
", loadList=" + loadList +
'}';
}