mirror of https://github.com/apache/druid.git
Merge pull request #1901 from guobingkun/fix_typo_and_rename
Fix metadata typo and rename default extension directory
This commit is contained in:
commit
8defe29270
|
@ -57,8 +57,8 @@
|
|||
<argument>-classpath</argument>
|
||||
<classpath/>
|
||||
<argument>-Ddruid.extensions.loadList=[]</argument>
|
||||
<argument>-Ddruid.extensions.directory=${project.build.directory}/druid_extensions</argument>
|
||||
<argument>-Ddruid.extensions.hadoopDependenciesDir=${project.build.directory}/hadoop_druid_dependencies</argument>
|
||||
<argument>-Ddruid.extensions.directory=${project.build.directory}/extensions</argument>
|
||||
<argument>-Ddruid.extensions.hadoopDependenciesDir=${project.build.directory}/hadoop_dependencies</argument>
|
||||
<argument>io.druid.cli.Main</argument>
|
||||
<argument>tools</argument>
|
||||
<argument>pull-deps</argument>
|
||||
|
@ -121,7 +121,7 @@
|
|||
<goal>single</goal>
|
||||
</goals>
|
||||
<configuration>
|
||||
<finalName>mysql-metdata-storage</finalName>
|
||||
<finalName>mysql-metadata-storage</finalName>
|
||||
<tarLongFileMode>posix</tarLongFileMode>
|
||||
<descriptors>
|
||||
<descriptor>src/assembly/mysql_assembly.xml</descriptor>
|
||||
|
|
|
@ -25,21 +25,21 @@
|
|||
</formats>
|
||||
<fileSets>
|
||||
<fileSet>
|
||||
<directory>${project.build.directory}/druid_extensions</directory>
|
||||
<directory>${project.build.directory}/extensions</directory>
|
||||
<includes>
|
||||
<include>*/*</include>
|
||||
</includes>
|
||||
<excludes>
|
||||
<exclude>mysql-metadata-storage/**</exclude>
|
||||
</excludes>
|
||||
<outputDirectory>druid_extensions</outputDirectory>
|
||||
<outputDirectory>extensions</outputDirectory>
|
||||
</fileSet>
|
||||
<fileSet>
|
||||
<directory>${project.build.directory}/hadoop_druid_dependencies</directory>
|
||||
<directory>${project.build.directory}/hadoop_dependencies</directory>
|
||||
<includes>
|
||||
<include>*/*/*</include>
|
||||
</includes>
|
||||
<outputDirectory>hadoop_druid_dependencies</outputDirectory>
|
||||
<outputDirectory>hadoop_dependencies</outputDirectory>
|
||||
</fileSet>
|
||||
<fileSet>
|
||||
<directory>../examples/config</directory>
|
||||
|
|
|
@ -25,7 +25,7 @@
|
|||
</formats>
|
||||
<fileSets>
|
||||
<fileSet>
|
||||
<directory>${project.build.directory}/druid_extensions/mysql-metadata-storage</directory>
|
||||
<directory>${project.build.directory}/extensions/mysql-metadata-storage</directory>
|
||||
<includes>
|
||||
<include>*</include>
|
||||
</includes>
|
||||
|
|
|
@ -21,8 +21,8 @@ Many of Druid's external dependencies can be plugged in as modules. Extensions c
|
|||
|
||||
|Property|Description|Default|
|
||||
|--------|-----------|-------|
|
||||
|`druid.extensions.directory`|The root extension directory where user can put extensions related files. Druid will load extensions stored under this directory.|`druid_extensions` (This is a relative path to Druid's working directory)|
|
||||
|`druid.extensions.hadoopDependenciesDir`|The root hadoop dependencies directory where user can put hadoop related dependencies files. Druid will load the dependencies based on the hadoop coordinate specified in the hadoop index task.|`hadoop_druid_dependencies` (This is a relative path to Druid's working directory|
|
||||
|`druid.extensions.directory`|The root extension directory where user can put extensions related files. Druid will load extensions stored under this directory.|`extensions` (This is a relative path to Druid's working directory)|
|
||||
|`druid.extensions.hadoopDependenciesDir`|The root hadoop dependencies directory where user can put hadoop related dependencies files. Druid will load the dependencies based on the hadoop coordinate specified in the hadoop index task.|`hadoop_dependencies` (This is a relative path to Druid's working directory)|
|
||||
|`druid.extensions.loadList`|A JSON array of extensions to load from extension directories by Druid. If it is not specified, its value will be `null` and Druid will load all the extensions under `druid.extensions.directory`. If its value is empty list `[]`, then no extensions will be loaded at all.|null|
|
||||
|`druid.extensions.searchCurrentClassloader`|This is a boolean flag that determines if Druid will search the main classloader for extensions. It defaults to true but can be turned off if you have reason to not automatically add all modules on the classpath.|true|
|
||||
|
||||
|
|
|
@ -23,7 +23,7 @@ This will compile the project and create the Druid binary distribution tar under
|
|||
`distribution/target/druid-VERSION-bin.tar.gz`.
|
||||
|
||||
This will also create a tarball that contains `mysql-metadata-storage` extension under
|
||||
`distribution/target/mysql-metdata-storage-bin.tar.gz`. If you want Druid to load `mysql-metadata-storage`, you can first untar `druid-VERSION-bin.tar.gz`, then go to ```druid-<version>/druid_extensions```, untar `mysql-metdata-storage-bin.tar.gz` there. Now just specifiy `mysql-metadata-storage` in `druid.extensions.loadList` so that Druid will pick it up. See [Including Extensions](../operations/including-extensions.html) for more infomation.
|
||||
`distribution/target/mysql-metadata-storage-bin.tar.gz`. If you want Druid to load `mysql-metadata-storage`, you can first untar `druid-VERSION-bin.tar.gz`, then go to ```druid-<version>/extensions```, untar `mysql-metadata-storage-bin.tar.gz` there. Now just specify `mysql-metadata-storage` in `druid.extensions.loadList` so that Druid will pick it up. See [Including Extensions](../operations/including-extensions.html) for more information.
|
||||
|
||||
You can find the example executables in the examples/bin directory:
|
||||
|
||||
|
|
|
@ -144,7 +144,7 @@ The Hadoop Index Task is used to index larger data sets that require the paralle
|
|||
The Hadoop Index Config submitted as part of an Hadoop Index Task is identical to the Hadoop Index Config used by the `HadoopDruidIndexer` except that three fields must be omitted: `segmentOutputPath`, `workingPath`, `metadataUpdateSpec`. The Indexing Service takes care of setting these fields internally.
|
||||
|
||||
Note: Before using Hadoop Index Task, please make sure to include Hadoop dependencies so that Druid knows where to pick them up during runtime, see [Include Hadoop Dependencies](../operations/other-hadoop.html).
|
||||
Druid uses hadoop-client 2.3.0 as the default Hadoop version, you can get it from the released Druid tarball(under folder ```hadoop_druid_dependencies```) or use [pull-deps](../pull-deps.html).
|
||||
Druid uses hadoop-client 2.3.0 as the default Hadoop version, you can get it from the released Druid tarball(under folder ```hadoop_dependencies```) or use [pull-deps](../pull-deps.html).
|
||||
|
||||
#### Using your own Hadoop distribution
|
||||
|
||||
|
|
|
@ -25,12 +25,12 @@ To let Druid load your extensions, follow the steps below
|
|||
|
||||
Example:
|
||||
|
||||
Suppose you specify `druid.extensions.directory=/usr/local/druid/druid_extensions`, and want Druid to load normal extensions ```druid-examples```, ```druid-kafka-eight``` and ```mysql-metadata-storage```.
|
||||
Suppose you specify `druid.extensions.directory=/usr/local/druid/extensions`, and want Druid to load normal extensions ```druid-examples```, ```druid-kafka-eight``` and ```mysql-metadata-storage```.
|
||||
|
||||
Then under ```druid_extensions```, it should look like this,
|
||||
Then under ```extensions```, it should look like this,
|
||||
|
||||
```
|
||||
druid_extensions/
|
||||
extensions/
|
||||
├── druid-examples
|
||||
│ ├── commons-beanutils-1.8.3.jar
|
||||
│ ├── commons-digester-1.8.jar
|
||||
|
@ -61,7 +61,7 @@ druid_extensions/
|
|||
└── mysql-metadata-storage-0.8.0-rc1.jar
|
||||
```
|
||||
|
||||
As you can see, under ```druid_extensions``` there are three sub-directories ```druid-examples```, ```druid-kafka-eight``` and ```mysql-metadata-storage```, each sub-directory denotes an extension that Druid might load.
|
||||
As you can see, under ```extensions``` there are three sub-directories ```druid-examples```, ```druid-kafka-eight``` and ```mysql-metadata-storage```, each sub-directory denotes an extension that Druid might load.
|
||||
|
||||
3) Tell Druid which extensions to load. Now you have prepared your extension directories, if you want Druid to load a specific list of extensions under root extension directory, you need to specify `druid.extensions.loadList`. Using the example above, if you want Druid to load ```druid-kafka-eight``` and ```mysql-metadata-storage```, you can specify `druid.extensions.loadList=["druid-kafka-eight", "mysql-metadata-storage"]`.
|
||||
|
||||
|
|
|
@ -19,12 +19,12 @@ To make this work, follow the steps below
|
|||
|
||||
Example:
|
||||
|
||||
Suppose you specify `druid.extensions.hadoopDependenciesDir=/usr/local/druid/hadoop_druid_dependencies`, and you want to prepare both `hadoop-client` 2.3.0 and 2.4.0 for Druid,
|
||||
Suppose you specify `druid.extensions.hadoopDependenciesDir=/usr/local/druid/hadoop_dependencies`, and you want to prepare both `hadoop-client` 2.3.0 and 2.4.0 for Druid,
|
||||
|
||||
Then you can either use [pull-deps](../pull-deps.html) or manually set up Hadoop dependencies directories such that under ```hadoop_druid_dependencies```, it looks like this,
|
||||
Then you can either use [pull-deps](../pull-deps.html) or manually set up Hadoop dependencies directories such that under ```hadoop_dependencies```, it looks like this,
|
||||
|
||||
```
|
||||
hadoop_druid_dependencies/
|
||||
hadoop_dependencies/
|
||||
└── hadoop-client
|
||||
├── 2.3.0
|
||||
│ ├── activation-1.1.jar
|
||||
|
|
|
@ -50,8 +50,8 @@ Suppose you want to download ```druid-examples```, ```mysql-metadata-storage```
|
|||
Because `--clean` is supplied, this command will first remove the directories specified at `druid.extensions.directory` and `druid.extensions.hadoopDependenciesDir`, then recreate them and start downloading the extensions there. After finishing downloading, if you go to the extension directories you specified, you will see
|
||||
|
||||
```
|
||||
tree druid_extensions
|
||||
druid_extensions
|
||||
tree extensions
|
||||
extensions
|
||||
├── druid-examples
|
||||
│ ├── commons-beanutils-1.8.3.jar
|
||||
│ ├── commons-digester-1.8.jar
|
||||
|
@ -68,8 +68,8 @@ druid_extensions
|
|||
```
|
||||
|
||||
```
|
||||
tree hadoop_druid_dependencies
|
||||
hadoop_druid_dependencies/
|
||||
tree hadoop_dependencies
|
||||
hadoop_dependencies/
|
||||
└── hadoop-client
|
||||
├── 2.3.0
|
||||
│ ├── activation-1.1.jar
|
||||
|
|
|
@ -74,8 +74,8 @@ You should see a bunch of files:
|
|||
* run_example_server.sh
|
||||
* run_example_client.sh
|
||||
* LICENSE, config, examples, lib directories
|
||||
* druid_extensions (This folder contains all the extensions that could be loaded by Druid. Note that extension `mysql-metadata-storage` is packaged in a separate tarball that can be downloaded from [here](http://druid.io/downloads.html). See [Including Extensions](../operations/including-extensions.html) for more information about loading extensions.
|
||||
* hadoop_druid_dependencies (This folder contains hadoop-client:2.3.0, see [Different Hadoop Versions](../operations/other-hadoop.html) for more information about how Druid picks up Hadoop dependencies)
|
||||
* extensions (This folder contains all the extensions that could be loaded by Druid. Note that extension `mysql-metadata-storage` is packaged in a separate tarball that can be downloaded from [here](http://druid.io/downloads.html). See [Including Extensions](../operations/including-extensions.html) for more information about loading extensions.
|
||||
* hadoop_dependencies (This folder contains hadoop-client:2.3.0, see [Different Hadoop Versions](../operations/other-hadoop.html) for more information about how Druid picks up Hadoop dependencies)
|
||||
|
||||
## External Dependencies
|
||||
|
||||
|
|
|
@ -247,7 +247,7 @@ Most common data ingestion problems are around timestamp formats and other malfo
|
|||
|
||||
Druid is designed for large data volumes, and most real-world data sets require batch indexing be done through a Hadoop job.
|
||||
|
||||
For this tutorial, we used [Hadoop 2.3.0](https://archive.apache.org/dist/hadoop/core/hadoop-2.3.0/), which is included under ```hadoop_druid_dependencies```. There are many pages on the Internet showing how to set up a single-node (standalone) Hadoop cluster, which is all that's needed for this example. For more information about how Druid picks up your Hadoop version, see [here](../operations/other-hadoop.html).
|
||||
For this tutorial, we used [Hadoop 2.3.0](https://archive.apache.org/dist/hadoop/core/hadoop-2.3.0/), which is included under ```hadoop_dependencies```. There are many pages on the Internet showing how to set up a single-node (standalone) Hadoop cluster, which is all that's needed for this example. For more information about how Druid picks up your Hadoop version, see [here](../operations/other-hadoop.html).
|
||||
|
||||
Before indexing the data, make sure you have a valid Hadoop cluster running. To build our Druid segment, we are going to submit a [Hadoop index task](../misc/tasks.html) to the indexing service. The grammar for the Hadoop index task is very similar to the index task of the last tutorial. The tutorial Hadoop index task should be located at:
|
||||
|
||||
|
|
|
@ -31,10 +31,10 @@ public class ExtensionsConfig
|
|||
private boolean searchCurrentClassloader = true;
|
||||
|
||||
@JsonProperty
|
||||
private String directory = "druid_extensions";
|
||||
private String directory = "extensions";
|
||||
|
||||
@JsonProperty
|
||||
private String hadoopDependenciesDir = "hadoop_druid_dependencies";
|
||||
private String hadoopDependenciesDir = "hadoop_dependencies";
|
||||
|
||||
@JsonProperty
|
||||
private List<String> loadList;
|
||||
|
|
|
@ -78,7 +78,7 @@ public class PullDependenciesTest
|
|||
extensionToJars.put(hadoop_client_2_3_0, ImmutableList.of("f.jar", "g.jar"));
|
||||
extensionToJars.put(hadoop_client_2_4_0, ImmutableList.of("h.jar", "i.jar"));
|
||||
|
||||
rootExtensionsDir = new File(temporaryFolder.getRoot(), "druid_extensions");
|
||||
rootExtensionsDir = new File(temporaryFolder.getRoot(), "extensions");
|
||||
rootHadoopDependenciesDir = new File(temporaryFolder.getRoot(), "druid_hadoop_dependencies");
|
||||
|
||||
pullDependencies = new PullDependencies(
|
||||
|
|
Loading…
Reference in New Issue