MySQL extension with MariaDB connector docs (#11608)

* add docs for mariadb support via mysql extensions

* add logging so you know what druid knows

* homogenize

* spelling

* missed a couple
Clint Wylie 2021-08-19 01:52:26 -07:00 committed by GitHub
parent 38ebaee0fd
commit ec334a641b
39 changed files with 86 additions and 91 deletions

View File

@@ -559,9 +559,9 @@ These properties do not apply to metadata storage connections.
 |Property|Possible Values|Description|Default|
 |--------|---------------|-----------|-------|
-|`druid.access.jdbc.enforceAllowedProperties`|Boolean|When true, Druid applies `druid.access.jdbc.allowedProperties` to JDBC connections starting with `jdbc:postgresql:` or `jdbc:mysql:`. When false, Druid allows any kind of JDBC connections without JDBC property validation. This config is for backward compatibility especially during upgrades since enforcing allow list can break existing ingestion jobs or lookups based on JDBC. This config is deprecated and will be removed in a future release.|true|
-|`druid.access.jdbc.allowedProperties`|List of JDBC properties|Defines a list of allowed JDBC properties. Druid always enforces the list for all JDBC connections starting with `jdbc:postgresql:` or `jdbc:mysql:` if `druid.access.jdbc.enforceAllowedProperties` is set to true.<br/><br/>This option is tested against MySQL connector 5.1.48 and PostgreSQL connector 42.2.14. Other connector versions might not work.|["useSSL", "requireSSL", "ssl", "sslmode"]|
-|`druid.access.jdbc.allowUnknownJdbcUrlFormat`|Boolean|When false, Druid only accepts JDBC connections starting with `jdbc:postgresql:` or `jdbc:mysql:`. When true, Druid allows JDBC connections to any kind of database, but only enforces `druid.access.jdbc.allowedProperties` for PostgreSQL and MySQL.|true|
+|`druid.access.jdbc.enforceAllowedProperties`|Boolean|When true, Druid applies `druid.access.jdbc.allowedProperties` to JDBC connections starting with `jdbc:postgresql:`, `jdbc:mysql:`, or `jdbc:mariadb:`. When false, Druid allows any kind of JDBC connections without JDBC property validation. This config is for backward compatibility especially during upgrades since enforcing allow list can break existing ingestion jobs or lookups based on JDBC. This config is deprecated and will be removed in a future release.|true|
+|`druid.access.jdbc.allowedProperties`|List of JDBC properties|Defines a list of allowed JDBC properties. Druid always enforces the list for all JDBC connections starting with `jdbc:postgresql:`, `jdbc:mysql:`, and `jdbc:mariadb:` if `druid.access.jdbc.enforceAllowedProperties` is set to true.<br/><br/>This option is tested against MySQL connector 5.1.48, MariaDB connector 2.7.4, and PostgreSQL connector 42.2.14. Other connector versions might not work.|["useSSL", "requireSSL", "ssl", "sslmode"]|
+|`druid.access.jdbc.allowUnknownJdbcUrlFormat`|Boolean|When false, Druid only accepts JDBC connections starting with `jdbc:postgresql:` or `jdbc:mysql:`. When true, Druid allows JDBC connections to any kind of database, but only enforces `druid.access.jdbc.allowedProperties` for PostgreSQL and MySQL/MariaDB.|true|
 ### Task Logging

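As an illustrative aside, the three `druid.access.jdbc.*` settings in the table above might be combined in `common.runtime.properties` roughly as follows; the values shown are examples, not taken from this change:

```
druid.access.jdbc.enforceAllowedProperties=true
druid.access.jdbc.allowedProperties=["useSSL", "requireSSL", "ssl", "sslmode"]
druid.access.jdbc.allowUnknownJdbcUrlFormat=false
```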
View File

@@ -24,7 +24,7 @@ title: "Dropwizard metrics emitter"
 # Dropwizard Emitter
-To use this extension, make sure to [include](../../development/extensions.md#loading-extensions) `dropwizard-emitter` extension.
+To use this extension, make sure to [include](../../development/extensions.md#loading-extensions) `dropwizard-emitter` in the extensions load list.
 ## Introduction

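Throughout these changes, "the extensions load list" refers to the `druid.extensions.loadList` property in the common runtime configuration. A minimal sketch for this particular emitter:

```
druid.extensions.loadList=["dropwizard-emitter"]
```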
View File

@@ -23,7 +23,7 @@ title: "Ambari Metrics Emitter"
 -->
-To use this Apache Druid extension, make sure to [include](../../development/extensions.md#loading-extensions) `ambari-metrics-emitter` extension.
+To use this Apache Druid extension, [include](../../development/extensions.md#loading-extensions) `ambari-metrics-emitter` in the extensions load list.
 ## Introduction

View File

@@ -23,7 +23,7 @@ title: "Apache Cassandra"
 -->
-To use this Apache Druid extension, make sure to [include](../../development/extensions.md#loading-extensions) `druid-cassandra-storage` extension.
+To use this Apache Druid extension, [include](../../development/extensions.md#loading-extensions) `druid-cassandra-storage` in the extensions load list.
 [Apache Cassandra](http://www.datastax.com/what-we-offer/products-services/datastax-enterprise/apache-cassandra) can also
 be leveraged for deep storage. This requires some additional Druid configuration as well as setting up the necessary

View File

@@ -23,7 +23,7 @@ title: "Rackspace Cloud Files"
 -->
-To use this Apache Druid extension, make sure to [include](../../development/extensions.md#loading-extensions) `druid-cloudfiles-extensions` extension.
+To use this Apache Druid extension, [include](../../development/extensions.md#loading-extensions) `druid-cloudfiles-extensions` in the extensions load list.
 ## Deep Storage

View File

@@ -23,7 +23,7 @@ title: "DistinctCount Aggregator"
 -->
-To use this Apache Druid extension, make sure to [include](../../development/extensions.md#loading-extensions) the `druid-distinctcount` extension.
+To use this Apache Druid extension, [include](../../development/extensions.md#loading-extensions) the `druid-distinctcount` in the extensions load list.
 Additionally, follow these steps:

View File

@@ -23,7 +23,7 @@ title: "GCE Extensions"
 -->
-To use this Apache Druid (incubating) extension, make sure to [include](../../development/extensions.md#loading-extensions) `gce-extensions`.
+To use this Apache Druid extension, [include](../../development/extensions.md#loading-extensions) `gce-extensions` in the extensions load list.
 At the moment, this extension enables only Druid to autoscale instances in GCE.

View File

@@ -23,7 +23,7 @@ title: "Graphite Emitter"
 -->
-To use this Apache Druid extension, make sure to [include](../../development/extensions.md#loading-extensions) `graphite-emitter` extension.
+To use this Apache Druid extension, [include](../../development/extensions.md#loading-extensions) `graphite-emitter` in the extensions load list.
 ## Introduction

View File

@@ -23,7 +23,7 @@ title: "InfluxDB Line Protocol Parser"
 -->
-To use this Apache Druid extension, make sure to [include](../../development/extensions.md#loading-extensions) `druid-influx-extensions`.
+To use this Apache Druid extension, [include](../../development/extensions.md#loading-extensions) `druid-influx-extensions` in the extensions load list.
 This extension enables Druid to parse the [InfluxDB Line Protocol](https://docs.influxdata.com/influxdb/v1.5/write_protocols/line_protocol_tutorial/), a popular text-based timeseries metric serialization format.

View File

@@ -23,7 +23,7 @@ title: "InfluxDB Emitter"
 -->
-To use this Apache Druid extension, make sure to [include](../../development/extensions.md#loading-extensions) `druid-influxdb-emitter` extension.
+To use this Apache Druid extension, [include](../../development/extensions.md#loading-extensions) `druid-influxdb-emitter` in the extensions load list.
 ## Introduction

View File

@@ -23,7 +23,7 @@ title: "Kafka Emitter"
 -->
-To use this Apache Druid extension, make sure to [include](../../development/extensions.md#loading-extensions) `kafka-emitter` extension.
+To use this Apache Druid extension, [include](../../development/extensions.md#loading-extensions) `kafka-emitter` in the extensions load list.
 ## Introduction

View File

@@ -26,11 +26,7 @@ title: "Moment Sketches for Approximate Quantiles module"
 This module provides aggregators for approximate quantile queries using the [momentsketch](https://github.com/stanford-futuredata/momentsketch) library.
 The momentsketch provides coarse quantile estimates with less space and aggregation time overheads than traditional sketches, approaching the performance of counts and sums by reconstructing distributions from computed statistics.
-To use this Apache Druid extension, make sure you [include](../../development/extensions.md#loading-extensions) the extension in your config file:
-```
-druid.extensions.loadList=["druid-momentsketch"]
-```
+To use this Apache Druid extension, [include](../../development/extensions.md#loading-extensions) in the extensions load list.
 ### Aggregator

View File

@@ -23,7 +23,7 @@ title: "OpenTSDB Emitter"
 -->
-To use this Apache Druid extension, make sure to [include](../../development/extensions.md#loading-extensions) `opentsdb-emitter` extension.
+To use this Apache Druid extension, [include](../../development/extensions.md#loading-extensions) `opentsdb-emitter` in the extensions load list.
 ## Introduction

View File

@@ -23,7 +23,7 @@ title: "Prometheus Emitter"
 -->
-To use this Apache Druid extension, make sure to [include](../../development/extensions.md#loading-extensions) `prometheus-emitter` extension.
+To use this Apache Druid extension, [include](../../development/extensions.md#loading-extensions) `prometheus-emitter` in the extensions load list.
 ## Introduction

View File

@@ -23,7 +23,7 @@ title: "Microsoft SQLServer"
 -->
-To use this Apache Druid extension, make sure to [include](../../development/extensions.md#loading-extensions) `sqlserver-metadata-storage` as an extension.
+To use this Apache Druid extension, [include](../../development/extensions.md#loading-extensions) `sqlserver-metadata-storage` in the extensions load list.
 ## Setting up SQLServer

View File

@@ -23,7 +23,7 @@ title: "StatsD Emitter"
 -->
-To use this Apache Druid extension, make sure to [include](../../development/extensions.md#loading-extensions) `statsd-emitter` extension.
+To use this Apache Druid extension, [include](../../development/extensions.md#loading-extensions) `statsd-emitter` in the extensions load list.
 ## Introduction

View File

@@ -23,7 +23,7 @@ title: "Thrift"
 -->
-To use this Apache Druid extension, make sure to [include](../../development/extensions.md#loading-extensions) `druid-thrift-extensions`.
+To use this Apache Druid extension, [include](../../development/extensions.md#loading-extensions) `druid-thrift-extensions` in the extensions load list.
 This extension enables Druid to ingest thrift compact data online (`ByteBuffer`) and offline (SequenceFile of type `<Writable, BytesWritable>` or LzoThriftBlock File).

View File

@@ -23,7 +23,7 @@ title: "Timestamp Min/Max aggregators"
 -->
-To use this Apache Druid extension, make sure to [include](../../development/extensions.md#loading-extensions) `druid-time-min-max`.
+To use this Apache Druid extension, [include](../../development/extensions.md#loading-extensions) `druid-time-min-max` in the extensions load list.
 These aggregators enable more precise calculation of min and max time of given events than `__time` column whose granularity is sparse, the same as query granularity.
 To use this feature, a "timeMin" or "timeMax" aggregator must be included at indexing time.

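As a rough sketch of what these docs describe, a `timeMin`/`timeMax` pair added to an indexing spec's metrics could look like the following; the output names and `fieldName` are illustrative assumptions, not values from this change:

```json
[
  { "type" : "timeMin", "name" : "minIngestedTime", "fieldName" : "__time" },
  { "type" : "timeMax", "name" : "maxIngestedTime", "fieldName" : "__time" }
]
```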
View File

@@ -23,7 +23,7 @@ title: "Approximate Histogram aggregators"
 -->
-To use this Apache Druid extension, make sure to [include](../../development/extensions.md#loading-extensions) `druid-histogram` as an extension.
+To use this Apache Druid extension, [include](../../development/extensions.md#loading-extensions) `druid-histogram` in the extensions load list.
 The `druid-histogram` extension provides an approximate histogram aggregator and a fixed buckets histogram aggregator.

View File

@@ -33,7 +33,7 @@ Additionally, it provides an InputFormat for reading Avro OCF files when using
 [native batch indexing](../../ingestion/native-batch.md), see [Avro OCF](../../ingestion/data-formats.md#avro-ocf)
 for details on how to ingest OCF files.
-Make sure to [include](../../development/extensions.md#loading-extensions) `druid-avro-extensions` as an extension.
+Make sure to [include](../../development/extensions.md#loading-extensions) `druid-avro-extensions` in the extensions load list.
 ### Avro Types

View File

@@ -23,7 +23,7 @@ title: "Microsoft Azure"
 -->
-To use this Apache Druid extension, make sure to [include](../../development/extensions.md#loading-extensions) `druid-azure-extensions` extension.
+To use this Apache Druid extension, [include](../../development/extensions.md#loading-extensions) `druid-azure-extensions` in the extensions load list.
 ## Deep Storage

View File

@@ -23,13 +23,12 @@ title: "Bloom Filter"
 -->
-This Apache Druid extension adds the ability to both construct bloom filters from query results, and filter query results by testing
-against a bloom filter. Make sure to [include](../../development/extensions.md#loading-extensions) `druid-bloom-filter` as an
-extension.
-A Bloom filter is a probabilistic data structure for performing a set membership check. A bloom filter is a good candidate
-to use with Druid for cases where an explicit filter is impossible, e.g. filtering a query against a set of millions of
-values.
+To use this Apache Druid extension, [include](../../development/extensions.md#loading-extensions) `druid-bloom-filter` in the extensions load list.
+This extension adds the ability to both construct bloom filters from query results, and filter query results by testing
+against a bloom filter. A Bloom filter is a probabilistic data structure for performing a set membership check. A bloom
+filter is a good candidate to use with Druid for cases where an explicit filter is impossible, e.g. filtering a query
+against a set of millions of values.
 Following are some characteristics of Bloom filters:

View File

@@ -25,7 +25,7 @@ title: "Kerberos"
 Apache Druid Extension to enable Authentication for Druid Processes using Kerberos.
 This extension adds an Authenticator which is used to protect HTTP Endpoints using the simple and protected GSSAPI negotiation mechanism [SPNEGO](https://en.wikipedia.org/wiki/SPNEGO).
-Make sure to [include](../../development/extensions.md#loading-extensions) `druid-kerberos` as an extension.
+Make sure to [include](../../development/extensions.md#loading-extensions) `druid-kerberos` in the extensions load list.
 ## Configuration

View File

@@ -31,12 +31,12 @@ The main goal of this cache is to speed up the access to a high latency lookup s
 Thus user can define various caching strategies or and implementation per lookup, even if the source is the same.
 This module can be used side to side with other lookup module like the global cached lookup module.
-To use this extension please make sure to [include](../../development/extensions.md#loading-extensions) `druid-lookups-cached-single` as an extension.
+To use this Apache Druid extension, [include](../extensions.md#loading-extensions) `druid-lookups-cached-single` in the extensions load list.
 > If using JDBC, you will need to add your database's client JAR files to the extension's directory.
 > For Postgres, the connector JAR is already included.
-> For MySQL, you can get it from https://dev.mysql.com/downloads/connector/j/.
+> See the MySQL extension documentation for instructions to obtain [MySQL](./mysql.md#installing-the-mysql-connector-library) or [MariaDB](./mysql.md#alternative-installing-the-mariadb-connector-library) connector libraries.
-> Copy or symlink the downloaded file inside the folder `extensions/druid-lookups-cached-single` under the distribution root directory.
+> Copy or symlink the downloaded file to `extensions/druid-lookups-cached-single` under the distribution root directory.
 ## Architecture
 Generally speaking this module can be divided into two main component, namely, the data fetcher layer and caching layer.

View File

@@ -24,7 +24,7 @@ title: "Apache Ranger Security"
 This Apache Druid extension adds an Authorizer which implements access control for Druid, backed by [Apache Ranger](https://ranger.apache.org/). Please see [Authentication and Authorization](../../design/auth.md) for more information on the basic facilities this extension provides.
-Make sure to [include](../../development/extensions.md#loading-extensions) `druid-ranger-security` as an extension.
+Make sure to [include](../../development/extensions.md#loading-extensions) `druid-ranger-security` in the extensions load list.
 > The latest release of Apache Ranger is at the time of writing version 2.0. This version has a dependency on `log4j 1.2.17` which has a vulnerability if you configure it to use a `SocketServer` (CVE-2019-17571). Next to that, it also includes Kafka 2.0.0 which has 2 known vulnerabilities (CVE-2019-12399, CVE-2018-17196). Kafka can be used by the audit component in Ranger, but is not required.

View File

@@ -28,7 +28,7 @@ This extension allows you to do 2 things:
 * [Ingest data](#reading-data-from-google-cloud-storage) from files stored in Google Cloud Storage.
 * Write segments to [deep storage](#deep-storage) in GCS.
-To use this Apache Druid extension, make sure to [include](../../development/extensions.md#loading-extensions) `druid-google-extensions` extension.
+To use this Apache Druid extension, [include](../../development/extensions.md#loading-extensions) `druid-google-extensions` in the extensions load list.
 ### Required Configuration

View File

@@ -23,7 +23,7 @@ title: "HDFS"
 -->
-To use this Apache Druid extension, make sure to [include](../../development/extensions.md#loading-extensions) `druid-hdfs-storage` as an extension and run druid processes with `GOOGLE_APPLICATION_CREDENTIALS=/path/to/service_account_keyfile` in the environment.
+To use this Apache Druid extension, [include](../../development/extensions.md#loading-extensions) `druid-hdfs-storage` in the extensions load list and run druid processes with `GOOGLE_APPLICATION_CREDENTIALS=/path/to/service_account_keyfile` in the environment.
 ## Deep Storage

View File

@@ -24,7 +24,7 @@ title: "Apache Kafka Lookups"
 > Lookups are an [experimental](../experimental.md) feature.
-To use this Apache Druid extension, make sure to [include](../../development/extensions.md#loading-extensions) `druid-lookups-cached-global` and `druid-kafka-extraction-namespace` as an extension.
+To use this Apache Druid extension, [include](../../development/extensions.md#loading-extensions) `druid-lookups-cached-global` and `druid-kafka-extraction-namespace` in the extensions load list.
 If you need updates to populate as promptly as possible, it is possible to plug into a Kafka topic whose key is the old value and message is the desired new value (both in UTF-8) as a LookupExtractorFactory.

View File

@@ -29,7 +29,7 @@ Apache Druid Extension to enable using Kubernetes API Server for node discovery
 ## Configuration
-To use this extension please make sure to [include](../../development/extensions.md#loading-extensions) `druid-kubernetes-extensions` as an extension.
+To use this extension please make sure to [include](../../development/extensions.md#loading-extensions) `druid-kubernetes-extensions` in the extensions load list.
 This extension works together with HTTP based segment and task management in Druid. Consequently, following configurations must be set on all Druid nodes.

View File

@@ -25,7 +25,7 @@ title: "Globally Cached Lookups"
 > Lookups are an [experimental](../experimental.md) feature.
-To use this Apache Druid extension, make sure to [include](../../development/extensions.md#loading-extensions) `druid-lookups-cached-global` as an extension.
+To use this Apache Druid extension, [include](../extensions.md#loading-extensions) `druid-lookups-cached-global` in the extensions load list.
 ## Configuration
 > Static configuration is no longer supported. Lookups can be configured through
@@ -370,7 +370,7 @@ The JDBC lookups will poll a database to populate its local cache. If the `tsCol
 > If using JDBC, you will need to add your database's client JAR files to the extension's directory.
 > For Postgres, the connector JAR is already included.
-> For MySQL, you can get it from https://dev.mysql.com/downloads/connector/j/.
+> See the MySQL extension documentation for instructions to obtain [MySQL](./mysql.md#installing-the-mysql-connector-library) or [MariaDB](./mysql.md#alternative-installing-the-mariadb-connector-library) connector libraries.
 > The connector JAR should reside in the classpath of Druid's main class loader.
 > To add the connector JAR to the classpath, you can copy the downloaded file to `lib/` under the distribution root directory. Alternatively, create a symbolic link to the connector in the `lib` directory.

View File

@@ -23,24 +23,40 @@ title: "MySQL Metadata Store"
 -->
-To use this Apache Druid extension, make sure to [include](../../development/extensions.md#loading-extensions) `mysql-metadata-storage` as an extension.
+To use this Apache Druid extension, [include](../../development/extensions.md#loading-extensions) `mysql-metadata-storage` in the extensions load list.
-> The MySQL extension requires the MySQL Connector/J library which is not included in the Druid distribution.
+> The MySQL extension requires the MySQL Connector/J library or MariaDB Connector/J library, neither of which are included in the Druid distribution.
 > Refer to the following section for instructions on how to install this library.
 ## Installing the MySQL connector library
-This extension uses Oracle's MySQL JDBC driver which is not included in the Druid distribution and must be
-installed separately. There are a few ways to obtain this library:
+This extension can use Oracle's MySQL JDBC driver which is not included in the Druid distribution. You must
+install it separately. There are a few ways to obtain this library:
 - It can be downloaded from the MySQL site at: https://dev.mysql.com/downloads/connector/j/
 - It can be fetched from Maven Central at: https://repo1.maven.org/maven2/mysql/mysql-connector-java/5.1.48/mysql-connector-java-5.1.48.jar
 - It may be available through your package manager, e.g. as `libmysql-java` on APT for a Debian-based OS
-This should fetch a JAR file named similar to 'mysql-connector-java-x.x.xx.jar'.
+This fetches the MySQL connector JAR file with a name like `mysql-connector-java-5.1.48.jar`.
 Copy or symlink this file inside the folder `extensions/mysql-metadata-storage` under the distribution root directory.
+## Alternative: Installing the MariaDB connector library
+This extension also supports using the MariaDB connector jar, though it is also not included in the Druid distribution, so you must install it separately.
+- Download from the MariaDB site: https://mariadb.com/downloads/connector
+- Download from Maven Central: https://repo1.maven.org/maven2/org/mariadb/jdbc/mariadb-java-client/2.7.3/mariadb-java-client-2.7.3.jar
+This fetches the MariaDB connector JAR file with a name like `maria-java-client-2.7.3.jar`.
+Copy or symlink this file to `extensions/mysql-metadata-storage` under the distribution root directory.
+To configure the `mysql-metadata-storage` extension to use the MariaDB connector library instead of MySQL, set `druid.metadata.mysql.driver.driverClassName=org.mariadb.jdbc.Driver`.
+Depending on the MariaDB client library version, the connector supports both `jdbc:mysql:` and `jdbc:mariadb:` connection URIs. However, the parameters to configure the connection vary between implementations, so be sure to [check the documentation](https://mariadb.com/kb/en/about-mariadb-connector-j/#connection-strings) for details.
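To make the MariaDB option above concrete, a minimal sketch of the relevant runtime properties when the MariaDB connector jar is used; the host, database name, and credentials are placeholders:

```
druid.extensions.loadList=["mysql-metadata-storage"]
druid.metadata.storage.type=mysql
# Point the extension at the MariaDB driver instead of the default MySQL Connector/J
druid.metadata.mysql.driver.driverClassName=org.mariadb.jdbc.Driver
# Either jdbc:mysql: or jdbc:mariadb: may be accepted here, depending on the connector version
druid.metadata.storage.connector.connectURI=jdbc:mariadb://localhost:3306/druid
druid.metadata.storage.connector.user=druid
druid.metadata.storage.connector.password=diurd
```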
 ## Setting up MySQL
 1. Install MySQL
@@ -51,7 +67,9 @@ Copy or symlink this file inside the folder `extensions/mysql-metadata-storage`
 Alternatively, download and follow installation instructions for MySQL
 Community Server here:
-[http://dev.mysql.com/downloads/mysql/](http://dev.mysql.com/downloads/mysql/)
+[http://dev.mysql.com/downloads/mysql/](http://dev.mysql.com/downloads/mysql/).
+This extension also supports using MariaDB server, https://mariadb.org/download/, substituting for MariaDB in the following instructions where appropriate.
 2. Create a druid database and user
@@ -87,6 +105,8 @@ Copy or symlink this file inside the folder `extensions/mysql-metadata-storage`
 druid.metadata.storage.connector.password=diurd
 ```
+If using the MariaDB connector library, set `druid.metadata.mysql.driver.driverClassName=org.mariadb.jdbc.Driver`.
 ## Encrypting MySQL connections
 This extension provides support for encrypting MySQL connections. To get more information about encrypting MySQL connections using TLS/SSL in general, please refer to this [guide](https://dev.mysql.com/doc/refman/5.7/en/using-encrypted-connections.html).
@@ -105,9 +125,9 @@ Copy or symlink this file inside the folder `extensions/mysql-metadata-storage`
 |`druid.metadata.mysql.ssl.enabledSSLCipherSuites`|Overrides the existing cipher suites with these cipher suites.|none|no|
 |`druid.metadata.mysql.ssl.enabledTLSProtocols`|Overrides the TLS protocols with these protocols.|none|no|
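As a hedged illustration of the two TLS rows above, the properties take lists of protocol and cipher suite names; the specific values here are assumptions rather than anything from this change:

```
druid.metadata.mysql.ssl.enabledTLSProtocols=["TLSv1.2"]
druid.metadata.mysql.ssl.enabledSSLCipherSuites=["TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256"]
```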
-### MySQL Firehose
+### MySQL InputSource and Firehose
-The MySQL extension provides an implementation of an [SqlFirehose](../../ingestion/native-batch.md#firehoses-deprecated) which can be used to ingest data into Druid from a MySQL database.
+The MySQL extension provides a connector implementation of an [SqlInputSource](../../ingestion/native-batch.md#sql-input-source) and [SqlFirehose](../../ingestion/native-batch.md#firehoses-deprecated) which can be used to ingest data into Druid from a MySQL database. This works with either MySQL or MariaDB connector jars.
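For orientation, a minimal sketch of a `sql` input source backed by the MySQL (or MariaDB) connector might look like the following; the connect URI, credentials, and query are placeholders rather than values from this change:

```json
{
  "type": "sql",
  "database": {
    "type": "mysql",
    "connectorConfig": {
      "connectURI": "jdbc:mysql://localhost:3306/druid",
      "user": "druid",
      "password": "diurd"
    }
  },
  "sqls": ["SELECT timestamp, page, added FROM wikipedia_edits"]
}
```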
 ```json
 {

View File

@@ -30,7 +30,7 @@ The extension provides the [ORC input format](../../ingestion/data-formats.md#or
 for [native batch ingestion](../../ingestion/native-batch.md) and [Hadoop batch ingestion](../../ingestion/hadoop.md), respectively.
 Please see corresponding docs for details.
-To use this extension, make sure to [include](../../development/extensions.md#loading-extensions) `druid-orc-extensions`.
+To use this extension, make sure to [include](../../development/extensions.md#loading-extensions) `druid-orc-extensions` in the extensions load list.
 ### Migration from 'contrib' extension
 This extension, first available in version 0.15.0, replaces the previous 'contrib' extension which was available until

View File

@@ -23,7 +23,7 @@ title: "PostgreSQL Metadata Store"
 -->
-To use this Apache Druid extension, make sure to [include](../../development/extensions.md#loading-extensions) `postgresql-metadata-storage` as an extension.
+To use this Apache Druid extension, [include](../../development/extensions.md#loading-extensions) `postgresql-metadata-storage` in the extensions load list.
 ## Setting up PostgreSQL

View File

@@ -23,7 +23,7 @@ title: "Protobuf"
 -->
-This Apache Druid extension enables Druid to ingest and understand the Protobuf data format. Make sure to [include](../../development/extensions.md#loading-extensions) `druid-protobuf-extensions` as an extension.
+This Apache Druid extension enables Druid to ingest and understand the Protobuf data format. Make sure to [include](../../development/extensions.md#loading-extensions) `druid-protobuf-extensions` in the extensions load list.
 The `druid-protobuf-extensions` provides the [Protobuf Parser](../../ingestion/data-formats.md#protobuf-parser)
 for [stream ingestion](../../ingestion/index.md#streaming). See corresponding docs for details.

View File

@@ -28,7 +28,7 @@ This extension allows you to do 2 things:
 * [Ingest data](#reading-data-from-s3) from files stored in S3.
 * Write segments to [deep storage](#deep-storage) in S3.
-To use this Apache Druid extension, make sure to [include](../../development/extensions.md#loading-extensions) `druid-s3-extensions` as an extension.
+To use this Apache Druid extension, [include](../../development/extensions.md#loading-extensions) `druid-s3-extensions` in the extensions load list.
 ### Reading data from S3

View File

@@ -23,7 +23,7 @@ title: "Stats aggregator"
 -->
-This Apache Druid extension includes stat-related aggregators, including variance and standard deviations, etc. Make sure to [include](../../development/extensions.md#loading-extensions) `druid-stats` as an extension.
+This Apache Druid extension includes stat-related aggregators, including variance and standard deviations, etc. Make sure to [include](../../development/extensions.md#loading-extensions) `druid-stats` in the extensions load list.
 ## Variance aggregator

View File

@@ -58,6 +58,7 @@ public class MySQLConnector extends SQLMetadataConnector
 {
 super(config, dbTables);
 try {
+log.info("Loading \"MySQL\" metadata connector driver %s", driverConfig.getDriverClassName());
 Class.forName(driverConfig.getDriverClassName(), false, getClass().getClassLoader());
 }
 catch (ClassNotFoundException e) {

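Given the format string in the new log statement above, with the MariaDB driver configured the startup message would presumably read along the lines of the following (illustrative, not captured output):

```
Loading "MySQL" metadata connector driver org.mariadb.jdbc.Driver
```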
View File

@@ -140,6 +140,7 @@ Long.MIN_VALUE
 Lucene
 MapBD
 MapDB
+MariaDB
 MiddleManager
 MiddleManagers
 Montréal
@@ -838,8 +839,6 @@ customJson
 lookupParseSpec
 namespaceParseSpec
 simpleJson
-- ../docs/development/extensions-core/mysql.md
-x.xx.jar
 - ../docs/development/extensions-core/orc.md
 dimensionSpec
 flattenSpec

View File

@@ -4201,8 +4201,7 @@
 "ansi-regex": {
 "version": "2.1.1",
 "bundled": true,
-"dev": true,
-"optional": true
+"dev": true
 },
 "aproba": {
 "version": "1.2.0",
@@ -4223,14 +4222,12 @@
 "balanced-match": {
 "version": "1.0.0",
 "bundled": true,
-"dev": true,
-"optional": true
+"dev": true
 },
 "brace-expansion": {
 "version": "1.1.11",
 "bundled": true,
 "dev": true,
-"optional": true,
 "requires": {
 "balanced-match": "^1.0.0",
 "concat-map": "0.0.1"
@@ -4245,20 +4242,17 @@
 "code-point-at": {
 "version": "1.1.0",
 "bundled": true,
-"dev": true,
-"optional": true
+"dev": true
 },
 "concat-map": {
 "version": "0.0.1",
 "bundled": true,
-"dev": true,
-"optional": true
+"dev": true
 },
 "console-control-strings": {
 "version": "1.1.0",
 "bundled": true,
-"dev": true,
-"optional": true
+"dev": true
 },
 "core-util-is": {
 "version": "1.0.2",
@@ -4375,8 +4369,7 @@
 "inherits": {
 "version": "2.0.4",
 "bundled": true,
-"dev": true,
-"optional": true
+"dev": true
 },
 "ini": {
 "version": "1.3.5",
@@ -4388,7 +4381,6 @@
 "version": "1.0.0",
 "bundled": true,
 "dev": true,
-"optional": true,
 "requires": {
 "number-is-nan": "^1.0.0"
 }
@@ -4403,7 +4395,6 @@
 "version": "3.0.4",
 "bundled": true,
 "dev": true,
-"optional": true,
 "requires": {
 "brace-expansion": "^1.1.7"
 }
@@ -4411,14 +4402,12 @@
 "minimist": {
 "version": "1.2.5",
 "bundled": true,
-"dev": true,
-"optional": true
+"dev": true
 },
 "minipass": {
 "version": "2.9.0",
 "bundled": true,
 "dev": true,
-"optional": true,
 "requires": {
 "safe-buffer": "^5.1.2",
 "yallist": "^3.0.0"
@@ -4437,7 +4426,6 @@
 "version": "0.5.3",
 "bundled": true,
 "dev": true,
-"optional": true,
 "requires": {
 "minimist": "^1.2.5"
 }
@@ -4499,8 +4487,7 @@
 "npm-normalize-package-bin": {
 "version": "1.0.1",
 "bundled": true,
-"dev": true,
-"optional": true
+"dev": true
 },
 "npm-packlist": {
 "version": "1.4.8",
@@ -4528,8 +4515,7 @@
 "number-is-nan": {
 "version": "1.0.1",
 "bundled": true,
-"dev": true,
-"optional": true
+"dev": true
 },
 "object-assign": {
 "version": "4.1.1",
@@ -4541,7 +4527,6 @@
 "version": "1.4.0",
 "bundled": true,
 "dev": true,
-"optional": true,
 "requires": {
 "wrappy": "1"
 }
@@ -4619,8 +4604,7 @@
 "safe-buffer": {
 "version": "5.1.2",
 "bundled": true,
-"dev": true,
-"optional": true
+"dev": true
 },
 "safer-buffer": {
 "version": "2.1.2",
@@ -4656,7 +4640,6 @@
 "version": "1.0.2",
 "bundled": true,
 "dev": true,
-"optional": true,
 "requires": {
 "code-point-at": "^1.0.0",
 "is-fullwidth-code-point": "^1.0.0",
@@ -4676,7 +4659,6 @@
 "version": "3.0.1",
 "bundled": true,
 "dev": true,
-"optional": true,
 "requires": {
 "ansi-regex": "^2.0.0"
 }
@@ -4720,14 +4702,12 @@
 "wrappy": {
 "version": "1.0.2",
 "bundled": true,
-"dev": true,
-"optional": true
+"dev": true
 },
 "yallist": {
 "version": "3.1.1",
 "bundled": true,
-"dev": true,
-"optional": true
+"dev": true
 }
 }
 },