From 7ee509bcd00fd53136bd52564e3ceae823904d28 Mon Sep 17 00:00:00 2001 From: Himanshu Gupta Date: Thu, 30 Jul 2015 22:05:05 -0500 Subject: [PATCH] fix mysql references in tutorial docs --- docs/content/design/index.md | 2 +- docs/content/ingestion/batch-ingestion.md | 2 +- docs/content/tutorials/tutorial-loading-batch-data.md | 4 ++-- docs/content/tutorials/tutorial-loading-streaming-data.md | 4 ++-- 4 files changed, 6 insertions(+), 6 deletions(-) diff --git a/docs/content/design/index.md b/docs/content/design/index.md index 58a7442de07..bdeac7466ae 100644 --- a/docs/content/design/index.md +++ b/docs/content/design/index.md @@ -118,7 +118,7 @@ Druid has a couple of external dependencies for cluster operations. * **Metadata Storage** Druid relies on a metadata storage to store metadata about segments and configuration. Services that create segments write new entries to the metadata store and the coordinator nodes monitor the metadata store to know when new data needs to be loaded or old data needs to be dropped. The metadata store is not - involved in the query path. MySQL and PostgreSQL are popular metadata stores. + involved in the query path. MySQL and PostgreSQL are popular metadata stores for production, but Derby can be used for experimentation when you are running all Druid nodes on a single machine. * **Deep Storage** Deep storage acts as a permanent backup of segments. Services that create segments upload segments to deep storage and historical nodes download segments from deep storage. Deep storage is not involved in the query path. S3 and HDFS are popular deep storages. 
diff --git a/docs/content/ingestion/batch-ingestion.md b/docs/content/ingestion/batch-ingestion.md index e5661bffe16..35a671c6769 100644 --- a/docs/content/ingestion/batch-ingestion.md +++ b/docs/content/ingestion/batch-ingestion.md @@ -79,7 +79,7 @@ The spec\_file is a path to a file that contains JSON and an example looks like: }, "metadataUpdateSpec" : { "type":"mysql", - "connectURI" : "jdbc:metadata storage://localhost:3306/druid", + "connectURI" : "jdbc:mysql://localhost:3306/druid", "password" : "diurd", "segmentTable" : "druid_segments", "user" : "druid" diff --git a/docs/content/tutorials/tutorial-loading-batch-data.md b/docs/content/tutorials/tutorial-loading-batch-data.md index 406124b756d..ed6ccacb905 100644 --- a/docs/content/tutorials/tutorial-loading-batch-data.md +++ b/docs/content/tutorials/tutorial-loading-batch-data.md @@ -59,9 +59,9 @@ The following events should exist in the file: #### Set Up a Druid Cluster -To index the data, we are going to need an indexing service, a historical node, and a coordinator node. +To index the data, we are going to need the overlord, a historical node, and a coordinator node. -Note: If Zookeeper and MySQL aren't running, you'll have to start them again as described in [The Druid Cluster](../tutorials/tutorial-the-druid-cluster.html). +Note: If Zookeeper isn't running, you'll have to start it again as described in [The Druid Cluster](../tutorials/tutorial-the-druid-cluster.html). To start the Indexing Service: diff --git a/docs/content/tutorials/tutorial-loading-streaming-data.md b/docs/content/tutorials/tutorial-loading-streaming-data.md index 01a25347627..18139bc47e8 100644 --- a/docs/content/tutorials/tutorial-loading-streaming-data.md +++ b/docs/content/tutorials/tutorial-loading-streaming-data.md @@ -18,8 +18,8 @@ tutorials](tutorial-a-first-look-at-druid.html#about-the-data). At this point, you should already have Druid downloaded and be comfortable running a Druid cluster locally. 
If not, [have a look at our second -tutorial](../tutorials/tutorial-the-druid-cluster.html). If Zookeeper and MySQL are not -running, you will have to start them as described in [The Druid +tutorial](../tutorials/tutorial-the-druid-cluster.html). If Zookeeper is not +running, you will have to start it as described in [The Druid Cluster](../tutorials/tutorial-the-druid-cluster.html). With real-world data, we recommend having a message bus such as [Apache