Fix broken links in docs, add broken link checker. (#7658)

Also adds back insert-segment-to-db.md with some docs about why and
when it was removed (in #6911).
Gian Merlino 2019-05-15 16:49:50 -05:00 committed by Fangjin Yang
parent 917106985f
commit 0352f450d7
15 changed files with 187 additions and 35 deletions

docs/_bin/broken-link-check.py (new executable file, 101 lines)

@@ -0,0 +1,101 @@
#!/usr/bin/env python3

# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import json
import os
import re
import sys

#
# Checks for broken redirects (in _redirects.json) and links from markdown files to
# nonexistent pages. Does _not_ check for links to anchors that don't exist.
#

# Targets to these 'well known' pages are OK.
WELL_KNOWN_PAGES = ["/libraries.html", "/downloads.html", "/community/", "/thanks.html"]


def normalize_link(source, target):
    dirname = os.path.dirname(source)
    normalized = os.path.normpath(os.path.join(dirname, target))
    return normalized


def verify_redirects(docs_directory, redirect_json):
    ok = True

    with open(redirect_json, 'r') as f:
        redirects = json.loads(f.read())

    for redirect in redirects:
        if redirect["target"] in WELL_KNOWN_PAGES:
            continue

        # Replace .html and named anchors with .md, and check the file on the filesystem.
        target = re.sub(r'\.html(#.*)?$', '.md', normalize_link(redirect["source"], redirect["target"]))
        if not os.path.exists(os.path.join(docs_directory, target)):
            sys.stderr.write('Redirect [' + redirect["source"] + '] target does not exist: ' + redirect["target"] + "\n")
            ok = False

    return ok


def verify_markdown(docs_directory):
    ok = True

    # Get list of markdown files.
    markdowns = []
    for root, dirs, files in os.walk(docs_directory):
        for name in files:
            if name.endswith('.md'):
                markdowns.append(os.path.join(root, name))

    for markdown_file in markdowns:
        with open(markdown_file, 'r') as f:
            content = f.read()
            for m in re.finditer(r'\[([^\[]*?)\]\((.*?)(?: \"[^\"]+\")?\)', content):
                target = m.group(2)

                if target in WELL_KNOWN_PAGES:
                    continue

                if markdown_file.endswith("/druid-kerberos.md") and target in ['regexp', 'druid@EXAMPLE.COM']:
                    # Hack to support the fact that rule examples in druid-kerberos docs look sort of like markdown links.
                    continue

                target = re.sub(r'^/docs/VERSION/', '', target)
                target = re.sub(r'#.*$', '', target)
                target = re.sub(r'\.html$', '.md', target)
                target = re.sub(r'/$', '/index.md', target)

                if target and not (target.startswith('http://') or target.startswith('https://')):
                    target_normalized = normalize_link(markdown_file, target)

                    if not os.path.exists(target_normalized):
                        sys.stderr.write('Page [' + markdown_file + '] target does not exist: ' + m.group(2) + "\n")
                        ok = False

    return ok


def main():
    if len(sys.argv) != 3:
        sys.stderr.write('usage: program <docs dir> <redirect.json>\n')
        sys.exit(1)

    ok = verify_redirects(sys.argv[1], sys.argv[2])
    ok = verify_markdown(sys.argv[1]) and ok

    if not ok:
        sys.exit(1)


main()
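
For a quick feel of what the checker treats as a broken link (this example is not part of the commit), here is a small sketch applying the same link regex and target rewrites to a made-up markdown snippet and source directory:

```
import os
import re

# Made-up markdown content and source directory, purely for illustration.
sample = 'See [cache configuration](index.html#cache-configuration) and [old page](../configuration/caching.html "caching").'
source_dir = 'docs/content/querying'

for m in re.finditer(r'\[([^\[]*?)\]\((.*?)(?: \"[^\"]+\")?\)', sample):
    target = m.group(2)
    # The same rewrites the checker applies before testing the filesystem.
    target = re.sub(r'#.*$', '', target)
    target = re.sub(r'\.html$', '.md', target)
    target = re.sub(r'/$', '/index.md', target)
    resolved = os.path.normpath(os.path.join(source_dir, target))
    print(m.group(2), '->', resolved, 'exists:', os.path.exists(resolved))
```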


@@ -88,6 +88,9 @@ remote=$(git -C "$druid" config --local --get "remote.$origin.url")
git clone -q --depth 1 --branch $branch $remote "$src"
if [ -n "$opt_docs" ] ; then
# Check for broken links
"$src/docs/_bin/broken-link-check.py" "$src/docs/content" "$src/docs/_redirects.json"
# Copy docs
mkdir -p $target/docs/$version
rsync -a --delete "$src/docs/content/" $target/docs/$version


@@ -17,7 +17,7 @@
{"source": "DataSourceMetadataQuery.html", "target": "querying/datasourcemetadataquery.html"},
{"source": "Data_formats.html", "target": "ingestion/data-formats.html"},
{"source": "Deep-Storage.html", "target": "dependencies/deep-storage.html"},
{"source": "Design.html", "target": "design/design.html"},
{"source": "Design.html", "target": "design/index.html"},
{"source": "DimensionSpecs.html", "target": "querying/dimensionspecs.html"},
{"source": "Druid-vs-Cassandra.html", "target": "comparisons/druid-vs-key-value.html"},
{"source": "Druid-vs-Elasticsearch.html", "target": "comparisons/druid-vs-elasticsearch.html"},
@@ -27,13 +27,13 @@
{"source": "Druid-vs-Spark.html", "target": "comparisons/druid-vs-spark.html"},
{"source": "Druid-vs-Vertica.html", "target": "comparisons/druid-vs-redshift.html"},
{"source": "Evaluate.html", "target": "tutorials/cluster.html"},
{"source": "Examples.html", "target": "tutorials/quickstart.html"},
{"source": "Examples.html", "target": "tutorials/index.html"},
{"source": "Filters.html", "target": "querying/filters.html"},
{"source": "Firehose.html", "target": "ingestion/firehose.html"},
{"source": "GeographicQueries.html", "target": "development/geo.html"},
{"source": "Granularities.html", "target": "querying/granularities.html"},
{"source": "GroupByQuery.html", "target": "querying/groupbyquery.html"},
{"source": "Hadoop-Configuration.html", "target": "configuration/hadoop.html"},
{"source": "Hadoop-Configuration.html", "target": "ingestion/hadoop.html"},
{"source": "Having.html", "target": "querying/having.html"},
{"source": "Historical-Config.html", "target": "configuration/index.html#historical"},
{"source": "Historical.html", "target": "design/historical.html"},
@@ -41,7 +41,7 @@
{"source": "Indexing-Service-Config.html", "target": "configuration/index.html#overlord"},
{"source": "Indexing-Service.html", "target": "design/indexing-service.html"},
{"source": "Ingestion-FAQ.html", "target": "ingestion/faq.html"},
{"source": "Ingestion-overview.html", "target": "tutorials/ingestion.html"},
{"source": "Ingestion-overview.html", "target": "tutorials/index.html"},
{"source": "Ingestion.html", "target": "ingestion/index.html"},
{"source": "Integrating-Druid-With-Other-Technologies.html", "target": "development/integrating-druid-with-other-technologies.html"},
{"source": "Libraries.html", "target": "/libraries.html"},
@@ -77,10 +77,10 @@
{"source": "TimeseriesQuery.html", "target": "querying/timeseriesquery.html"},
{"source": "TopNMetricSpec.html", "target": "querying/topnmetricspec.html"},
{"source": "TopNQuery.html", "target": "querying/topnquery.html"},
{"source": "Tutorial:-A-First-Look-at-Druid.html", "target": "tutorials/quickstart.html"},
{"source": "Tutorial:-All-About-Queries.html", "target": "tutorials/quickstart.html"},
{"source": "Tutorial:-A-First-Look-at-Druid.html", "target": "tutorials/index.html"},
{"source": "Tutorial:-All-About-Queries.html", "target": "tutorials/index.html"},
{"source": "Tutorial:-Loading-Batch-Data.html", "target": "tutorials/tutorial-batch.html"},
{"source": "Tutorial:-Loading-Streaming-Data.html", "target": "tutorials/tutorial-streams.html"},
{"source": "Tutorial:-Loading-Streaming-Data.html", "target": "tutorials/tutorial-kafka.html"},
{"source": "Tutorial:-The-Druid-Cluster.html", "target": "tutorials/cluster.html"},
{"source": "Tutorials.html", "target": "tutorials/index.html"},
{"source": "Versioning.html", "target": "development/versioning.html"},
@@ -90,7 +90,7 @@
{"source": "comparisons/druid-vs-hadoop.html", "target": "druid-vs-sql-on-hadoop.html"},
{"source": "comparisons/druid-vs-impala-or-shark.html", "target": "druid-vs-sql-on-hadoop.html"},
{"source": "comparisons/druid-vs-vertica.html", "target": "druid-vs-redshift.html"},
{"source": "configuration/auth.html", "target": "design/auth.html"},
{"source": "configuration/auth.html", "target": "../design/auth.html"},
{"source": "configuration/broker.html", "target": "../configuration/index.html#broker"},
{"source": "configuration/caching.html", "target": "../configuration/index.html#cache-configuration"},
{"source": "configuration/coordinator.html", "target": "../configuration/index.html#coordinator"},
@@ -120,9 +120,9 @@
{"source": "tutorials/tutorial-a-first-look-at-druid.html", "target": "index.html"},
{"source": "tutorials/tutorial-all-about-queries.html", "target": "index.html"},
{"source": "tutorials/tutorial-loading-batch-data.html", "target": "tutorial-batch.html"},
{"source": "tutorials/tutorial-loading-streaming-data.html", "target": "tutorial-streams.html"},
{"source": "tutorials/tutorial-loading-streaming-data.html", "target": "tutorial-kafka.html"},
{"source": "tutorials/tutorial-the-druid-cluster.html", "target": "cluster.html"},
{"source": "development/extensions-core/caffeine-cache.html", "target":"../../configuration/caching.html"},
{"source": "development/extensions-core/caffeine-cache.html", "target":"../../configuration/index.html#cache-configuration"},
{"source": "Production-Cluster-Configuration.html", "target": "tutorials/cluster.html"},
{"source": "development/extensions-contrib/parquet.html", "target":"../../development/extensions-core/parquet.html"},
{"source": "development/extensions-contrib/scan-query.html", "target":"../../querying/scan-query.html"},
@@ -130,10 +130,10 @@
{"source": "tutorials/ingestion-streams.html", "target": "index.html"},
{"source": "ingestion/native-batch.html", "target": "native_tasks.html"},
{"source": "Compute.html", "target": "design/processes.html"},
{"source": "Contribute.html", "target": "../../community/index.html"},
{"source": "Download.html", "target": "../../downloads.html"},
{"source": "Contribute.html", "target": "/community/"},
{"source": "Download.html", "target": "/downloads.html"},
{"source": "Druid-Personal-Demo-Cluster.html", "target": "tutorials/index.html"},
{"source": "Home.html", "target": "index.html"},
{"source": "Home.html", "target": "design/index.html"},
{"source": "Loading-Your-Data.html", "target": "ingestion/index.html"},
{"source": "Master.html", "target": "design/processes.html"},
{"source": "MySQL.html", "target": "development/extensions-core/mysql.html"},
@@ -141,29 +141,28 @@
{"source": "Querying-your-data.html", "target": "querying/querying.html"},
{"source": "Spatial-Filters.html", "target": "development/geo.html"},
{"source": "Spatial-Indexing.html", "target": "development/geo.html"},
{"source": "Stand-Alone-With-Riak-CS.html", "target": "index.html"},
{"source": "Support.html", "target": "../../community/index.html"},
{"source": "Stand-Alone-With-Riak-CS.html", "target": "design/index.html"},
{"source": "Support.html", "target": "/community/"},
{"source": "Tutorial:-Webstream.html", "target": "tutorials/index.html"},
{"source": "Twitter-Tutorial.html", "target": "tutorials/index.html"},
{"source": "Tutorial:-Loading-Your-Data-Part-1.html", "target": "tutorials/index.html"},
{"source": "Tutorial:-Loading-Your-Data-Part-2.html", "target": "tutorials/index.html"},
{"source": "Kafka-Eight.html", "target": "development/extensions-core/kafka-eight-firehose.html"},
{"source": "Thanks.html", "target": "../../community/index.html"},
{"source": "Thanks.html", "target": "/community/"},
{"source": "Tutorial-A-First-Look-at-Druid.html", "target": "tutorials/index.html"},
{"source": "Tutorial-All-About-Queries.html", "target": "tutorials/index.html"},
{"source": "Tutorial-Loading-Batch-Data.html", "target": "tutorials/index.html"},
{"source": "Tutorial-Loading-Streaming-Data.html", "target": "tutorials/index.html"},
{"source": "Tutorial-The-Druid-Cluster.html", "target": "tutorials/index.html"},
{"source": "configuration/hadoop.html", "target": "ingestion/hadoop.html"},
{"source": "configuration/production-cluster.html", "target": "tutorials/cluster.html"},
{"source": "configuration/zookeeper.html", "target": "dependencies/zookeeper.html"},
{"source": "querying/optimizations.html", "target": "dependencies/cluster.html"},
{"source": "configuration/hadoop.html", "target": "../ingestion/hadoop.html"},
{"source": "configuration/production-cluster.html", "target": "../tutorials/cluster.html"},
{"source": "configuration/zookeeper.html", "target": "../dependencies/zookeeper.html"},
{"source": "querying/optimizations.html", "target": "multi-value-dimensions.html"},
{"source": "development/community-extensions/azure.html", "target": "../extensions-contrib/azure.html"},
{"source": "development/community-extensions/cassandra.html", "target": "../extensions-contrib/cassandra.html"},
{"source": "development/community-extensions/cloudfiles.html", "target": "../extensions-contrib/cloudfiles.html"},
{"source": "development/community-extensions/graphite.html", "target": "../extensions-contrib/graphite.html"},
{"source": "development/community-extensions/kafka-simple.html", "target": "../extensions-contrib/kafka-simple.html"},
{"source": "development/community-extensions/rabbitmq.html", "target": "../extensions-contrib/rabbitmq.html"},
{"source": "development/extensions-core/namespaced-lookup.html", "target": "lookups-cached-global.html"},
{"source": "operations/insert-segment-to-db.html", "target": "../index.html"}
{"source": "development/extensions-core/namespaced-lookup.html", "target": "lookups-cached-global.html"}
]
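
A note on the relative targets above (e.g. `configuration/auth.html` now pointing at `../design/auth.html`): the new checker resolves every redirect target against the directory of its source page before checking for the corresponding `.md` file. The following sketch, not part of the commit, shows that resolution; the `docs/content` prefix is an assumption about where the docs live:

```
import os
import re

# Resolve a redirect the same way verify_redirects/normalize_link do.
redirect = {"source": "configuration/auth.html", "target": "../design/auth.html"}
resolved = os.path.normpath(os.path.join(os.path.dirname(redirect["source"]), redirect["target"]))
target_md = re.sub(r'\.html(#.*)?$', '.md', resolved)
print(target_md)                                 # design/auth.md
print(os.path.join("docs/content", target_md))   # the file that must exist on disk
```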


@@ -95,4 +95,4 @@ You can optionally configure caching to be enabled on the realtime process by se
|`druid.realtime.cache.unCacheable`|All druid query types|All query types to not cache.|`["select"]`|
|`druid.realtime.cache.maxEntrySize`|positive integer or -1|Maximum size of an individual cache entry (processed results for one segment), in bytes, or -1 for unlimited.|`1000000` (1MB)|
See [cache configuration](caching.html) for how to configure cache settings.
See [cache configuration](index.html#cache-configuration) for how to configure cache settings.


@@ -44,7 +44,7 @@ Core extensions are maintained by Druid committers.
|druid-avro-extensions|Support for data in Apache Avro data format.|[link](../development/extensions-core/avro.html)|
|druid-basic-security|Support for Basic HTTP authentication and role-based access control.|[link](../development/extensions-core/druid-basic-security.html)|
|druid-bloom-filter|Support for providing Bloom filters in druid queries.|[link](../development/extensions-core/bloom-filter.html)|
|druid-caffeine-cache|A local cache implementation backed by Caffeine.|[link](../development/extensions-core/caffeine-cache.html)|
|druid-caffeine-cache|A local cache implementation backed by Caffeine.|[link](../configuration/index.html#cache-configuration)|
|druid-datasketches|Support for approximate counts and set operations with [DataSketches](http://datasketches.github.io/).|[link](../development/extensions-core/datasketches-extension.html)|
|druid-hdfs-storage|HDFS deep storage.|[link](../development/extensions-core/hdfs.html)|
|druid-histogram|Approximate histograms and quantiles aggregator. Deprecated, please use the [DataSketches quantiles aggregator](../development/extensions-core/datasketches-quantiles.html) from the `druid-datasketches` extension instead.|[link](../development/extensions-core/approximate-histograms.html)|


@@ -73,4 +73,4 @@ At some point in the future, we will likely move the internal UI code out of cor
## Client Libraries
We welcome contributions for new client libraries to interact with Druid. See client
[libraries](../development/libraries.html) for existing client libraries.
[libraries](/libraries.html) for existing client libraries.


@@ -41,7 +41,7 @@ Below is a description of the high-level features and functionality of the Druid
## Home
The home view provide a high level overview of the cluster. Each card is clickable and links to the appropriate view. The legacy menu allows you to go to the [legacy coordinator and overlord consoles](./management-uis#legacy-consoles) should you need them.
The home view provides a high-level overview of the cluster. Each card is clickable and links to the appropriate view. The legacy menu allows you to go to the [legacy coordinator and overlord consoles](./management-uis.html#legacy-consoles) should you need them.
![home-view](./img/01-home-view.png)


@@ -0,0 +1,49 @@
---
layout: doc_page
title: "insert-segment-to-db Tool"
---
<!--
~ Licensed to the Apache Software Foundation (ASF) under one
~ or more contributor license agreements. See the NOTICE file
~ distributed with this work for additional information
~ regarding copyright ownership. The ASF licenses this file
~ to you under the Apache License, Version 2.0 (the
~ "License"); you may not use this file except in compliance
~ with the License. You may obtain a copy of the License at
~
~ http://www.apache.org/licenses/LICENSE-2.0
~
~ Unless required by applicable law or agreed to in writing,
~ software distributed under the License is distributed on an
~ "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
~ KIND, either express or implied. See the License for the
~ specific language governing permissions and limitations
~ under the License.
-->
# insert-segment-to-db Tool
In older versions of Apache Druid (incubating), `insert-segment-to-db` was a tool that could scan deep storage and
insert data from there into Druid metadata storage. It was intended to be used to update the segment table in the
metadata storage after manually migrating segments from one place to another, or even to recover lost metadata storage
by telling it where the segments are stored.
In Druid 0.14.x and earlier, Druid wrote segment metadata to two places: the metadata store's `druid_segments` table, and
`descriptor.json` files in deep storage. This practice was stopped in Druid 0.15.0 as part of
[consolidated metadata management](https://github.com/apache/druid/issues/6849), for the following reasons:
1. If any segments are manually dropped or re-enabled by cluster operators, this information is not reflected in
deep storage. Restoring metadata from deep storage would undo any such drops or re-enables.
2. Ingestion methods that allocate segments optimistically (such as native Kafka or Kinesis stream ingestion, or native
batch ingestion in 'append' mode) can write segments to deep storage that are not meant to actually be used by the
Druid cluster. There is no way, while purely looking at deep storage, to differentiate the segments that made it into
the metadata store originally (and therefore _should_ be used) from the segments that did not (and therefore
_should not_ be used).
3. Nothing in Druid other than the `insert-segment-to-db` tool read the `descriptor.json` files.
After this change, Druid stopped writing `descriptor.json` files to deep storage, and now only writes segment metadata
to the metadata store. This meant the `insert-segment-to-db` tool was no longer useful, so it was removed in Druid 0.15.0.
It is highly recommended that you take regular backups of your metadata store, since it is difficult to recover Druid
clusters properly without it.
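
Not part of the commit, but as a concrete illustration of the doc's point that only the metadata store knows which segments are "used": a sketch of pulling that list directly, assuming a MySQL-backed metadata store and the default `druid_segments` table (connection details and schema are assumptions; adjust for your deployment):

```
import pymysql  # assumption: MySQL metadata store; use your store's driver

# List the segments the cluster actually considers used -- information that
# cannot be reconstructed from deep storage alone, hence the backup advice above.
conn = pymysql.connect(host="metadata-host", user="druid", password="secret", database="druid")
try:
    with conn.cursor() as cur:
        cur.execute("SELECT id FROM druid_segments WHERE used = true")
        used_segments = [row[0] for row in cur.fetchall()]
    print(len(used_segments), "used segments")
finally:
    conn.close()
```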


@@ -298,7 +298,7 @@ Please note that `hyperUnique` aggregators are not mutually compatible with Data
Note the DataSketches Theta and HLL aggregators currently only support single-column inputs. If you were previously using the Cardinality aggregator with multiple-column inputs, equivalent operations using Theta or HLL sketches are described below:
* Multi-column `byValue` Cardinality can be replaced with a union of Theta sketches on the individual input columns
* Multi-column `byRow` Cardinality can be replaced with a Theta or HLL sketch on a single [virtual column]((../querying/virtual-columns.html) that combines the individual input columns.
* Multi-column `byRow` Cardinality can be replaced with a Theta or HLL sketch on a single [virtual column](../querying/virtual-columns.html) that combines the individual input columns.
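
As a rough sketch of that replacement (not part of this commit), written as a Python dict mirroring a native query body; the datasource and dimension names are hypothetical, and the exact fields may vary by Druid version:

```
import json

# byRow cardinality over (dim1, dim2), replaced by a Theta sketch over a single
# expression virtual column that concatenates the two dimensions.
query = {
    "queryType": "timeseries",
    "dataSource": "example_datasource",
    "intervals": ["2019-01-01/2019-02-01"],
    "granularity": "all",
    "virtualColumns": [
        {
            "type": "expression",
            "name": "dim1_dim2",
            "expression": "concat(\"dim1\", '|', \"dim2\")",
            "outputType": "STRING",
        }
    ],
    "aggregations": [
        {"type": "thetaSketch", "name": "distinct_dim1_dim2", "fieldName": "dim1_dim2"}
    ],
}
print(json.dumps(query, indent=2))
```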
### Histograms and quantiles
@@ -318,7 +318,7 @@ As a general guideline for experimentation, the [Moments Sketch paper](https://a
#### Fixed Buckets Histogram
Druid also provides a [simple histogram implementation]((../development/extensions-core/approxiate-histograms.html#fixed-buckets-histogram) that uses a fixed range and fixed number of buckets with support for quantile estimation, backed by an array of bucket count values.
Druid also provides a [simple histogram implementation](../development/extensions-core/approximate-histograms.html#fixed-buckets-histogram) that uses a fixed range and fixed number of buckets with support for quantile estimation, backed by an array of bucket count values.
The fixed buckets histogram can perform well when the distribution of the input data allows a small number of buckets to be used.


@@ -57,7 +57,7 @@ If your multitenant cluster uses shared datasources, most of your queries will l
dimension. These sorts of queries perform best when data is well-partitioned by tenant. There are a few ways to
accomplish this.
With batch indexing, you can use [single-dimension partitioning](../indexing/batch-ingestion.html#single-dimension-partitioning)
With batch indexing, you can use [single-dimension partitioning](../ingestion/hadoop.html#single-dimension-partitioning)
to partition your data by tenant_id. Druid always partitions by time first, but the secondary partition within each
time bucket will be on tenant_id.
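
For illustration (not part of this commit), a fragment of what single-dimension partitioning on `tenant_id` might look like in a Hadoop batch ingestion spec; treat the field names and the target size as assumptions to adapt for your setup:

```
import json

# partitionsSpec fragment for a Hadoop indexing task's tuningConfig (sketch):
# partition by time first, then by tenant_id within each time chunk.
spec_fragment = {
    "tuningConfig": {
        "type": "hadoop",
        "partitionsSpec": {
            "type": "dimension",
            "partitionDimension": "tenant_id",
            "targetPartitionSize": 5000000,
        },
    }
}
print(json.dumps(spec_fragment, indent=2))
```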


@@ -37,7 +37,7 @@ curl -X POST '<queryable_host>:<port>/druid/v2/?pretty' -H 'Content-Type:applica
```
Druid's native query language is JSON over HTTP, although many members of the community have contributed different
[client libraries](../development/libraries.html) in other languages to query Druid.
[client libraries](/libraries.html) in other languages to query Druid.
The Content-Type/Accept Headers can also take 'application/x-jackson-smile'.
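
To make "JSON over HTTP" concrete (this snippet is not part of the commit), a minimal sketch of posting a native query to the Broker from Python; the host, port, and datasource are placeholders:

```
import requests  # assumption: any HTTP client works

# Mirrors the curl example above: POST the query JSON to the Broker.
query = {"queryType": "timeBoundary", "dataSource": "example_datasource"}
resp = requests.post(
    "http://localhost:8082/druid/v2/?pretty",  # placeholder Broker host/port
    json=query,
    headers={"Content-Type": "application/json"},
)
print(resp.json())
```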


@@ -744,4 +744,4 @@ Broker will emit the following metrics for SQL.
## Authorization Permissions
Please see [Defining SQL permissions](../../development/extensions-core/druid-basic-security.html#sql-permissions) for information on what permissions are needed for making SQL queries in a secured cluster.
Please see [Defining SQL permissions](../development/extensions-core/druid-basic-security.html#sql-permissions) for information on what permissions are needed for making SQL queries in a secured cluster.


@@ -157,7 +157,7 @@ layout: toc
## Development
* [Overview](/docs/VERSION/development/overview.html)
* [Libraries](/docs/VERSION/development/libraries.html)
* [Libraries](/libraries.html)
* [Extensions](/docs/VERSION/development/extensions.html)
* [JavaScript](/docs/VERSION/development/javascript.html)
* [Build From Source](/docs/VERSION/development/build.html)


@@ -102,12 +102,12 @@ cd apache-druid-#{DRUIDVERSION}
In the package, you should find:
* `DISCLAIMER`, `LICENSE`, and `NOTICE` files
* `bin/*` - scripts related to the [single-machine quickstart](quickstart.html)
* `bin/*` - scripts related to the [single-machine quickstart](index.html)
* `conf/*` - template configurations for a clustered setup
* `extensions/*` - core Druid extensions
* `hadoop-dependencies/*` - Druid Hadoop dependencies
* `lib/*` - libraries and dependencies for core Druid
* `quickstart/*` - files related to the [single-machine quickstart](quickstart.html)
* `quickstart/*` - files related to the [single-machine quickstart](index.html)
We'll be editing the files in `conf/` in order to get things running.


@@ -31,7 +31,7 @@ This tutorial shows you how to load streaming data into Apache Druid (incubating
[Tranquility Server](https://github.com/druid-io/tranquility/blob/master/docs/server.md) allows a stream of data to be pushed into Druid using HTTP POSTs.
For this tutorial, we'll assume you've already downloaded Druid as described in
the [single-machine quickstart](quickstart.html) and have it running on your local machine. You
the [single-machine quickstart](index.html) and have it running on your local machine. You
don't need to have loaded any data yet.
## Download Tranquility