diff --git a/docs/reference/analysis.asciidoc b/docs/reference/analysis.asciidoc
index aaa40c01aea..a5625abbd61 100644
--- a/docs/reference/analysis.asciidoc
+++ b/docs/reference/analysis.asciidoc
@@ -150,11 +150,11 @@ include::analysis/testing.asciidoc[]
 
 include::analysis/analyzers.asciidoc[]
 
-include::analysis/normalizers.asciidoc[]
-
 include::analysis/tokenizers.asciidoc[]
 
 include::analysis/tokenfilters.asciidoc[]
 
 include::analysis/charfilters.asciidoc[]
 
+include::analysis/normalizers.asciidoc[]
+
diff --git a/docs/reference/analysis/analyzers.asciidoc b/docs/reference/analysis/analyzers.asciidoc
index 2fac8fb1455..84b6011a864 100644
--- a/docs/reference/analysis/analyzers.asciidoc
+++ b/docs/reference/analysis/analyzers.asciidoc
@@ -1,5 +1,5 @@
 [[analysis-analyzers]]
-== Analyzers
+== Built-in analyzer reference
 
 Elasticsearch ships with a wide range of built-in analyzers, which can be
 used in any index without further configuration:
diff --git a/docs/reference/analysis/charfilters.asciidoc b/docs/reference/analysis/charfilters.asciidoc
index 47390fe0ae8..97fe4fd266b 100644
--- a/docs/reference/analysis/charfilters.asciidoc
+++ b/docs/reference/analysis/charfilters.asciidoc
@@ -1,5 +1,5 @@
 [[analysis-charfilters]]
-== Character Filters
+== Character filters reference
 
 _Character filters_ are used to preprocess the stream of characters before it
 is passed to the <<analysis-tokenizers,tokenizer>>.
diff --git a/docs/reference/analysis/tokenfilters.asciidoc b/docs/reference/analysis/tokenfilters.asciidoc
index 7172b5e4149..46cd0347b72 100644
--- a/docs/reference/analysis/tokenfilters.asciidoc
+++ b/docs/reference/analysis/tokenfilters.asciidoc
@@ -1,5 +1,5 @@
 [[analysis-tokenfilters]]
-== Token Filters
+== Token filter reference
 
 Token filters accept a stream of tokens from a
 <<analysis-tokenizers,tokenizer>> and can modify tokens
diff --git a/docs/reference/analysis/tokenizers.asciidoc b/docs/reference/analysis/tokenizers.asciidoc
index f4a8a13aabb..830ab3149ce 100644
--- a/docs/reference/analysis/tokenizers.asciidoc
+++ b/docs/reference/analysis/tokenizers.asciidoc
@@ -1,5 +1,5 @@
 [[analysis-tokenizers]]
-== Tokenizers
+== Tokenizer reference
 
 A _tokenizer_ receives a stream of characters, breaks it up into individual
 _tokens_ (usually individual words), and outputs a stream of _tokens_. For