[DOCS] Sort analyzers, tokenizers, and token filters alphabetically (#48068)

James Rodewig 2019-10-15 15:46:50 -04:00
parent d9c7e3847e
commit 601a88bede
5 changed files with 89 additions and 95 deletions


@@ -56,21 +56,20 @@ If you do not find an analyzer suitable for your needs, you can create a
include::analyzers/configuring.asciidoc[]
-include::analyzers/standard-analyzer.asciidoc[]
-include::analyzers/simple-analyzer.asciidoc[]
-include::analyzers/whitespace-analyzer.asciidoc[]
-include::analyzers/stop-analyzer.asciidoc[]
+include::analyzers/fingerprint-analyzer.asciidoc[]
include::analyzers/keyword-analyzer.asciidoc[]
-include::analyzers/pattern-analyzer.asciidoc[]
include::analyzers/lang-analyzer.asciidoc[]
-include::analyzers/fingerprint-analyzer.asciidoc[]
+include::analyzers/pattern-analyzer.asciidoc[]
+include::analyzers/simple-analyzer.asciidoc[]
+include::analyzers/standard-analyzer.asciidoc[]
+include::analyzers/stop-analyzer.asciidoc[]
+include::analyzers/whitespace-analyzer.asciidoc[]
include::analyzers/custom-analyzer.asciidoc[]
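
For context on the analyzer pages reordered above: `analyzers/configuring.asciidoc`, which stays at the top of the list, covers configuring the built-in analyzers. A minimal sketch of the kind of request that page describes (the index name `my_index`, analyzer name `std_english`, and stop word list are illustrative, not taken from this commit):

[source,console]
----
PUT /my_index
{
  "settings": {
    "analysis": {
      "analyzer": {
        "std_english": { <1>
          "type": "standard",
          "stopwords": "_english_"
        }
      }
    }
  }
}
----
<1> Defines `std_english` as a `standard` analyzer configured with the predefined `_english_` stop word list.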


@@ -6,42 +6,43 @@ Token filters accept a stream of tokens from a
(eg lowercasing), delete tokens (eg remove stopwords)
or add tokens (eg synonyms).
-Elasticsearch has a number of built in token filters which can be
-used to build <<analysis-custom-analyzer,custom analyzers>>.
+{es} has a number of built-in token filters you can use
+to build <<analysis-custom-analyzer,custom analyzers>>.
+include::tokenfilters/apostrophe-tokenfilter.asciidoc[]
include::tokenfilters/asciifolding-tokenfilter.asciidoc[]
-include::tokenfilters/flatten-graph-tokenfilter.asciidoc[]
+include::tokenfilters/cjk-bigram-tokenfilter.asciidoc[]
-include::tokenfilters/length-tokenfilter.asciidoc[]
+include::tokenfilters/cjk-width-tokenfilter.asciidoc[]
-include::tokenfilters/lowercase-tokenfilter.asciidoc[]
+include::tokenfilters/classic-tokenfilter.asciidoc[]
-include::tokenfilters/uppercase-tokenfilter.asciidoc[]
+include::tokenfilters/common-grams-tokenfilter.asciidoc[]
-include::tokenfilters/ngram-tokenfilter.asciidoc[]
-include::tokenfilters/edgengram-tokenfilter.asciidoc[]
-include::tokenfilters/porterstem-tokenfilter.asciidoc[]
-include::tokenfilters/shingle-tokenfilter.asciidoc[]
-include::tokenfilters/stop-tokenfilter.asciidoc[]
-include::tokenfilters/word-delimiter-tokenfilter.asciidoc[]
-include::tokenfilters/word-delimiter-graph-tokenfilter.asciidoc[]
-include::tokenfilters/multiplexer-tokenfilter.asciidoc[]
+include::tokenfilters/compound-word-tokenfilter.asciidoc[]
include::tokenfilters/condition-tokenfilter.asciidoc[]
-include::tokenfilters/predicate-tokenfilter.asciidoc[]
+include::tokenfilters/decimal-digit-tokenfilter.asciidoc[]
-include::tokenfilters/stemmer-tokenfilter.asciidoc[]
+include::tokenfilters/delimited-payload-tokenfilter.asciidoc[]
-include::tokenfilters/stemmer-override-tokenfilter.asciidoc[]
+include::tokenfilters/edgengram-tokenfilter.asciidoc[]
+include::tokenfilters/elision-tokenfilter.asciidoc[]
+include::tokenfilters/fingerprint-tokenfilter.asciidoc[]
+include::tokenfilters/flatten-graph-tokenfilter.asciidoc[]
+include::tokenfilters/hunspell-tokenfilter.asciidoc[]
+include::tokenfilters/keep-types-tokenfilter.asciidoc[]
+include::tokenfilters/keep-words-tokenfilter.asciidoc[]
include::tokenfilters/keyword-marker-tokenfilter.asciidoc[]
@@ -49,56 +50,56 @@ include::tokenfilters/keyword-repeat-tokenfilter.asciidoc[]
include::tokenfilters/kstem-tokenfilter.asciidoc[]
-include::tokenfilters/snowball-tokenfilter.asciidoc[]
+include::tokenfilters/length-tokenfilter.asciidoc[]
-include::tokenfilters/phonetic-tokenfilter.asciidoc[]
+include::tokenfilters/limit-token-count-tokenfilter.asciidoc[]
-include::tokenfilters/synonym-tokenfilter.asciidoc[]
+include::tokenfilters/lowercase-tokenfilter.asciidoc[]
-include::tokenfilters/synonym-graph-tokenfilter.asciidoc[]
+include::tokenfilters/minhash-tokenfilter.asciidoc[]
-include::tokenfilters/compound-word-tokenfilter.asciidoc[]
+include::tokenfilters/multiplexer-tokenfilter.asciidoc[]
-include::tokenfilters/reverse-tokenfilter.asciidoc[]
+include::tokenfilters/ngram-tokenfilter.asciidoc[]
-include::tokenfilters/elision-tokenfilter.asciidoc[]
-include::tokenfilters/truncate-tokenfilter.asciidoc[]
-include::tokenfilters/unique-tokenfilter.asciidoc[]
+include::tokenfilters/normalization-tokenfilter.asciidoc[]
include::tokenfilters/pattern-capture-tokenfilter.asciidoc[]
include::tokenfilters/pattern_replace-tokenfilter.asciidoc[]
+include::tokenfilters/phonetic-tokenfilter.asciidoc[]
+include::tokenfilters/porterstem-tokenfilter.asciidoc[]
+include::tokenfilters/predicate-tokenfilter.asciidoc[]
+include::tokenfilters/remove-duplicates-tokenfilter.asciidoc[]
+include::tokenfilters/reverse-tokenfilter.asciidoc[]
+include::tokenfilters/shingle-tokenfilter.asciidoc[]
+include::tokenfilters/snowball-tokenfilter.asciidoc[]
+include::tokenfilters/stemmer-tokenfilter.asciidoc[]
+include::tokenfilters/stemmer-override-tokenfilter.asciidoc[]
+include::tokenfilters/stop-tokenfilter.asciidoc[]
+include::tokenfilters/synonym-tokenfilter.asciidoc[]
+include::tokenfilters/synonym-graph-tokenfilter.asciidoc[]
include::tokenfilters/trim-tokenfilter.asciidoc[]
-include::tokenfilters/limit-token-count-tokenfilter.asciidoc[]
+include::tokenfilters/truncate-tokenfilter.asciidoc[]
-include::tokenfilters/hunspell-tokenfilter.asciidoc[]
+include::tokenfilters/unique-tokenfilter.asciidoc[]
-include::tokenfilters/common-grams-tokenfilter.asciidoc[]
+include::tokenfilters/uppercase-tokenfilter.asciidoc[]
-include::tokenfilters/normalization-tokenfilter.asciidoc[]
+include::tokenfilters/word-delimiter-tokenfilter.asciidoc[]
-include::tokenfilters/cjk-width-tokenfilter.asciidoc[]
-include::tokenfilters/cjk-bigram-tokenfilter.asciidoc[]
-include::tokenfilters/delimited-payload-tokenfilter.asciidoc[]
-include::tokenfilters/keep-words-tokenfilter.asciidoc[]
-include::tokenfilters/keep-types-tokenfilter.asciidoc[]
-include::tokenfilters/classic-tokenfilter.asciidoc[]
-include::tokenfilters/apostrophe-tokenfilter.asciidoc[]
-include::tokenfilters/decimal-digit-tokenfilter.asciidoc[]
-include::tokenfilters/fingerprint-tokenfilter.asciidoc[]
-include::tokenfilters/minhash-tokenfilter.asciidoc[]
-include::tokenfilters/remove-duplicates-tokenfilter.asciidoc[]
+include::tokenfilters/word-delimiter-graph-tokenfilter.asciidoc[]
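
The intro text changed in this hunk describes how these built-in token filters are combined into <<analysis-custom-analyzer,custom analyzers>>. A minimal sketch of such a definition (the index name `my_index` and analyzer name `my_custom_analyzer` are illustrative, assuming the `standard` tokenizer with the built-in `lowercase` and `stop` filters):

[source,console]
----
PUT /my_index
{
  "settings": {
    "analysis": {
      "analyzer": {
        "my_custom_analyzer": {
          "type": "custom", <1>
          "tokenizer": "standard",
          "filter": [ "lowercase", "stop" ] <2>
        }
      }
    }
  }
}
----
<1> A `custom` analyzer combines a tokenizer with zero or more token filters (and character filters).
<2> The filters run in the order listed: `lowercase` modifies tokens and `stop` deletes stop word tokens, matching the modify/delete/add description at the top of the hunk.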


@@ -71,6 +71,7 @@ The response will be:
Note how only the `<NUM>` token is in the output.
[discrete]
=== Exclude mode settings example
If the `mode` parameter is set to `exclude` like in the following example:
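
The example request itself falls outside this hunk. A minimal sketch of what such a request looks like (the sample text and the `<NUM>` token type here are illustrative, not quoted from the page):

[source,console]
----
GET /_analyze
{
  "tokenizer": "standard",
  "filter": [
    {
      "type": "keep_types",
      "types": [ "<NUM>" ],
      "mode": "exclude" <1>
    }
  ],
  "text": "1 quick fox 2 lazy dogs"
}
----
<1> With `mode` set to `exclude`, tokens of the listed types are dropped, so only the word tokens remain in the output.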


@@ -163,7 +163,7 @@ PUT /test_index
Using `synonyms_path` to define WordNet synonyms in a file is supported
as well.
[discrete]
=== Parsing synonym files
Elasticsearch will use the token filters preceding the synonym filter
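
A minimal sketch of a WordNet-format synonym filter wired into an analyzer (the filter name, analyzer name, and file path are assumptions for illustration; the file must exist under the node's config directory):

[source,console]
----
PUT /test_index
{
  "settings": {
    "analysis": {
      "filter": {
        "my_wordnet_synonyms": {
          "type": "synonym",
          "format": "wordnet", <1>
          "synonyms_path": "analysis/wn_s.pl"
        }
      },
      "analyzer": {
        "my_analyzer": {
          "tokenizer": "standard",
          "filter": [ "lowercase", "my_wordnet_synonyms" ] <2>
        }
      }
    }
  }
}
----
<1> `format: wordnet` tells the filter to parse the file as WordNet entries rather than Solr-style synonym lines.
<2> The token filters placed before the synonym filter (here `lowercase`) are also applied when the synonym file is parsed, which is the behavior the sentence above refers to.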


@@ -121,41 +121,34 @@ path, splits on the path separator, and emits a term for each component in the
tree, e.g. `/foo/bar/baz` -> `[/foo, /foo/bar, /foo/bar/baz ]`.
+include::tokenizers/chargroup-tokenizer.asciidoc[]
+include::tokenizers/classic-tokenizer.asciidoc[]
+include::tokenizers/edgengram-tokenizer.asciidoc[]
-include::tokenizers/standard-tokenizer.asciidoc[]
+include::tokenizers/keyword-tokenizer.asciidoc[]
include::tokenizers/letter-tokenizer.asciidoc[]
include::tokenizers/lowercase-tokenizer.asciidoc[]
-include::tokenizers/whitespace-tokenizer.asciidoc[]
-include::tokenizers/uaxurlemail-tokenizer.asciidoc[]
-include::tokenizers/classic-tokenizer.asciidoc[]
-include::tokenizers/thai-tokenizer.asciidoc[]
include::tokenizers/ngram-tokenizer.asciidoc[]
-include::tokenizers/edgengram-tokenizer.asciidoc[]
-include::tokenizers/keyword-tokenizer.asciidoc[]
-include::tokenizers/pattern-tokenizer.asciidoc[]
-include::tokenizers/chargroup-tokenizer.asciidoc[]
-include::tokenizers/simplepattern-tokenizer.asciidoc[]
-include::tokenizers/simplepatternsplit-tokenizer.asciidoc[]
include::tokenizers/pathhierarchy-tokenizer.asciidoc[]
include::tokenizers/pathhierarchy-tokenizer-examples.asciidoc[]
+include::tokenizers/pattern-tokenizer.asciidoc[]
+include::tokenizers/simplepattern-tokenizer.asciidoc[]
+include::tokenizers/simplepatternsplit-tokenizer.asciidoc[]
+include::tokenizers/standard-tokenizer.asciidoc[]
+include::tokenizers/thai-tokenizer.asciidoc[]
+include::tokenizers/uaxurlemail-tokenizer.asciidoc[]
+include::tokenizers/whitespace-tokenizer.asciidoc[]
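
To illustrate the path hierarchy tokenizer described at the top of this hunk, here is a minimal analyze request using the same `/foo/bar/baz` sample path:

[source,console]
----
POST /_analyze
{
  "tokenizer": "path_hierarchy",
  "text": "/foo/bar/baz"
}
----

The request emits one term per level of the path: `/foo`, `/foo/bar`, and `/foo/bar/baz`.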