From fcb01deb3433bee4a5449b6750a995213de6a596 Mon Sep 17 00:00:00 2001 From: Pascal Borreli Date: Mon, 10 Oct 2016 21:51:47 +0100 Subject: [PATCH] Fixed typos (#20843) --- .../bucket/diversified-sampler-aggregation.asciidoc | 4 ++-- .../aggregations/bucket/terms-aggregation.asciidoc | 2 +- .../analysis/tokenizers/lowercase-tokenizer.asciidoc | 2 +- docs/reference/how-to/indexing-speed.asciidoc | 6 +++--- docs/reference/index-modules/store.asciidoc | 2 +- docs/reference/ingest/ingest-node.asciidoc | 2 +- docs/reference/mapping/dynamic/templates.asciidoc | 2 +- docs/reference/migration/migrate_6_0/plugins.asciidoc | 2 +- docs/reference/migration/migrate_6_0/rest.asciidoc | 4 ++-- docs/reference/modules/node.asciidoc | 2 +- docs/reference/modules/scripting/using.asciidoc | 2 +- docs/reference/modules/snapshots.asciidoc | 2 +- docs/reference/query-dsl/function-score-query.asciidoc | 2 +- docs/reference/setup/bootstrap-checks.asciidoc | 2 +- docs/reference/setup/important-settings.asciidoc | 2 +- docs/reference/setup/install/init-systemd.asciidoc | 2 +- docs/reference/setup/install/zip-targz.asciidoc | 4 ++-- docs/reference/setup/sysconfig/heap_size.asciidoc | 2 +- docs/reference/testing/testing-framework.asciidoc | 2 +- 19 files changed, 24 insertions(+), 24 deletions(-) diff --git a/docs/reference/aggregations/bucket/diversified-sampler-aggregation.asciidoc b/docs/reference/aggregations/bucket/diversified-sampler-aggregation.asciidoc index 70412d2680a..ac6dff5a248 100644 --- a/docs/reference/aggregations/bucket/diversified-sampler-aggregation.asciidoc +++ b/docs/reference/aggregations/bucket/diversified-sampler-aggregation.asciidoc @@ -63,7 +63,7 @@ Response: } -------------------------------------------------- -<1> 1000 documents were sampled in total becase we asked for a maximum of 200 from an index with 5 shards. The cost of performing the nested significant_terms aggregation was therefore limited rather than unbounded. 
+<1> 1000 documents were sampled in total because we asked for a maximum of 200 from an index with 5 shards. The cost of performing the nested significant_terms aggregation was therefore limited rather than unbounded. <2> The results of the significant_terms aggregation are not skewed by any single over-active Twitter user because we asked for a maximum of one tweet from any one user in our sample. @@ -92,7 +92,7 @@ Controlling diversity using a field: { "aggs" : { "sample" : { - "diverisfied_sampler" : { + "diversified_sampler" : { "field" : "author", "max_docs_per_value" : 3 } diff --git a/docs/reference/aggregations/bucket/terms-aggregation.asciidoc b/docs/reference/aggregations/bucket/terms-aggregation.asciidoc index 68b2e8511f9..fb3baca0967 100644 --- a/docs/reference/aggregations/bucket/terms-aggregation.asciidoc +++ b/docs/reference/aggregations/bucket/terms-aggregation.asciidoc @@ -499,7 +499,7 @@ TIP: for indexed scripts replace the `file` parameter with an `id` parameter. "aggs" : { "genres" : { "terms" : { - "field" : "gendre", + "field" : "gender", "script" : { "inline" : "'Genre: ' +_value" "lang" : "painless" diff --git a/docs/reference/analysis/tokenizers/lowercase-tokenizer.asciidoc b/docs/reference/analysis/tokenizers/lowercase-tokenizer.asciidoc index 5aad28b4394..b175267296d 100644 --- a/docs/reference/analysis/tokenizers/lowercase-tokenizer.asciidoc +++ b/docs/reference/analysis/tokenizers/lowercase-tokenizer.asciidoc @@ -2,7 +2,7 @@ === Lowercase Tokenizer -The `lowercase` toknenizer, like the +The `lowercase` tokenizer, like the <> breaks text into terms whenever it encounters a character which is not a letter, but it also lowecases all terms. 
It is functionally equivalent to the diff --git a/docs/reference/how-to/indexing-speed.asciidoc b/docs/reference/how-to/indexing-speed.asciidoc index 50187af5b28..b0bd5fef802 100644 --- a/docs/reference/how-to/indexing-speed.asciidoc +++ b/docs/reference/how-to/indexing-speed.asciidoc @@ -5,7 +5,7 @@ === Use bulk requests Bulk requests will yield much better performance than single-document index -requests. In order to know the optimal size of a bulk request, you shoud run +requests. In order to know the optimal size of a bulk request, you should run a benchmark on a single node with a single shard. First try to index 100 documents at once, then 200, then 400, etc. doubling the number of documents in a bulk request in every benchmark run. When the indexing speed starts to @@ -32,7 +32,7 @@ When it happens, you should pause indexing a bit before trying again, ideally with randomized exponential backoff. Similarly to sizing bulk requests, only testing can tell what the optimal -number of workers is. This can be tested by progressivily increasing the +number of workers is. This can be tested by progressively increasing the number of workers until either I/O or CPU is saturated on the cluster. [float] @@ -58,7 +58,7 @@ original values. === Disable swapping You should make sure that the operating system is not swapping out the java -process by <>. +process by <>. [float] === Give memory to the filesystem cache diff --git a/docs/reference/index-modules/store.asciidoc b/docs/reference/index-modules/store.asciidoc index ee7fd8766fd..8d552ae078f 100644 --- a/docs/reference/index-modules/store.asciidoc +++ b/docs/reference/index-modules/store.asciidoc @@ -79,7 +79,7 @@ By default, elasticsearch completely relies on the operating system file system cache for caching I/O operations. It is possible to set `index.store.preload` in order to tell the operating system to load the content of hot index files into memory upon opening. 
This setting accept a comma-separated list of -files extensions: all files whose extenion is in the list will be pre-loaded +files extensions: all files whose extension is in the list will be pre-loaded upon opening. This can be useful to improve search performance of an index, especially when the host operating system is restarted, since this causes the file system cache to be trashed. However note that this may slow down the diff --git a/docs/reference/ingest/ingest-node.asciidoc b/docs/reference/ingest/ingest-node.asciidoc index 48a0bc67e6b..013275c0466 100644 --- a/docs/reference/ingest/ingest-node.asciidoc +++ b/docs/reference/ingest/ingest-node.asciidoc @@ -660,7 +660,7 @@ A node will not start if either of these plugins are not available. The <> can be used to fetch ingest usage statistics, globally and on a per pipeline basis. Useful to find out which pipelines are used the most or spent the most time on preprocessing. -[[append-procesesor]] +[[append-processor]] === Append Processor Appends one or more values to an existing array if the field already exists and it is an array. Converts a scalar to an array and appends one or more values to it if the field exists and it is a scalar. diff --git a/docs/reference/mapping/dynamic/templates.asciidoc b/docs/reference/mapping/dynamic/templates.asciidoc index 21b0eadf683..468df64b1d4 100644 --- a/docs/reference/mapping/dynamic/templates.asciidoc +++ b/docs/reference/mapping/dynamic/templates.asciidoc @@ -353,7 +353,7 @@ remove it as described in the previous section. ===== Time-series -When doing time series analysis with elastisearch, it is common to have many +When doing time series analysis with elasticsearch, it is common to have many numeric fields that you will often aggregate on but never filter on. 
In such a case, you could disable indexing on those fields to save disk space and also maybe gain some indexing speed: diff --git a/docs/reference/migration/migrate_6_0/plugins.asciidoc b/docs/reference/migration/migrate_6_0/plugins.asciidoc index ff8a75ab448..bf73dc10e94 100644 --- a/docs/reference/migration/migrate_6_0/plugins.asciidoc +++ b/docs/reference/migration/migrate_6_0/plugins.asciidoc @@ -3,5 +3,5 @@ ==== Mapper attachments plugin -* The mapper attachments plugin has been depecated in elasticsearch 5.0 and is now removed. +* The mapper attachments plugin has been deprecated in elasticsearch 5.0 and is now removed. You can use {plugins}/ingest-attachment.html[ingest attachment plugin] instead. diff --git a/docs/reference/migration/migrate_6_0/rest.asciidoc b/docs/reference/migration/migrate_6_0/rest.asciidoc index 1e02df1f61f..a0ac594e3fc 100644 --- a/docs/reference/migration/migrate_6_0/rest.asciidoc +++ b/docs/reference/migration/migrate_6_0/rest.asciidoc @@ -4,6 +4,6 @@ ==== Unquoted JSON In previous versions of Elasticsearch, JSON documents were allowed to contain unquoted field names. -This feature was removed in the 5.x series, but a backwards-compability layer was added via the -system property `elasticsearch.json.allow_unquoted_field_names`. This backwards-compability layer +This feature was removed in the 5.x series, but a backwards-compatibility layer was added via the +system property `elasticsearch.json.allow_unquoted_field_names`. This backwards-compatibility layer has been removed in Elasticsearch 6.0.0. diff --git a/docs/reference/modules/node.asciidoc b/docs/reference/modules/node.asciidoc index e1cc3e0b866..b921edca731 100644 --- a/docs/reference/modules/node.asciidoc +++ b/docs/reference/modules/node.asciidoc @@ -287,7 +287,7 @@ machine. In production, however, it is recommended to run only one node of Elast By default, Elasticsearch is configured to prevent more than one node from sharing the same data path. 
To allow for more than one node (e.g., on your development machine), use the setting -`node.max_local_storage_nodes` and set this to a positve integer larger than one. +`node.max_local_storage_nodes` and set this to a positive integer larger than one. WARNING: Never run different node types (i.e. master, data) from the same data directory. This can lead to unexpected data loss. diff --git a/docs/reference/modules/scripting/using.asciidoc b/docs/reference/modules/scripting/using.asciidoc index 17ab4a8180a..c3af5861879 100644 --- a/docs/reference/modules/scripting/using.asciidoc +++ b/docs/reference/modules/scripting/using.asciidoc @@ -13,7 +13,7 @@ the same pattern: } ------------------------------------- <1> The language the script is written in, which defaults to `painless`. -<2> The script itself which may be specfied as `inline`, `id`, or `file`. +<2> The script itself which may be specified as `inline`, `id`, or `file`. <3> Any named parameters that should be passed into the script. For example, the following script is used in a search request to return a diff --git a/docs/reference/modules/snapshots.asciidoc b/docs/reference/modules/snapshots.asciidoc index 9fd8e069480..4d74500d68e 100644 --- a/docs/reference/modules/snapshots.asciidoc +++ b/docs/reference/modules/snapshots.asciidoc @@ -277,7 +277,7 @@ GET /_snapshot/my_backup/_all ----------------------------------- // CONSOLE -The command fails if some of the snapshots are unavailable. The boolean parameter `ignore_unvailable` can be used to +The command fails if some of the snapshots are unavailable. The boolean parameter `ignore_unavailable` can be used to return all snapshots that are currently available. 
A currently running snapshot can be retrieved using the following command: diff --git a/docs/reference/query-dsl/function-score-query.asciidoc b/docs/reference/query-dsl/function-score-query.asciidoc index 3072372d179..6e83dd927e0 100644 --- a/docs/reference/query-dsl/function-score-query.asciidoc +++ b/docs/reference/query-dsl/function-score-query.asciidoc @@ -318,7 +318,7 @@ In the above example, the field is a <> and origin can be math (for example `now-1h`) is supported for origin. `scale`:: - Required for all types. Defines the distance from origin + offest at which the computed + Required for all types. Defines the distance from origin + offset at which the computed score will equal `decay` parameter. For geo fields: Can be defined as number+unit (1km, 12m,...). Default unit is meters. For date fields: Can to be defined as a number+unit ("1h", "10d",...). Default unit is milliseconds. For numeric field: Any number. diff --git a/docs/reference/setup/bootstrap-checks.asciidoc b/docs/reference/setup/bootstrap-checks.asciidoc index 0f88d03401c..8c1bab474c8 100644 --- a/docs/reference/setup/bootstrap-checks.asciidoc +++ b/docs/reference/setup/bootstrap-checks.asciidoc @@ -101,7 +101,7 @@ process has unlimited address space and is enforced only on Linux. To pass the maximum size virtual memory check, you must configure your system to allow the Elasticsearch process the ability to have unlimited address space. This can be done via `/etc/security/limits.conf` using -the `as` setting to `unlimited` (note that you might have to increaes +the `as` setting to `unlimited` (note that you might have to increase the limits for the `root` user too). 
=== Maximum map count check diff --git a/docs/reference/setup/important-settings.asciidoc b/docs/reference/setup/important-settings.asciidoc index aecd205b613..cd957addef1 100644 --- a/docs/reference/setup/important-settings.asciidoc +++ b/docs/reference/setup/important-settings.asciidoc @@ -70,7 +70,7 @@ environments, otherwise you might end up with nodes joining the wrong cluster. [[node.name]] === `node.name` -By default, Elasticsearch will take the 7 first charachter of the randomly generated uuid used as the node id. +By default, Elasticsearch will take the first 7 characters of the randomly generated uuid used as the node id. Note that the node id is persisted and does not change when a node restarts and therefore the default node name will also not change. diff --git a/docs/reference/setup/install/init-systemd.asciidoc b/docs/reference/setup/install/init-systemd.asciidoc index 3e252ca94cd..1532c5313ae 100644 --- a/docs/reference/setup/install/init-systemd.asciidoc +++ b/docs/reference/setup/install/init-systemd.asciidoc @@ -1,7 +1,7 @@ ==== SysV `init` vs `systemd` Elasticsearch is not started automatically after installation. How to start -and stop Elasticsearch depends on whether your sytem uses SysV `init` or +and stop Elasticsearch depends on whether your system uses SysV `init` or `systemd` (used by newer distributions). 
You can tell which is being used by running this command: diff --git a/docs/reference/setup/install/zip-targz.asciidoc b/docs/reference/setup/install/zip-targz.asciidoc index 826d94a4289..fc47214615f 100644 --- a/docs/reference/setup/install/zip-targz.asciidoc +++ b/docs/reference/setup/install/zip-targz.asciidoc @@ -21,7 +21,7 @@ endif::[] ifeval::["{release-state}"!="unreleased"] -The `.zip` archive for Elastisearch v{version} can be downloaded and installed as follows: +The `.zip` archive for Elasticsearch v{version} can be downloaded and installed as follows: ["source","sh",subs="attributes"] @@ -49,7 +49,7 @@ endif::[] ifeval::["{release-state}"!="unreleased"] -The `.tar.gz` archive for Elastisearch v{version} can be downloaded and installed as follows: +The `.tar.gz` archive for Elasticsearch v{version} can be downloaded and installed as follows: ["source","sh",subs="attributes"] -------------------------------------------- diff --git a/docs/reference/setup/sysconfig/heap_size.asciidoc b/docs/reference/setup/sysconfig/heap_size.asciidoc index f54ca7813a2..55fb95bc7e6 100644 --- a/docs/reference/setup/sysconfig/heap_size.asciidoc +++ b/docs/reference/setup/sysconfig/heap_size.asciidoc @@ -67,7 +67,7 @@ ES_JAVA_OPTS="-Xms4000m -Xmx4000m" ./bin/elasticsearch <2> <2> Set the minimum and maximum heap size to 4000 MB. NOTE: Configuring the heap for the <> -is different than the above. The values initiallly populated for the +is different than the above. The values initially populated for the Windows service can be configured as above but are different after the service has been installed. 
Consult the <> for additional diff --git a/docs/reference/testing/testing-framework.asciidoc b/docs/reference/testing/testing-framework.asciidoc index 0bf99b2fafa..60ea506a502 100644 --- a/docs/reference/testing/testing-framework.asciidoc +++ b/docs/reference/testing/testing-framework.asciidoc @@ -253,7 +253,7 @@ Usually, you would combine assertions and matchers in your test like this [source,java] ---------------------------- -SearchResponse seearchResponse = client().prepareSearch() ...; +SearchResponse searchResponse = client().prepareSearch() ...; assertHitCount(searchResponse, 4); assertFirstHit(searchResponse, hasId("4")); assertSearchHits(searchResponse, "1", "2", "3", "4");