[[normalizer]]
=== `normalizer`

The `normalizer` property of <<keyword,`keyword`>> fields is similar to
<<analyzer,`analyzer`>> except that it guarantees that the analysis chain
produces a single token.

The `normalizer` is applied prior to indexing the keyword, as well as at
search time when the `keyword` field is searched via a query parser such as
the <<query-dsl-match-query,`match`>> query or via a term-level query
such as the <<query-dsl-term-query,`term`>> query.

[source,js]
--------------------------------
PUT index
{
  "settings": {
    "analysis": {
      "normalizer": {
        "my_normalizer": {
          "type": "custom",
          "char_filter": [],
          "filter": ["lowercase", "asciifolding"]
        }
      }
    }
  },
  "mappings": {
    "_doc": {
      "properties": {
        "foo": {
          "type": "keyword",
          "normalizer": "my_normalizer"
        }
      }
    }
  }
}

PUT index/_doc/1
{
  "foo": "BÀR"
}

PUT index/_doc/2
{
  "foo": "bar"
}

PUT index/_doc/3
{
  "foo": "baz"
}

POST index/_refresh

GET index/_search
{
  "query": {
    "term": {
      "foo": "BAR"
    }
  }
}

GET index/_search
{
  "query": {
    "match": {
      "foo": "BAR"
    }
  }
}
--------------------------------
// CONSOLE

The above queries match documents 1 and 2 since `BÀR` is converted to `bar` at
both index and query time.

[source,js]
----------------------------
{
  "took": $body.took,
  "timed_out": false,
  "_shards": {
    "total": 1,
    "successful": 1,
    "skipped" : 0,
    "failed": 0
  },
  "hits": {
    "total": 2,
    "max_score": 0.47000363,
    "hits": [
      {
        "_index": "index",
        "_type": "_doc",
        "_id": "1",
        "_score": 0.47000363,
        "_source": {
          "foo": "BÀR"
        }
      },
      {
        "_index": "index",
        "_type": "_doc",
        "_id": "2",
        "_score": 0.47000363,
        "_source": {
          "foo": "bar"
        }
      }
    ]
  }
}
----------------------------
// TESTRESPONSE[s/"took".*/"took": "$body.took",/]
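
One way to inspect what a normalizer emits is the `_analyze` API, which accepts a
`normalizer` parameter. The short request below is an illustrative addition, not part
of the original example set; it assumes the `my_normalizer` defined above and should
return a single `bar` token for the input `BÀR`:

[source,js]
--------------------------------
GET index/_analyze
{
  "normalizer": "my_normalizer",
  "text": "BÀR"
}
--------------------------------
// CONSOLE
// TEST[continued]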

The fact that keywords are converted prior to indexing also means that
aggregations return normalized values:

[source,js]
----------------------------
GET index/_search
{
  "size": 0,
  "aggs": {
    "foo_terms": {
      "terms": {
        "field": "foo"
      }
    }
  }
}
----------------------------
// CONSOLE
// TEST[continued]

returns

[source,js]
----------------------------
{
  "took": 43,
  "timed_out": false,
  "_shards": {
    "total": 1,
    "successful": 1,
    "skipped" : 0,
    "failed": 0
  },
  "hits": {
    "total": 3,
    "max_score": 0.0,
    "hits": []
  },
  "aggregations": {
    "foo_terms": {
      "doc_count_error_upper_bound": 0,
      "sum_other_doc_count": 0,
      "buckets": [
        {
          "key": "bar",
          "doc_count": 2
        },
        {
          "key": "baz",
          "doc_count": 1
        }
      ]
    }
  }
}
----------------------------
// TESTRESPONSE[s/"took".*/"took": "$body.took",/]