resolve merge conflicts

Signed-off-by: alicejw <alicejw@amazon.com>
alicejw 2022-05-03 13:00:52 -07:00
commit 8a8f16e67c
70 changed files with 383 additions and 282 deletions

View File

@ -29,4 +29,4 @@ end
gem "tzinfo-data", platforms: [:mingw, :mswin, :x64_mingw, :jruby]
# Performance-booster for watching directories on Windows
gem "wdm", "~> 0.1.0" if Gem.win_platform?
gem "wdm", "~> 0.1.0" if Gem.win_platform?

View File

@ -96,7 +96,7 @@ If you're migrating from an existing Logstash installation, you can install the
1. Start Logstash:
```
docker run -it --rm --name logstash --net test opensearchproject/logstash-oss-with-opensearch-output-plugin:7.13.4 -e 'input { stdin { } } output {
docker run -it --rm --name logstash --net test opensearchproject/logstash-oss-with-opensearch-output-plugin:7.16.2 -e 'input { stdin { } } output {
opensearch {
hosts => ["https://opensearch:9200"]
index => "opensearch-logstash-docker-%{+YYYY.MM.dd}"

View File

@ -0,0 +1,51 @@
---
layout: default
title: Read from OpenSearch
parent: Logstash
nav_order: 220
---
# Read from OpenSearch
Just as you can ship Logstash events to an OpenSearch cluster using the [OpenSearch output plugin](https://github.com/opensearch-project/logstash-output-opensearch), you can also perform read operations on an OpenSearch cluster and load data into Logstash using the [OpenSearch input plugin](https://github.com/opensearch-project/logstash-input-opensearch).
The OpenSearch input plugin reads the results of a search query performed on an OpenSearch cluster and loads them into Logstash. This lets you replay test logs, reindex, and perform other operations based on the loaded data. You can schedule ingestion to run periodically by using
[cron expressions](https://opensearch.org/docs/latest/monitoring-plugins/alerting/cron/), or manually load data into Logstash by running the query once.
## OpenSearch input plugin
To run the OpenSearch input plugin, add the configuration to the `pipeline.conf` file in your Logstash `config` folder. The following example runs the `match_all` query filter and loads data once.
```yml
input {
  opensearch {
    hosts => "https://hostname:port"
    user => "admin"
    password => "admin"
    index => "logstash-logs-%{+YYYY.MM.dd}"
    query => '{ "query": { "match_all": {} } }'
  }
}
filter {
}
output {
}
```
To ingest data according to a schedule, use a cron expression that specifies the schedule you want. For example, to load data every minute, add `schedule => "* * * * *"` to the input section of your `pipeline.conf` file.
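For example, a minimal sketch of the input section with the one-minute schedule added (the host, credentials, and index are the same placeholders as in the earlier example):
```yml
input {
  opensearch {
    hosts => "https://hostname:port"
    user => "admin"
    password => "admin"
    index => "logstash-logs-%{+YYYY.MM.dd}"
    query => '{ "query": { "match_all": {} } }'
    # Run the query at the start of every minute.
    schedule => "* * * * *"
  }
}
```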
As with the output plugin, after adding your configuration to the `pipeline.conf` file, start Logstash by providing the path to the file:
```bash
$ bin/logstash -f config/pipeline.conf --config.reload.automatic
```
`config/pipeline.conf` is a relative path to the `pipeline.conf` file. You can use an absolute path as well.
Adding `stdout{}` to the `output{}` section of your `pipeline.conf` file prints the query results to the console.
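A minimal sketch:
```yml
output {
  # Print each event retrieved from OpenSearch to the console.
  stdout {}
}
```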
To reindex the data into an OpenSearch domain, add the destination domain configuration to the `output{}` section, as shown in the [OpenSearch output plugin documentation](https://opensearch.org/docs/latest/clients/logstash/ship-to-opensearch/#opensearch-output-plugin).

View File

@ -5,10 +5,10 @@ baseurl: "/docs/latest" # the subpath of your site, e.g. /blog
url: "https://opensearch.org" # the base hostname & protocol for your site, e.g. http://example.com
permalink: /:path/
opensearch_version: 1.3.1
opensearch_dashboards_version: 1.3.0
opensearch_major_minor_version: 1.3
lucene_version: 8_10_1
opensearch_version: 2.0.0-rc1
opensearch_dashboards_version: 2.0.0-rc1
opensearch_major_minor_version: 2.0-rc1
lucene_version: 9_1_0
# Build settings
markdown: kramdown

View File

@ -100,7 +100,7 @@ opensearch.hosts: ["https://localhost:9200"]
opensearch.ssl.verificationMode: none
opensearch.username: "kibanaserver"
opensearch.password: "kibanaserver"
opensearch.requestHeadersWhitelist: [ authorization,securitytenant ]
opensearch.requestHeadersAllowlist: [ authorization,securitytenant ]
#server.ssl.enabled: true
#server.ssl.certificate: /path/to/your/server/certificate
#server.ssl.key: /path/to/your/server/key

View File

@ -25,7 +25,7 @@ opensearch.hosts: ["https://localhost:9200"]
opensearch.ssl.verificationMode: full
opensearch.username: "kibanaserver"
opensearch.password: "kibanaserver"
opensearch.requestHeadersWhitelist: [ authorization,securitytenant ]
opensearch.requestHeadersAllowlist: [ authorization,securitytenant ]
server.ssl.enabled: true
server.ssl.certificate: /usr/share/opensearch-dashboards/config/client-cert.pem
server.ssl.key: /usr/share/opensearch-dashboards/config/client-cert-key.pem

View File

@ -1 +1 @@
message: "[OpenSearch 1.3.0 is live 🍾 Try the new ML commons plugin, new observability features, and much, much more!](/downloads.html)"
message: "This is a pre-release version of OpenSearch 2.0.0. Feel free to try it out and provide feedback. If you are looking for the most recent production-ready release, see the [1.x line](https://opensearch.org/lines/1x.html)"

View File

@ -1,8 +1,11 @@
{
"current": "1.3",
"past": [
"current": "2.0",
"all": [
"2.0",
"1.3",
"1.2",
"1.1",
"1.0"
]
}
],
"latest": "1.3"
}

View File

@ -104,7 +104,6 @@ GET finished_flight_job/_search
"hits" : [
{
"_index" : "finished_flight_job",
"_type" : "_doc",
"_id" : "dSNKGb8U3OJOmC4RqVCi1Q",
"_score" : 3.845883,
"_source" : {
@ -116,7 +115,6 @@ GET finished_flight_job/_search
},
{
"_index" : "finished_flight_job",
"_type" : "_doc",
"_id" : "_D7oqOy7drx9E-MG96U5RA",
"_score" : 3.845883,
"_source" : {
@ -128,7 +126,6 @@ GET finished_flight_job/_search
},
{
"_index" : "finished_flight_job",
"_type" : "_doc",
"_id" : "YuZ8tOt1OsBA54e84WuAEw",
"_score" : 3.6988301,
"_source" : {
@ -140,7 +137,6 @@ GET finished_flight_job/_search
},
{
"_index" : "finished_flight_job",
"_type" : "_doc",
"_id" : "W_-e7bVmH6eu8veJeK8ZxQ",
"_score" : 3.6988301,
"_source" : {

View File

@ -731,7 +731,6 @@ DELETE _plugins/_transform/<transform_id>
{
"delete": {
"_index": ".opensearch-ism-config",
"_type": "_doc",
"_id": "sample",
"_version": 4,
"result": "deleted",

View File

@ -533,7 +533,6 @@ DELETE _plugins/_ism/policies/policy_1
```json
{
"_index": ".opendistro-ism-config",
"_type": "_doc",
"_id": "policy_1",
"_version": 3,
"result": "deleted",

View File

@ -99,6 +99,7 @@ ISM supports the following operations:
- [read_only](#read_only)
- [read_write](#read_write)
- [replica_count](#replica_count)
- [shrink](#shrink)
- [close](#close)
- [open](#open)
- [delete](#delete)
@ -162,6 +163,59 @@ Parameter | Description | Type | Required
For information about setting replicas, see [Primary and replica shards]({{site.url}}{{site.baseurl}}/opensearch#primary-and-replica-shards).
### shrink
Allows you to reduce the number of primary shards in your indexes. With this action, you can specify:
- The number of primary shards that the target index should contain.
- A maximum shard size for the primary shards in the target index.
- A percentage by which to shrink the number of primary shards in the target index.
```json
"shrink": {
"num_new_shards": 1,
"target_index_name_template": {
"source": "{{ctx.index}}_shrunken"
},
"aliases": [
"my-alias": {}
],
"force_unsafe": false
}
```
Parameter | Description | Type | Example | Required
:--- | :--- | :--- | :--- | :---
`num_new_shards` | The maximum number of primary shards in the shrunken index. | integer | `5` | Yes, however it cannot be used with `max_shard_size` or `percentage_of_source_shards`
`max_shard_size` | The maximum size in bytes of a shard for the target index. | keyword | `5gb` | Yes, however it cannot be used with `num_new_shards` or `percentage_of_source_shards`
`percentage_of_source_shards` | Percentage of the number of original primary shards to shrink. This parameter indicates the minimum percentage to use when shrinking the number of primary shards. Must be between 0.0 and 1.0, exclusive. | Percentage | `0.5` | Yes, however it cannot be used with `max_shard_size` or `num_new_shards`
`target_index_name_template` | The name of the shrunken index. Accepts strings and the Mustache variables `{{ctx.index}}` and `{{ctx.indexUuid}}`. | `string` or Mustache template | `{"source": "{{ctx.index}}_shrunken"}` | No
`aliases` | Aliases to add to the new index. | object | `myalias` | No, but must be an array of alias objects
`force_unsafe` | If true, executes the shrink action even if there are no replicas. | boolean | `false` | No
If you want to add `aliases` to the action, the parameter must include an array of [alias objects]({{site.url}}{{site.baseurl}}/opensearch/rest-api/alias/). For example,
```json
"aliases": [
{
"my-alias": {}
},
{
"my-second-alias": {
"is_write_index": false,
"filter": {
"multi_match": {
"query": "QUEEN",
"fields": ["speaker", "text_entry"]
}
},
"index_routing" : "1",
"search_routing" : "1"
}
},
]
```
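For context, the `shrink` action runs as part of a state in an ISM policy. The following is a minimal sketch rather than a complete reference; the policy name, state names, and seven-day transition are hypothetical:
```json
PUT _plugins/_ism/policies/shrink_policy
{
  "policy": {
    "description": "Shrink indexes to one primary shard after seven days",
    "default_state": "hot",
    "states": [
      {
        "name": "hot",
        "actions": [],
        "transitions": [
          { "state_name": "shrunken", "conditions": { "min_index_age": "7d" } }
        ]
      },
      {
        "name": "shrunken",
        "actions": [
          {
            "shrink": {
              "num_new_shards": 1,
              "target_index_name_template": { "source": "{{ctx.index}}_shrunken" },
              "force_unsafe": false
            }
          }
        ],
        "transitions": []
      }
    ]
  }
}
```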
### close
Closes the managed index.

View File

@ -57,7 +57,6 @@ layout: table_wrappers
</a>
</div>
<nav role="navigation" aria-label="Main" id="site-nav" class="site-nav">
{% assign past_versions = site.data.versions.past | join: ";" %}
<div class="version-wrapper">
<version-selector selected="{{ site.data.versions.current }}"></version-selector>
</div>

View File

@ -173,7 +173,6 @@ POST /_plugins/_ml/models/_search
"hits" : [
{
"_index" : ".plugins-ml-model",
"_type" : "_doc",
"_id" : "-QkKJX8BvytMh9aUeuLD",
"_version" : 1,
"_seq_no" : 12,
@ -188,7 +187,6 @@ POST /_plugins/_ml/models/_search
},
{
"_index" : ".plugins-ml-model",
"_type" : "_doc",
"_id" : "OxkvHn8BNJ65KnIpck8x",
"_version" : 1,
"_seq_no" : 2,
@ -219,7 +217,6 @@ The API returns the following:
```json
{
"_index" : ".plugins-ml-model",
"_type" : "_doc",
"_id" : "MzcIJX8BA7mbufL6DOwl",
"_version" : 2,
"result" : "deleted",
@ -602,7 +599,6 @@ GET /_plugins/_ml/tasks/_search
"hits" : [
{
"_index" : ".plugins-ml-task",
"_type" : "_doc",
"_id" : "_wnLJ38BvytMh9aUi-Ia",
"_version" : 4,
"_seq_no" : 29,
@ -622,7 +618,6 @@ GET /_plugins/_ml/tasks/_search
},
{
"_index" : ".plugins-ml-task",
"_type" : "_doc",
"_id" : "wwRRLX8BydmmU1x6I-AI",
"_version" : 3,
"_seq_no" : 38,
@ -658,7 +653,6 @@ The API returns the following:
```json
{
"_index" : ".plugins-ml-task",
"_type" : "_doc",
"_id" : "xQRYLX8BydmmU1x6nuD3",
"_version" : 4,
"result" : "deleted",

View File

@ -1202,7 +1202,6 @@ DELETE _plugins/_anomaly_detection/detectors/<detectorId>
```json
{
"_index": ".opensearch-anomaly-detectors",
"_type": "_doc",
"_id": "70TxTXwBjd8s6RK4j1Pj",
"_version": 2,
"result": "deleted",
@ -1792,7 +1791,6 @@ POST _plugins/_anomaly_detection/detectors/_search
"hits": [
{
"_index": ".opensearch-anomaly-detectors",
"_type": "_doc",
"_id": "Zi5zTXwBwf_U8gjUTfJG",
"_version": 1,
"_seq_no": 1,
@ -1924,7 +1922,6 @@ POST _plugins/_anomaly_detection/detectors/tasks/_search
"hits": [
{
"_index": ".opensearch-anomaly-detection-state",
"_type": "_doc",
"_id": "fm-RTXwBYwCbWecgB753",
"_version": 34,
"_seq_no": 928,
@ -2263,7 +2260,6 @@ For information about the response body fields, see [Anomaly result mapping]({{s
"hits": [
{
"_index": ".opensearch-anomaly-results-history-2021.10.04-1",
"_type": "_doc",
"_id": "686KTXwB6HknB84SMr6G",
"_version": 1,
"_seq_no": 103622,
@ -2389,7 +2385,6 @@ POST _plugins/_anomaly_detection/detectors/results/_search
"hits": [
{
"_index": ".opensearch-anomaly-results-history-2021.10.04-1",
"_type": "_doc",
"_id": "VRyRTXwBDx7vzPBV8jYC",
"_version": 1,
"_seq_no": 149657,

View File

@ -967,7 +967,6 @@ DELETE _plugins/_alerting/monitors/<monitor_id>
```json
{
"_index": ".opensearch-scheduled-jobs",
"_type": "_doc",
"_id": "OYAHOmgBl3cmwnqZl_yH",
"_version": 2,
"result": "deleted",

View File

@ -116,8 +116,6 @@ GET localhost:9600/_plugins/_performanceanalyzer/metrics/units
{
"Disk_Utilization": "%",
"Cache_Request_Hit": "count",
"TermVectors_Memory": "B",
"Segments_Memory": "B",
"HTTP_RequestDocs": "count",
"Net_TCP_Lost": "segments/flow",
"Refresh_Time": "ms",
@ -138,7 +136,6 @@ GET localhost:9600/_plugins/_performanceanalyzer/metrics/units
"Merge_CurrentEvent": "count",
"Indexing_Buffer": "B",
"Bitset_Memory": "B",
"Norms_Memory": "B",
"Net_PacketDropRate4": "packets/s",
"Heap_Committed": "B",
"Net_PacketDropRate6": "packets/s",
@ -163,7 +160,6 @@ GET localhost:9600/_plugins/_performanceanalyzer/metrics/units
"Flush_Event": "count",
"Net_TCP_RxQ": "segments/flow",
"Refresh_Event": "count",
"Points_Memory": "B",
"Flush_Time": "ms",
"Heap_Init": "B",
"CPU_Utilization": "cores",
@ -175,8 +171,6 @@ GET localhost:9600/_plugins/_performanceanalyzer/metrics/units
"Net_TCP_SendCWND": "B/flow",
"Cache_Request_Eviction": "count",
"Segments_Total": "count",
"Terms_Memory": "B",
"DocValues_Memory": "B",
"Heap_Used": "B",
"Cache_FieldData_Eviction": "count",
"IO_TotalSyscallRate": "count/s",
@ -184,7 +178,6 @@ GET localhost:9600/_plugins/_performanceanalyzer/metrics/units
"Net_Throughput": "B/s",
"Paging_RSS": "pages",
"Indexing_ThrottleTime": "ms",
"StoredFields_Memory": "B",
"IndexWriter_Memory": "B",
"Master_PendingQueueSize": "count",
"Net_TCP_SSThresh": "B/flow",

View File

@ -88,7 +88,7 @@ mount -o remount /dev/shm
Performance Analyzer supports encryption in transit for requests. It currently does *not* support client or server authentication for requests. To enable encryption in transit, edit `performance-analyzer.properties` in your `$OPENSEARCH_HOME` directory:
```bash
vi $OPENSEARCH_HOME/plugins/opensearch-performance-analyzer/pa_config/performance-analyzer.properties
vi $OPENSEARCH_HOME/config/opensearch-performance-analyzer/performance-analyzer.properties
```
Change the following lines to configure encryption in transit. Note that `certificate-file-path` must be a certificate for the server, not a root CA:

View File

@ -261,48 +261,6 @@ This list is extensive. We recommend using Ctrl/Cmd + F to find what you're look
<td>The number of segments.
</td>
</tr>
<tr>
<td>Segments_Memory
</td>
<td>Estimated memory usage of segments in bytes.
</td>
</tr>
<tr>
<td>Terms_Memory
</td>
<td>Estimated memory usage of terms dictionaries in bytes.
</td>
</tr>
<tr>
<td>StoredFields_Memory
</td>
<td>Estimated memory usage of stored fields in bytes.
</td>
</tr>
<tr>
<td>TermVectors_Memory
</td>
<td>Estimated memory usage of term vectors in bytes.
</td>
</tr>
<tr>
<td>Norms_Memory
</td>
<td>Estimated memory usage of norms (normalization factors) in bytes.
</td>
</tr>
<tr>
<td>Points_Memory
</td>
<td>Estimated memory usage of points in bytes.
</td>
</tr>
<tr>
<td>DocValues_Memory
</td>
<td>Estimated memory usage of doc values in bytes.
</td>
</tr>
<tr>
<td>IndexWriter_Memory
</td>

View File

@ -709,7 +709,19 @@ GET opensearch_dashboards_sample_data_logs/_search
}
}
```
If you add a document with malformed fields to an index that has `ignore_malformed` set to `false` in its mappings, OpenSearch rejects the entire document. You can set `ignore_malformed` to `true` to specify that OpenSearch should ignore malformed fields instead. The default is `false`.
```json
...
"mappings": {
  "properties": {
    "ips": {
      "type": "ip_range",
      "ignore_malformed": true
    }
  }
}
```
## filter, filters
A `filter` aggregation is a query clause, exactly like a search query — `match` or `term` or `range`. You can use the `filter` aggregation to narrow down the entire set of documents to a specific set before creating buckets.
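For example, a sketch that narrows the sample web logs with one filter before computing a metric; the field names (`extension`, `bytes`) are assumptions about the sample data:
```json
GET opensearch_dashboards_sample_data_logs/_search
{
  "size": 0,
  "aggs": {
    "css_requests": {
      "filter": { "match": { "extension": "css" } },
      "aggs": {
        "avg_bytes": { "avg": { "field": "bytes" } }
      }
    }
  }
}
```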

View File

@ -37,14 +37,12 @@ After you assess all these requirements, we recommend you use a benchmark testin
This page demonstrates how to work with the different node types. It assumes that you have a four-node cluster similar to the preceding illustration.
## Prerequisites
Before you get started, you must install and configure OpenSearch on all of your nodes. For information about the available options, see [Install and configure OpenSearch]({{site.url}}{{site.baseurl}}/opensearch/install/).
After you're done, use SSH to connect to each node, then open the `config/opensearch.yml` file. You can set all configurations for your cluster in this file.
## Step 1: Name a cluster
Specify a unique name for the cluster. If you don't specify a cluster name, it's set to `opensearch` by default. Setting a descriptive cluster name is important, especially if you want to run multiple clusters inside a single network.
@ -63,12 +61,10 @@ cluster.name: opensearch-cluster
Make the same change on all the nodes to make sure that they'll join to form a cluster.
## Step 2: Set node attributes for each node in a cluster
After you name the cluster, set node attributes for each node in your cluster.
#### Cluster manager node
Give your cluster manager node a name. If you don't specify a name, OpenSearch assigns a machine-generated name that makes the node difficult to monitor and troubleshoot.
@ -83,7 +79,6 @@ You can also explicitly specify that this node is a cluster manager node, even t
node.roles: [ cluster_manager ]
```
#### Data nodes
Change the name of two nodes to `opensearch-d1` and `opensearch-d2`, respectively:
@ -91,6 +86,7 @@ Change the name of two nodes to `opensearch-d1` and `opensearch-d2`, respectivel
```yml
node.name: opensearch-d1
```
```yml
node.name: opensearch-d2
```
@ -103,7 +99,6 @@ node.roles: [ data, ingest ]
You can also specify any other attributes that you'd like to set for the data nodes.
#### Coordinating node
Change the name of the coordinating node to `opensearch-c1`:
@ -118,7 +113,6 @@ Every node is a coordinating node by default, so to make this node a dedicated c
node.roles: []
```
## Step 3: Bind a cluster to specific IP addresses
`network.host` defines the IP address used to bind the node. By default, OpenSearch listens on localhost, which limits the cluster to a single node. You can also use `_local_` and `_site_` to bind to any loopback or site-local address, whether IPv4 or IPv6:
@ -135,7 +129,6 @@ network.host: <IP address of the node>
Make sure to configure these settings on all of your nodes.
## Step 4: Configure discovery hosts for a cluster
Now that you've configured the network hosts, you need to configure the discovery hosts.
@ -150,7 +143,6 @@ For example, for `opensearch-cluster_manager` the line looks something like this
discovery.seed_hosts: ["<private IP of opensearch-d1>", "<private IP of opensearch-d2>", "<private IP of opensearch-c1>"]
```
## Step 5: Start the cluster
After you set the configurations, start OpenSearch on all nodes:
@ -181,7 +173,6 @@ x.x.x.x 23 38 0 0.12 0.07 0.06 md - o
To better understand and monitor your cluster, use the [cat API]({{site.url}}{{site.baseurl}}/opensearch/catapis/).
## (Advanced) Step 6: Configure shard allocation awareness or forced awareness
If your nodes are spread across several geographical zones, you can configure shard allocation awareness to allocate all replica shards to a zone that's different from their primary shard.
@ -193,6 +184,7 @@ To configure shard allocation awareness, add zone attributes to `opensearch-d1`
```yml
node.attr.zone: zoneA
```
```yml
node.attr.zone: zoneB
```
@ -233,7 +225,6 @@ If that is not the case, and `opensearch-d1` and `opensearch-d2` do not have the
Choosing allocation awareness or forced awareness depends on how much space you might need in each zone to balance your primary and replica shards.
## (Advanced) Step 7: Set up a hot-warm architecture
You can design a hot-warm architecture where you first index your data to hot nodes---fast and expensive---and after a certain period of time move it to warm nodes---slow and cheap.
@ -247,6 +238,7 @@ To configure a hot-warm storage architecture, add `temp` attributes to `opensear
```yml
node.attr.temp: hot
```
```yml
node.attr.temp: warm
```
@ -317,7 +309,6 @@ A popular approach is to configure your [index templates]({{site.url}}{{site.bas
You can then use the [Index State Management (ISM)]({{site.url}}{{site.baseurl}}/im-plugin/) plugin to periodically check the age of an index and specify actions to take on it. For example, when the index reaches a specific age, change the `index.routing.allocation.require.temp` setting to `warm` to automatically move your data from hot nodes to warm nodes.
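For reference, the underlying setting change that such a policy automates might look like the following sketch (index name hypothetical):
```json
PUT logs-2022-01-01/_settings
{
  "index.routing.allocation.require.temp": "warm"
}
```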
## Next steps
If you are using the security plugin, the previous request to `_cat/nodes?v` might have failed with an initialization error. For full guidance around using the security plugin, see [Security configuration]({{site.url}}{{site.baseurl}}/security-plugin/configuration/index/).

View File

@ -6,13 +6,13 @@ nav_order: 12
# Index aliases
An alias is a virtual index name that can point to one or more indices.
An alias is a virtual index name that can point to one or more indexes.
If your data is spread across multiple indices, rather than keeping track of which indices to query, you can create an alias and query it instead.
If your data is spread across multiple indexes, rather than keeping track of which indexes to query, you can create an alias and query it instead.
For example, if you're storing logs into indices based on the month and you frequently query the logs for the previous two months, you can create a `last_2_months` alias and update the indices it points to each month.
For example, if you're storing logs into indexes based on the month and you frequently query the logs for the previous two months, you can create a `last_2_months` alias and update the indexes it points to each month.
Because you can change the indices an alias points to at any time, referring to indices using aliases in your applications allows you to reindex your data without any downtime.
Because you can change the indexes an alias points to at any time, referring to indexes using aliases in your applications allows you to reindex your data without any downtime.
---
@ -63,7 +63,7 @@ To check if `alias1` refers to `index-1`, run the following command:
GET alias1
```
## Add or remove indices
## Add or remove indexes
You can perform multiple actions in the same `_aliases` operation.
For example, the following command removes `index-1` and adds `index-2` to `alias1`:
@ -90,7 +90,7 @@ POST _aliases
The `add` and `remove` actions occur atomically, which means that at no point will `alias1` point to both `index-1` and `index-2`.
You can also add indices based on an index pattern:
You can also add indexes based on an index pattern:
```json
POST _aliases
@ -108,7 +108,7 @@ POST _aliases
## Manage aliases
To list the mapping of aliases to indices, run the following command:
To list the mapping of aliases to indexes, run the following command:
```json
GET _cat/aliases?v
@ -121,7 +121,7 @@ alias index filter routing.index routing.search
alias1 index-1 * - -
```
To check which indices an alias points to, run the following command:
To check which indexes an alias points to, run the following command:
```json
GET _alias/alias1
@ -166,7 +166,7 @@ PUT index-1
## Create filtered aliases
You can create a filtered alias to access a subset of documents or fields from the underlying indices.
You can create a filtered alias to access a subset of documents or fields from the underlying indexes.
This command adds only a specific timestamp field to `alias1`:

View File

@ -68,16 +68,16 @@ PUT movies/_doc/1
Because you must specify an ID, if you run this command 10 times, you still have just one document indexed with the `_version` field incremented to 10.
Indices default to one primary shard and one replica. If you want to specify non-default settings, create the index before adding documents:
Indexes default to one primary shard and one replica. If you want to specify non-default settings, create the index before adding documents:
```json
PUT more-movies
{ "settings": { "number_of_shards": 6, "number_of_replicas": 2 } }
```
## Naming restrictions for indices
## Naming restrictions for indexes
OpenSearch indices have the following naming restrictions:
OpenSearch indexes have the following naming restrictions:
- All letters must be lowercase.
- Index names can't begin with underscores (`_`) or hyphens (`-`).

View File

@ -6,7 +6,7 @@ nav_order: 15
# Index templates
Index templates let you initialize new indices with predefined mappings and settings. For example, if you continuously index log data, you can define an index template so that all of these indices have the same number of shards and replicas.
Index templates let you initialize new indexes with predefined mappings and settings. For example, if you continuously index log data, you can define an index template so that all of these indexes have the same number of shards and replicas.
### Create a template
@ -95,7 +95,7 @@ GET logs-2020-01-01
}
```
Any additional indices that match this pattern---`logs-2020-01-02`, `logs-2020-01-03`, and so on---will inherit the same mappings and settings.
Any additional indexes that match this pattern---`logs-2020-01-02`, `logs-2020-01-03`, and so on---will inherit the same mappings and settings.
Index patterns cannot contain any of the following characters: `:`, `"`, `+`, `/`, `\`, `|`, `?`, `#`, `>`, and `<`.
@ -127,7 +127,7 @@ HEAD _index_template/<name>
### Configure multiple templates
You can create multiple index templates for your indices. If the index name matches more than one template, OpenSearch merges all mappings and settings from all matching templates and applies them to the index.
You can create multiple index templates for your indexes. If the index name matches more than one template, OpenSearch merges all mappings and settings from all matching templates and applies them to the index.
The settings from the more recently created index templates override the settings of older index templates. So, you can first define a few common settings in a generic template that can act as a catch-all and then add more specialized settings as required.
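A minimal sketch with two hypothetical templates: a generic catch-all plus a more specialized template. With the composable `_index_template` API, you can also set an explicit `priority` so that the more specialized template wins where patterns overlap:
```json
PUT _index_template/logs-generic
{
  "index_patterns": ["logs-*"],
  "priority": 0,
  "template": {
    "settings": { "number_of_shards": 1, "number_of_replicas": 1 }
  }
}

PUT _index_template/logs-special
{
  "index_patterns": ["logs-2020-*"],
  "priority": 10,
  "template": {
    "settings": { "number_of_replicas": 2 }
  }
}
```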

View File

@ -43,11 +43,11 @@ services:
- ./admin.pem:/usr/share/opensearch/config/admin.pem
- ./admin-key.pem:/usr/share/opensearch/config/admin-key.pem
- ./custom-opensearch.yml:/usr/share/opensearch/config/opensearch.yml
- ./internal_users.yml:/usr/share/opensearch/plugins/opensearch-security/securityconfig/internal_users.yml
- ./roles_mapping.yml:/usr/share/opensearch/plugins/opensearch-security/securityconfig/roles_mapping.yml
- ./tenants.yml:/usr/share/opensearch/plugins/opensearch-security/securityconfig/tenants.yml
- ./roles.yml:/usr/share/opensearch/plugins/opensearch-security/securityconfig/roles.yml
- ./action_groups.yml:/usr/share/opensearch/plugins/opensearch-security/securityconfig/action_groups.yml
- ./internal_users.yml:/usr/share/opensearch/config/opensearch-security/internal_users.yml
- ./roles_mapping.yml:/usr/share/opensearch/config/opensearch-security/roles_mapping.yml
- ./tenants.yml:/usr/share/opensearch/config/opensearch-security/tenants.yml
- ./roles.yml:/usr/share/opensearch/config/opensearch-security/roles.yml
- ./action_groups.yml:/usr/share/opensearch/config/opensearch-security/action_groups.yml
ports:
- 9200:9200
- 9600:9600 # required for Performance Analyzer
@ -79,11 +79,11 @@ services:
- ./admin.pem:/usr/share/opensearch/config/admin.pem
- ./admin-key.pem:/usr/share/opensearch/config/admin-key.pem
- ./custom-opensearch.yml:/usr/share/opensearch/config/opensearch.yml
- ./internal_users.yml:/usr/share/opensearch/plugins/opensearch-security/securityconfig/internal_users.yml
- ./roles_mapping.yml:/usr/share/opensearch/plugins/opensearch-security/securityconfig/roles_mapping.yml
- ./tenants.yml:/usr/share/opensearch/plugins/opensearch-security/securityconfig/tenants.yml
- ./roles.yml:/usr/share/opensearch/plugins/opensearch-security/securityconfig/roles.yml
- ./action_groups.yml:/usr/share/opensearch/plugins/opensearch-security/securityconfig/action_groups.yml
- ./internal_users.yml:/usr/share/opensearch/config/opensearch-security/internal_users.yml
- ./roles_mapping.yml:/usr/share/opensearch/config/opensearch-security/roles_mapping.yml
- ./tenants.yml:/usr/share/opensearch/config/opensearch-security/tenants.yml
- ./roles.yml:/usr/share/opensearch/config/opensearch-security/roles.yml
- ./action_groups.yml:/usr/share/opensearch/config/opensearch-security/action_groups.yml
networks:
- opensearch-net
opensearch-dashboards:
@ -133,7 +133,7 @@ opendistro_security.audit.config.disabled_rest_categories: NONE
opendistro_security.audit.config.disabled_transport_categories: NONE
```
Use this same override process to specify new [authentication settings]({{site.url}}{{site.baseurl}}/security-plugin/configuration/configuration/) in `/usr/share/opensearch/plugins/opensearch-security/securityconfig/config.yml`, as well as new default [internal users, roles, mappings, action groups, and tenants]({{site.url}}{{site.baseurl}}/security-plugin/configuration/yaml/).
Use this same override process to specify new [authentication settings]({{site.url}}{{site.baseurl}}/security-plugin/configuration/configuration/) in `/usr/share/opensearch/config/opensearch-security/config.yml`, as well as new default [internal users, roles, mappings, action groups, and tenants]({{site.url}}{{site.baseurl}}/security-plugin/configuration/yaml/).
To start the cluster, run `docker-compose up`.

View File

@ -222,7 +222,7 @@ You can also configure `docker-compose.yml` and `opensearch.yml` [to take your o
docker ps # Look up the container id
docker exec -it <container-id> /bin/bash
# Inside container
cd plugins/opensearch_performance_analyzer/pa_config/
cd config/opensearch-performance-analyzer/
vi performance-analyzer.properties
```

View File

@ -64,6 +64,25 @@ bin/opensearch-plugin list
</tr>
</thead>
<tbody>
<tr>
<td>2.0.0.0-rc1</td>
<td>
<pre>opensearch-alerting 2.0.0.0-rc1
opensearch-anomaly-detection 2.0.0.0-rc1
opensearch-asynchronous-search 2.0.0.0-rc1
opensearch-cross-cluster-replication 2.0.0.0-rc1
opensearch-index-management 2.0.0.0-rc1
opensearch-job-scheduler 2.0.0.0-rc1
opensearch-knn 2.0.0.0-rc1
opensearch-ml 2.0.0.0-rc1
opensearch-observability 2.0.0.0-rc1
opensearch-performance-analyzer 2.0.0.0-rc1
opensearch-reports-scheduler 2.0.0.0-rc1
opensearch-security 2.0.0.0-rc1
opensearch-sql 2.0.0.0-rc1
</pre>
</td>
</tr>
<tr>
<td>1.3.1</td>
<td>

View File

@ -59,7 +59,7 @@ In a tarball installation, Performance Analyzer collects data when it is enabled
```bash
cd /usr/share/opensearch # navigate to the OpenSearch home directory
cd plugins/opensearch_performance_analyzer/pa_config/
cd config/opensearch-performance-analyzer/
vi performance-analyzer.properties
```
@ -114,7 +114,7 @@ In a tarball installation, Performance Analyzer collects data when it is enabled
1. Launch the agent CLI:
```bash
OPENSEARCH_HOME="$PWD" ./bin/performance-analyzer-agent-cli
OPENSEARCH_HOME="$PWD" OPENSEARCH_PATH_CONF="$PWD/config" ./bin/performance-analyzer-agent-cli
```
1. In a separate window, enable the Performance Analyzer plugin:

_opensearch/mappings.md Normal file
View File

@ -0,0 +1,105 @@
---
layout: default
title: Mapping
nav_order: 13
---
# About Mappings
You can define how documents and their fields are stored and indexed by creating a mapping.
If you're just starting to build out your cluster and data, you may not know exactly how your data should be stored. In those cases, you can use dynamic mappings, which tell OpenSearch to dynamically add data and its fields. However, if you know exactly what types your data falls under and want to enforce that standard, then you can use explicit mappings.
For example, if you want to indicate that `year` should be of type `text` instead of an `integer`, and `age` should be an `integer`, you can do so with explicit mappings. Using dynamic mapping, OpenSearch might interpret both `year` and `age` as integers.
This section provides an example of how to create an index mapping and how to add a document to it that will be validated against the `ip_range` type.
#### Table of contents
1. TOC
{:toc}
---
## Dynamic mapping
When you index a document, OpenSearch adds fields automatically with dynamic mapping. You can also explicitly add fields to an index mapping.
#### Dynamic mapping types
Type | Description
:--- | :---
null | A `null` field can't be indexed or searched. When a field is set to null, OpenSearch behaves as if that field has no values.
boolean | OpenSearch accepts `true` and `false` as boolean values. An empty string is equal to `false`.
float | A single-precision 32-bit floating point number.
double | A double-precision 64-bit floating point number.
integer | A signed 32-bit number.
object | Objects are standard JSON objects, which can have fields and mappings of their own. For example, a `movies` object can have additional properties such as `title`, `year`, and `director`.
array | Arrays in OpenSearch can only store values of one type, such as an array of just integers or strings. Empty arrays are treated as though they are fields with no values.
text | A string sequence of characters that represent full-text values.
keyword | A string sequence of structured characters, such as an email address or ZIP code.
date detection string | Enabled by default, if new string fields match a date's format, then the string is processed as a `date` field. For example, `date: "2012/03/11"` is processed as a date.
numeric detection string | If disabled, OpenSearch may automatically process numeric values as strings when they should be processed as numbers. When enabled, OpenSearch can process strings into `long`, `integer`, `short`, `byte`, `double`, `float`, `half_float`, `scaled_float`, and `unsigned_long`. Default is disabled.
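As a quick sketch of dynamic mapping in action (index name hypothetical), indexing a document into a nonexistent index creates the index and infers the field types, which you can then inspect:
```json
PUT movies/_doc/1
{
  "title": "Rush",
  "year": 2013
}

GET movies/_mapping
```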
## Explicit mapping
If you know exactly what your field data types need to be, you can specify them in your request body when creating your index.
```json
{
  "mappings": {
    "properties": {
      "year": { "type": "text" },
      "age": { "type": "integer" },
      "director": { "type": "text" }
    }
  }
}
```
### Response
```json
{
  "acknowledged": true,
  "shards_acknowledged": true,
  "index": "sample-index1"
}
```
---
## Mapping example usage
The following example shows how to create a mapping that tells OpenSearch to ignore malformed IP addresses that do not conform to the `ip_range` data type, rather than rejecting the entire document. You accomplish this by setting the `ignore_malformed` parameter to `true`.
### Create an index with an ip_range mapping
To create an index, use a PUT request:
```json
PUT _index_ip
{
  "mappings": {
    "dynamic_templates": [
      {
        "ip_range": {
          "match": "*ip_range",
          "mapping": {
            "type": "ip_range",
            "ignore_malformed": true
          }
        }
      }
    ]
  }
}
```
You can add a document to your index that has an IP range specified:
```json
PUT _index_ip/_doc/<id>
{
  "source_ip_range": "192.168.1.1/32"
}
```
Indexing this document does not throw an error because `ignore_malformed` is set to `true`.
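With the same mapping, a document containing a hypothetical malformed value is also accepted; the malformed field is dropped rather than the whole document being rejected. A sketch:
```json
PUT _index_ip/_doc/2
{
  "source_ip_range": "not-a-valid-range"
}
```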

View File

@ -113,7 +113,6 @@ GET shakespeare/_search
"hits": [
{
"_index": "shakespeare",
"_type": "_doc",
"_id": "88020",
"_score": 11.356054,
"_source": {

View File


@ -41,7 +41,6 @@ GET _search?q=speaker:queen
"hits": [
{
"_index": "new_shakespeare",
"_type": "_doc",
"_id": "28559",
"_score": 4.4368687,
"_source": {
@ -90,7 +89,6 @@ With query DSL, however, you can include an HTTP request body to look for result
"hits": [
{
"_index": "new_shakespeare",
"_type": "_doc",
"_id": "100763",
"_score": 7.8623476,
"_source": {
@ -105,7 +103,6 @@ With query DSL, however, you can include an HTTP request body to look for result
},
{
"_index": "shakespeare",
"_type": "_doc",
"_id": "28559",
"_score": 5.8923807,
"_source": {

View File

@ -95,7 +95,6 @@ The search query “To be, or not to be” is analyzed and tokenized into an arr
"hits" : [
{
"_index" : "shakespeare",
"_type" : "_doc",
"_id" : "34229",
"_score" : 17.419369,
"_source" : {
@ -110,7 +109,6 @@ The search query “To be, or not to be” is analyzed and tokenized into an arr
},
{
"_index" : "shakespeare",
"_type" : "_doc",
"_id" : "109930",
"_score" : 14.883024,
"_source" : {
@ -125,7 +123,6 @@ The search query “To be, or not to be” is analyzed and tokenized into an arr
},
{
"_index" : "shakespeare",
"_type" : "_doc",
"_id" : "103117",
"_score" : 14.782743,
"_source" : {
@ -180,7 +177,6 @@ GET shakespeare/_search
"hits" : [
{
"_index" : "shakespeare",
"_type" : "_doc",
"_id" : "32700",
"_score" : 4.2540946,
"_source" : {
@ -195,7 +191,6 @@ GET shakespeare/_search
},
{
"_index" : "shakespeare",
"_type" : "_doc",
"_id" : "32702",
"_score" : 4.2540946,
"_source" : {
@ -210,7 +205,6 @@ GET shakespeare/_search
},
{
"_index" : "shakespeare",
"_type" : "_doc",
"_id" : "32709",
"_score" : 4.2540946,
"_source" : {

View File

@ -123,7 +123,6 @@ In the response, pay particular attention to the top-level `errors` boolean. If
{
"index": {
"_index": "movies",
"_type": "_doc",
"_id": "tt1979320",
"_version": 1,
"result": "created",
@ -140,7 +139,6 @@ In the response, pay particular attention to the top-level `errors` boolean. If
{
"create": {
"_index": "movies",
"_type": "_doc",
"_id": "tt1392214",
"status": 409,
"error": {
@ -155,7 +153,6 @@ In the response, pay particular attention to the top-level `errors` boolean. If
{
"update": {
"_index": "movies",
"_type": "_doc",
"_id": "tt0816711",
"status": 404,
"error": {

View File

@ -44,7 +44,6 @@ wait_for_active_shards | String | The number of active shards that must be avail
```json
{
"_index": "sample-index1",
"_type": "_doc",
"_id": "1",
"_version": 2,
"result": "deleted",
@ -63,7 +62,6 @@ wait_for_active_shards | String | The number of active shards that must be avail
Field | Description
:--- | :---
_index | The name of the index.
_type | The document's type. OpenSearch only supports one type, which is `_doc`.
_id | The document's ID.
_version | The document's version.
_result | The result of the delete operation.

View File

@ -51,7 +51,6 @@ version_type | Enum | Retrieves a specifically typed document. Available options
```json
{
"_index": "sample-index1",
"_type": "_doc",
"_id": "1",
"_version": 1,
"_seq_no": 0,
@ -68,7 +67,6 @@ version_type | Enum | Retrieves a specifically typed document. Available options
Field | Description
:--- | :---
_index | The name of the index.
_type | The document's type. OpenSearch only supports one type, which is `_doc`.
_id | The document's ID.
_version | The document's version number. Updated whenever the document changes.
_seq_no | The sequence number assigned when the document is indexed.

View File

@ -64,7 +64,6 @@ Your request body must contain the information you want to index.
```json
{
"_index": "sample-index",
"_type": "_doc",
"_id": "1",
"_version": 1,
"result": "created",
@ -83,7 +82,6 @@ Your request body must contain the information you want to index.
Field | Description
:--- | :---
_index | The name of the index.
_type | The document's type. OpenSearch supports only one type, which is `_doc`.
_id | The document's ID.
_version | The document's version.
result | The result of the index operation.

View File

@ -41,12 +41,10 @@ GET sample-index1/_mget
{
"docs": [
{
"_type": "_doc",
"_id": "1",
"_source": false
},
{
"_type": "_doc",
"_id": "2",
"_source": [ "Director", "Title" ]
}
@ -98,7 +96,6 @@ ids | Array | IDs of the documents to retrieve. Only allowed when an index is sp
"docs": [
{
"_index": "sample-index1",
"_type": "_doc",
"_id": "1",
"_version": 4,
"_seq_no": 5,
@ -111,7 +108,6 @@ ids | Array | IDs of the documents to retrieve. Only allowed when an index is sp
},
{
"_index": "sample-index2",
"_type": "_doc",
"_id": "1",
"_version": 1,
"_seq_no": 6,
@ -131,7 +127,6 @@ ids | Array | IDs of the documents to retrieve. Only allowed when an index is sp
Field | Description
:--- | :---
_index | The name of the index.
_type | The document's type. OpenSearch only supports one type, which is `_doc`.
_id | The document's ID.
_version | The document's version number. Updated whenever the document changes.
_seq_no | The sequence number assigned when the document is indexed.

View File

@ -91,7 +91,6 @@ You can also use a script to tell OpenSearch how to update your document.
```json
{
"_index": "sample-index1",
"_type": "_doc",
"_id": "1",
"_version": 3,
"result": "updated",
@ -110,7 +109,6 @@ You can also use a script to tell OpenSearch how to update your document.
Field | Description
:--- | :---
_index | The name of the index.
_type | The document's type. OpenSearch only supports one type, which is `_doc`.
_id | The document's ID.
_version | The document's version.
_result | The result of the delete operation.

View File

@ -78,7 +78,6 @@ Parameter | Type | Description | Required
```json
{
"_index" : "kibana_sample_data_ecommerce",
"_type" : "_doc",
"_id" : "EVz1Q3sBgg5eWQP6RSte",
"matched" : true,
"explanation" : {

View File

@ -61,7 +61,6 @@ You can include the following URL parameters in your request. All parameters are
Parameter | Type | Description
:--- | :--- | :---
include_type_name | Boolean | If `true`, the request expects a type in the body of mappings. Because OpenSearch indices all have a type of `_doc`, we recommend that this parameter is left as the default of `false`.
wait_for_active_shards | String | Specifies the number of active shards that must be available before OpenSearch processes the request. Default is 1 (only the primary shard). Set to `all` or a positive integer. Values greater than 1 require replicas. For example, if you specify a value of 3, the index must have two replicas distributed across two additional nodes for the request to succeed.
master_timeout | Time | How long to wait for a connection to the master node. Default is `30s`.
timeout | Time | How long to wait for the request to return. Default is `30s`.
@ -113,52 +112,4 @@ index.routing.allocation.enable | Specifies options for the indexs shard allo
index.routing.rebalance.enable | Enables shard rebalancing for the index. Available options are `all` (allow rebalancing for all shards), `primaries` (allow rebalancing only for primary shards), `replicas` (allow rebalancing only for replicas), and `none` (do not allow rebalancing). Default is `all`.
index.gc_deletes | Amount of time to retain a deleted document's version number. Default is `60s`.
index.default_pipeline | The default ingest node pipeline for the index. If the default pipeline is set and the pipeline does not exist, then index requests fail. The pipeline name `_none` specifies that the index does not have an ingest pipeline.
index.final_pipeline | The final ingest node pipeline for the index. If the final pipeline is set and the pipeline does not exist, then index requests fail. The pipeline name `_none` specifies that the index does not have an ingest pipeline.
### Mappings
Mappings define how a documents and its fields are stored and indexed. If you're just starting to build out your cluster and data, you may not know exactly how your data should be stored. In those cases, you can use dynamic mappings, which tell OpenSearch to dynamically add data and their fields. However, if you know exactly what types your data fall under and want to enforce that standard, then you can use explicit mappings.
For example, if you want to indicate that `year` should be of type `text` instead of an `integer`, and `age` should be an `integer`, you can do so with explicit mappings. Using dynamic mapping, OpenSearch might interpret both `year` and `age` as integers.
#### Dynamic mapping types
Type | Description
:--- | :---
null | A `null` field can't be indexed or searched. When a field is set to null, OpenSearch behaves as if that field has no values.
boolean | OpenSearch accepts `true` and `false` as boolean values. An empty string is equal to `false.`
float | A single-precision 32-bit floating point number.
double | A double-precision 64-bit floating point number.
integer | A signed 32-bit number.
object | Objects are standard JSON objects, which can have fields and mappings of their own. For example, a `movies` object can have additional properties such as `title`, `year`, and `director`.
array | Arrays in OpenSearch can only store values of one type, such as an array of just integers or strings. Empty arrays are treated as though they are fields with no values.
text | A string sequence of characters that represent full-text values.
keyword | A string sequence of structured characters, such as an email or ZIP code.
date detection string | Enabled by default, if new string fields match a date's format, then the string is processed as a `date` field. For example, `date: "2012/03/11"` is processed as a date.
numeric detection string | If disabled, OpenSearch may automatically process numeric values as strings when they should be processed as numbers. When enabled, OpenSearch can process strings into `long`, `integer`, `short`, `byte`, `double`, `float`, `half_float`, `scaled_float`, `unsigned_long`. Default is disabled.
#### Explicit mapping
If you know exactly what your data's typings need to be, you can specify them in your request body when creating your index.
```json
{
"mappings": {
"properties": {
"year": { "type" : "text" },
"age": { "type" : "integer" },
"director":{ "type" : "text" }
}
}
}
```
## Response
```json
{
"acknowledged": true,
"shards_acknowledged": true,
"index": "sample-index1"
}
```
index.final_pipeline | The final ingest node pipeline for the index. If the final pipeline is set and the pipeline does not exist, then index requests fail. The pipeline name `_none` specifies that the index does not have an ingest pipeline.

View File

@ -55,6 +55,7 @@ Parameter | Data Type | Description
allow_no_indices | Boolean | Whether to ignore wildcards that don't match any indexes. Default is `true`.
expand_wildcards | String | Expands wildcard expressions to different indexes. Combine multiple values with commas. Available values are `all` (match all indexes), `open` (match open indexes), `closed` (match closed indexes), `hidden` (match hidden indexes), and `none` (do not accept wildcard expressions), which must be used with `open`, `closed`, or both. Default is `open`.
ignore_unavailable | Boolean | If true, OpenSearch does not include missing or closed indexes in the response.
ignore_malformed | Boolean | Use this parameter with the `ip_range` data type to specify that OpenSearch should ignore malformed fields. If `true`, OpenSearch does not include entries that do not match the IP range specified in the index in the response. The default is `false`.
master_timeout | Time | How long to wait for a connection to the master node. Default is `30s`.
timeout | Time | How long to wait for the response to return. Default is `30s`.
write_index_only | Boolean | Whether OpenSearch should apply mapping updates only to the write index.

View File

@ -85,7 +85,6 @@ Responses vary based on which path and HTTP method you choose.
{
"doc" : {
"_index" : "index",
"_type" : "_doc",
"_id" : "id",
"_source" : {
"location" : "new-new",
@ -99,7 +98,6 @@ Responses vary based on which path and HTTP method you choose.
{
"doc" : {
"_index" : "index",
"_type" : "_doc",
"_id" : "id",
"_source" : {
"location" : "new-new",
@ -122,7 +120,6 @@ Responses vary based on which path and HTTP method you choose.
{
"doc" : {
"_index" : "index",
"_type" : "_doc",
"_id" : "id",
"_source" : {
"field-name" : "value",
@ -136,7 +133,6 @@ Responses vary based on which path and HTTP method you choose.
{
"doc" : {
"_index" : "index",
"_type" : "_doc",
"_id" : "id",
"_source" : {
"field-name" : "value",
@ -165,7 +161,6 @@ With the `verbose` parameter set to `true`, the response shows how each processo
"status" : "success",
"doc" : {
"_index" : "index",
"_type" : "_doc",
"_id" : "id",
"_source" : {
"field-name" : "value",
@ -186,7 +181,6 @@ With the `verbose` parameter set to `true`, the response shows how each processo
"status" : "success",
"doc" : {
"_index" : "index",
"_type" : "_doc",
"_id" : "id",
"_source" : {
"field-name" : "value",

View File

@ -112,7 +112,6 @@ OpenSearch returns an array with the results of each search in the same order as
"hits" : [
{
"_index" : "opensearch_dashboards_sample_data_logs",
"_type" : "_doc",
"_id" : "_fnhBXsBgv2Zxgu9dZ8Y",
"_score" : 1.0,
"_source" : {
@ -177,7 +176,6 @@ OpenSearch returns an array with the results of each search in the same order as
"hits" : [
{
"_index" : "opensearch_dashboards_sample_data_ecommerce",
"_type" : "_doc",
"_id" : "efnhBXsBgv2Zxgu9ap7e",
"_score" : 1.0,
"_source" : {

View File

@ -125,7 +125,6 @@ version | Boolean | Whether to include the document version in the response.
"hits": [
{
"_index": "superheroes",
"_type": "_doc",
"_id": "1",
"_score": 1.0,
"_source": {

View File

@ -210,7 +210,6 @@ GET shakespeare/_search
"hits": [
{
"_index": "shakespeare",
"_type": "_doc",
"_id": "22006",
"_score": 9.712725,
"_source": {
@ -225,7 +224,6 @@ GET shakespeare/_search
},
{
"_index": "shakespeare",
"_type": "_doc",
"_id": "54665",
"_score": 9.712725,
"_source": {
@ -327,8 +325,7 @@ The phrase "to be" is prefix matched with the FST of the `text_entry` field.
"options": [
{
"text": "To be a comrade with the wolf and owl,--",
"_index": "shakespeare",
"_type": "_doc",
"_index": "shakespeare",
"_id": "50652",
"_score": 1,
"_source": {
@ -344,7 +341,6 @@ The phrase "to be" is prefix matched with the FST of the `text_entry` field.
{
"text": "To be a make-peace shall become my age:",
"_index": "shakespeare",
"_type": "_doc",
"_id": "78566",
"_score": 1,
"_source": {
@ -411,7 +407,6 @@ GET shakespeare/_search
{
"text": "To make a bastard and a slave of me!",
"_index": "shakespeare",
"_type": "_doc",
"_id": "5369",
"_score": 4,
"_source": {
@ -427,7 +422,6 @@ GET shakespeare/_search
{
"text": "To make a bloody supper in the Tower.",
"_index": "shakespeare",
"_type": "_doc",
"_id": "12504",
"_score": 4,
"_source": {
@ -513,7 +507,6 @@ You see the indexed document as the first result:
{
"text": "To be, or not to be: that is the question:",
"_index": "shakespeare",
"_type": "_doc",
"_id": "1",
"_score": 30,
"_source": {
@ -529,7 +522,6 @@ You see the indexed document as the first result:
{
"text": "To make a bastard and a slave of me!",
"_index": "shakespeare",
"_type": "_doc",
"_id": "5369",
"_score": 4,
"_source": {

View File

@ -22,7 +22,7 @@ If the security plugin is enabled, make sure that non-admin users are mapped to
## Get started with auto-follow
Replication rules are a collection of patterns that you create against a single remote cluster. When you create a replication rule, it automatically starts replicating any *new* indexes that match the pattern, but does not replicate matching indexes that were previously created.
Replication rules are a collection of patterns that you create against a single follower cluster. When you create a replication rule, it starts by automatically replicating any *existing* indexes that match the pattern. It will then continue to replicate any *new* indexes that you create that match the pattern.
Create a replication rule on the follower cluster:
@ -92,7 +92,7 @@ curl -XGET -u 'admin:admin' -k 'https://localhost:9200/_plugins/_replication/aut
## Delete a replication rule
To delete a replication rule, send the following request:
To delete a replication rule, send the following request to the follower cluster:
```bash
curl -XDELETE -k -H 'Content-Type: application/json' -u 'admin:admin' 'https://localhost:9200/_plugins/_replication/_autofollow?pretty' -d '
@ -102,5 +102,4 @@ curl -XDELETE -k -H 'Content-Type: application/json' -u 'admin:admin' 'https://l
}'
```
OpenSearch stops replicating *new* indexes that match the pattern, but existing indexes that the rule previously created remain read-only and continue to replicate. If you need to stop existing replication activity and open the indexes up for writes, use the [stop replication API operation]({{site.url}}{{site.baseurl}}/replication-plugin/api/#stop-replication).
When you delete a replication rule, OpenSearch stops replicating *new* indexes that match the pattern, but existing indexes that the rule previously created remain read-only and continue to replicate. If you need to stop existing replication activity and open the indexes up for writes, use the [stop replication API operation]({{site.url}}{{site.baseurl}}/replication-plugin/api/#stop-replication).
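For reference, stopping replication for a hypothetical `follower-01` index with the stop replication API might look like the following sketch:
```bash
curl -XPOST -k -H 'Content-Type: application/json' -u 'admin:admin' 'https://localhost:9200/_plugins/_replication/follower-01/_stop?pretty' -d '{}'
```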

View File

@ -226,7 +226,6 @@ curl -XGET -k -u 'admin:admin' 'https://localhost:9200/follower-01/_search?prett
...
"hits": [{
"_index": "follower-01",
"_type": "_doc",
"_id": "1",
"_score": 1.0,
"_source": {

View File

@ -148,7 +148,6 @@ GET _plugins/_asynchronous_search/<ID>?pretty
"hits": [
{
"_index": "bank",
"_type": "_doc",
"_id": "1",
"_score": 1,
"_source": {

View File

@ -256,7 +256,6 @@ GET/POST /_plugins/_knn/models/_search?pretty&_source_excludes=model_blob
"hits" : [
{
"_index" : ".opensearch-knn-models",
"_type" : "_doc",
"_id" : "test-model",
"_score" : 1.0,
"_source" : {

View File

@ -86,7 +86,7 @@ kibana_user:
Hidden resources are automatically reserved.
To add or remove these flags, modify `plugins/opensearch-security/securityconfig/internal_users.yml` and run `plugins/opensearch-security/tools/securityadmin.sh`.
To add or remove these flags, modify `config/opensearch-security/internal_users.yml` and run `plugins/opensearch-security/tools/securityadmin.sh`.
---

View File

@ -176,7 +176,6 @@ curl -XGET -k -u 'admin:admin' 'https://localhost:9250/opensearch-ccs-cluster1:b
...
"hits": [{
"_index": "opensearch-ccs-cluster1:books",
"_type": "_doc",
"_id": "1",
"_score": 1.0,
"_source": {
@ -230,7 +229,6 @@ curl -XGET -k -u booksuser:password 'https://localhost:9250/opensearch-ccs-clust
...
"hits": [{
"_index": "opensearch-ccs-cluster1:books",
"_type": "_doc",
"_id": "1",
"_score": 1.0,
"_source": {

View File

@ -5,7 +5,7 @@ parent: Access control
nav_order: 10
---
# Document-level security
# Document-level security (DLS)
Document-level security lets you restrict a role to a subset of documents in an index. The easiest way to get started with document- and field-level security is to open OpenSearch Dashboards and choose **Security**. Then choose **Roles**, create a new role, and review the **Index permissions** section.
@ -124,3 +124,26 @@ PUT _plugins/_security/api/roles/abac
}]
}
```
## Use term-level lookup queries (TLQs) with DLS
You can perform term-level lookup queries (TLQs) with document-level security (DLS) using either of two modes: adaptive or filter level. The default mode is adaptive, where OpenSearch automatically switches between Lucene-level or filter-level mode depending on whether or not there is a TLQ. DLS queries without TLQs are executed in Lucene-level mode, whereas DLS queries with TLQs are executed in filter-level mode.
By default, the security plugin detects if a DLS query contains a TLQ or not and chooses the appropriate mode automatically at runtime.
To learn more about OpenSearch queries, see [Term-level queries](https://opensearch.org/docs/latest/opensearch/query-dsl/term/).
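For example, a DLS query containing a terms lookup might appear in a role like the following sketch; the role, index, and field names are hypothetical:
```json
PUT _plugins/_security/api/roles/tlq_role
{
  "index_permissions": [{
    "index_patterns": ["movies"],
    "dls": "{ \"terms\": { \"director\": { \"index\": \"allowed-directors\", \"id\": \"${user.name}\", \"path\": \"directors\" } } }",
    "allowed_actions": ["read"]
  }]
}
```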
### How to set the DLS evaluation mode in `opensearch.yml`
By default, the DLS evaluation mode is set to `adaptive`. You can also explicitly set the mode in `opensearch.yml` with the `plugins.security.dls.mode` setting. Add a line to `opensearch.yml` with the desired evaluation mode.
For example, to set it to filter level, add this line:
```
plugins.security.dls.mode: filter-level
```
#### DLS evaluation modes
| Evaluation mode | Parameter | Description | Usage |
| :--- | :--- | :--- | :--- |
| Lucene-level DLS | `lucene-level` | This setting makes all DLS queries apply to the Lucene level. | Lucene-level DLS modifies Lucene queries and data structures directly. This is the most efficient mode but does not allow certain advanced constructs in DLS queries, including TLQs. |
| Filter-level DLS | `filter-level` | This setting makes all DLS queries apply to the filter level. | In this mode, OpenSearch applies DLS by modifying queries that OpenSearch receives. This allows for term-level lookup queries in DLS queries, but you can only use the `get`, `search`, `mget`, and `msearch` operations to retrieve data from the protected index. Additionally, cross-cluster searches are limited with this mode. |
| Adaptive | `adaptive-level` | The default setting that allows OpenSearch to automatically choose the mode. | DLS queries without TLQs are executed in Lucene-level mode, while DLS queries that contain TLQs are executed in filter-level mode. |

View File

@ -27,7 +27,6 @@ You have two options when you configure field-level security: include or exclude
```json
{
"_index": "movies",
"_type": "_doc",
"_source": {
"year": 2013,
"title": "Rush",
@ -45,7 +44,6 @@ If you exclude fields, users see everything *but* those fields when they retriev
```json
{
"_index": "movies",
"_type": "_doc",
"_source": {
"directors": [
"Ron Howard"
@ -107,7 +105,6 @@ For example, in the `movies` index, if you include `actors`, `title`, and `year`
```json
{
"_index": "movies",
"_type": "_doc",
"_source": {
"year": 2013,
"directors": [

View File

@ -14,7 +14,6 @@ Field masking works alongside field-level security on the same per-role, per-ind
```json
{
"_index": "movies",
"_type": "_doc",
"_source": {
"year": 2013,
"directors": [

View File

@ -29,7 +29,7 @@ http://<opensearch_dashboards_host>:5601/app/opensearch-dashboards?security_tena
## Configuration
Multi-tenancy is enabled by default, but you can disable it or change its settings using `plugins/opensearch-security/securityconfig/config.yml`:
Multi-tenancy is enabled by default, but you can disable it or change its settings using `config/opensearch-security/config.yml`:
```yml
config:
@ -53,7 +53,7 @@ Setting | Description
```yml
opensearch.username: kibanaserver
opensearch.password: kibanaserver
opensearch.requestHeadersWhitelist: ["securitytenant","Authorization"]
opensearch.requestHeadersAllowlist: ["securitytenant","Authorization"]
opensearch_security.multitenancy.enabled: true
opensearch_security.multitenancy.tenants.enable_global: true
opensearch_security.multitenancy.tenants.enable_private: true
@ -63,7 +63,7 @@ opensearch_security.multitenancy.enable_filter: false
Setting | Description
:--- | :---
`opensearch.requestHeadersWhitelist` | OpenSearch Dashboards requires that you whitelist all HTTP headers that it passes to OpenSearch. Multi-tenancy uses a specific header, `securitytenant`, that must be present with the standard `Authorization` header. If the `securitytenant` header is not whitelisted, OpenSearch Dashboards starts with a red status.
`opensearch.requestHeadersAllowlist` | OpenSearch Dashboards requires that you add to the allow list all HTTP headers that it passes to OpenSearch. Multi-tenancy uses a specific header, `securitytenant`, that must be present with the standard `Authorization` header. If the `securitytenant` header is not on the allow list, OpenSearch Dashboards starts with a red status.
`opensearch_security.multitenancy.enabled` | Enables or disables multi-tenancy in OpenSearch Dashboards. Default is true.
`opensearch_security.multitenancy.tenants.enable_global` | Enables or disables the global tenant. Default is true.
`opensearch_security.multitenancy.tenants.enable_private` | Enables or disables the private tenant. Default is true.
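For example, after the `securitytenant` header is on the allow list, a client can target a specific tenant when calling the OpenSearch Dashboards API. A sketch, assuming default demo credentials and a local Dashboards instance:

```bash
# Find index patterns saved in the "global" tenant.
# The osd-xsrf header satisfies the Dashboards XSRF protection.
curl -s -u admin:admin \
  -H "securitytenant: global" \
  -H "osd-xsrf: true" \
  "http://localhost:5601/api/saved_objects/_find?type=index-pattern"
```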
View File
@ -15,7 +15,7 @@ Understanding the authentication flow is a great way to get started with configu
2. The security plugin authenticates the user's credentials against a backend: the internal user database, Lightweight Directory Access Protocol (LDAP), Active Directory, Kerberos, or JSON web tokens.
The plugin supports chaining backends in `securityconfig/config.yml`. If more than one backend is present, the plugin tries to authenticate the user sequentially against each until one succeeds. A common use case is to combine the internal user database of the security plugin with LDAP/Active Directory.
The plugin supports chaining backends in `config/opensearch-security/config.yml`. If more than one backend is present, the plugin tries to authenticate the user sequentially against each until one succeeds. A common use case is to combine the internal user database of the security plugin with LDAP/Active Directory.
3. After a backend verifies the user's credentials, the plugin collects any backend roles. These roles can be arbitrary strings in the internal user database, but in most cases, these backend roles come from LDAP/Active Directory.
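As a sketch of the chaining described in step 2, the following `config.yml` excerpt defines two authentication domains that are tried in ascending `order`, so the internal user database is checked before LDAP (hostnames and DNs are placeholders):

```yml
authc:
  internal_auth_domain:
    http_enabled: true
    transport_enabled: true
    order: 0
    http_authenticator:
      type: basic
      challenge: false
    authentication_backend:
      type: internal
  ldap_auth_domain:
    http_enabled: true
    transport_enabled: true
    order: 1
    http_authenticator:
      type: basic
      challenge: true
    authentication_backend:
      type: ldap
      config:
        hosts:
          - ldap.example.com:389
        userbase: "ou=people,dc=example,dc=com"
        usersearch: "(uid={0})"
```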
View File
@ -9,7 +9,7 @@ nav_order: 2
One of the first steps to using the security plugin is to decide on an authentication backend, which handles [steps 2-3 of the authentication flow]({{site.url}}{{site.baseurl}}/security-plugin/configuration/concepts#authentication-flow). The plugin has an internal user database, but many people prefer to use an existing authentication backend, such as an LDAP server, or some combination of the two.
The main configuration file for authentication and authorization backends is `plugins/opensearch-security/securityconfig/config.yml`. It defines how the security plugin retrieves the user credentials, how it verifies these credentials, and how to fetch additional roles from backend systems (optional).
The main configuration file for authentication and authorization backends is `config/opensearch-security/config.yml`. It defines how the security plugin retrieves the user credentials, how it verifies these credentials, and how to fetch additional roles from backend systems (optional).
`config.yml` has three main parts:
@ -123,7 +123,7 @@ These are the possible values for `type`:
## Examples
The default `plugins/opensearch-security/securityconfig/config.yml` that ships with OpenSearch contains many configuration examples. Use these examples as a starting point, and customize them to your needs.
The default `config/opensearch-security/config.yml` that ships with OpenSearch contains many configuration examples. Use these examples as a starting point, and customize them to your needs.
## HTTP basic
View File
@ -55,7 +55,7 @@ We provide a fully functional example that can help you understand how to use an
## Connection settings
To enable LDAP authentication and authorization, add the following lines to `plugins/opensearch-security/securityconfig/config.yml`:
To enable LDAP authentication and authorization, add the following lines to `config/opensearch-security/config.yml`:
```yml
authc:
@ -247,7 +247,7 @@ Name | Description
## Use Active Directory and LDAP for authentication
To use Active Directory/LDAP for authentication, first configure a respective authentication domain in the `authc` section of `plugins/opensearch-security/securityconfig/config.yml`:
To use Active Directory/LDAP for authentication, first configure a respective authentication domain in the `authc` section of `config/opensearch-security/config.yml`:
```yml
authc:
@ -433,7 +433,7 @@ By default, the security plugin reads all LDAP user attributes and makes them av
Name | Description
:--- | :---
`custom_attr_whitelist` | String array. Specifies the LDAP attributes that should be made available for variable substitution.
`custom_attr_allowlist` | String array. Specifies the LDAP attributes that should be made available for variable substitution.
`custom_attr_maxval_len` | Integer. Specifies the maximum allowed length of each attribute. All attributes longer than this value are discarded. A value of `0` disables custom attributes altogether. Default is 36.
Example:
@ -446,7 +446,7 @@ authz:
authorization_backend:
type: ldap
config:
custom_attr_whitelist:
custom_attr_allowlist:
- attribute1
- attribute2
custom_attr_maxval_len: 36
@ -501,7 +501,7 @@ Name | Description
`skip_users` | Array of users that should be skipped when retrieving roles. Wildcards and regular expressions are supported.
`nested_role_filter` | Array of role DNs that should be filtered before resolving nested roles. Wildcards and regular expressions are supported.
`rolesearch_enabled` | Boolean. Enable or disable the role search. Default is `true`.
`custom_attr_whitelist` | String array. Specifies the LDAP attributes that should be made available for variable substitution.
`custom_attr_allowlist` | String array. Specifies the LDAP attributes that should be made available for variable substitution.
`custom_attr_maxval_len` | Integer. Specifies the maximum allowed length of each attribute. All attributes longer than this value are discarded. A value of `0` disables custom attributes altogether. Default is 36.
View File
@ -308,8 +308,8 @@ opensearch.password: "kibanaserver"
# Disable SSL verification when using self-signed demo certificates
opensearch.ssl.verificationMode: none
# Whitelist basic headers and multi-tenancy header
opensearch.requestHeadersWhitelist: ["Authorization", "security_tenant"]
# Allowlist basic headers and multi-tenancy header
opensearch.requestHeadersAllowlist: ["Authorization", "security_tenant"]
```
View File
@ -192,10 +192,10 @@ config:
internalProxies: '<opensearch-dashboards-ip-address>'
```
To pass the user and role headers that the authenticating proxy adds from OpenSearch Dashboards to the security plugin, add them to the HTTP header whitelist in `opensearch_dashboards.yml`:
To pass the user and role headers that the authenticating proxy adds from OpenSearch Dashboards to the security plugin, add them to the HTTP header allow list in `opensearch_dashboards.yml`:
```yml
opensearch.requestHeadersWhitelist: ["securitytenant","Authorization","x-forwarded-for","x-proxy-user","x-proxy-roles"]
opensearch.requestHeadersAllowlist: ["securitytenant","Authorization","x-forwarded-for","x-proxy-user","x-proxy-roles"]
```
You must also enable the authentication type in `opensearch_dashboards.yml`:
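A minimal sketch of those settings, assuming the proxy sends the `x-proxy-user` and `x-proxy-roles` headers shown above:

```yml
opensearch_security.auth.type: "proxy"
opensearch_security.proxycache.user_header: "x-proxy-user"
opensearch_security.proxycache.roles_header: "x-proxy-roles"
```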
View File
@ -37,7 +37,7 @@ We provide a fully functional example that can help you understand how to use SA
## Activating SAML
To use SAML for authentication, you need to configure a respective authentication domain in the `authc` section of `plugins/opensearch-security/securityconfig/config.yml`. Because SAML works solely on the HTTP layer, you do not need any `authentication_backend` and can set it to `noop`. Place all SAML-specific configuration options in this chapter in the `config` section of the SAML HTTP authenticator:
To use SAML for authentication, you need to configure a respective authentication domain in the `authc` section of `config/opensearch-security/config.yml`. Because SAML works solely on the HTTP layer, you do not need any `authentication_backend` and can set it to `noop`. Place all SAML-specific configuration options in this chapter in the `config` section of the SAML HTTP authenticator:
```yml
authc:
@ -302,16 +302,16 @@ Because most of the SAML-specific configuration is done in the security plugin,
opensearch_security.auth.type: "saml"
```
In addition, the OpenSearch Dashboards endpoint for validating the SAML assertions must be whitelisted:
In addition, you must add the OpenSearch Dashboards endpoint for validating the SAML assertions to your allow list:
```yml
server.xsrf.whitelist: ["/_plugins/_security/saml/acs"]
server.xsrf.allowlist: ["/_plugins/_security/saml/acs"]
```
If you use the logout POST binding, you also need to whitelist the logout endpoint:
If you use the logout POST binding, you also need to add the logout endpoint to your allow list:
```yml
server.xsrf.whitelist: ["/_plugins/_security/saml/acs", "/_plugins/_security/saml/logout"]
server.xsrf.allowlist: ["/_plugins/_security/saml/acs", "/_plugins/_security/saml/logout"]
```
### IdP-initiated SSO
@ -322,8 +322,8 @@ To use IdP-initiated SSO, set the Assertion Consumer Service endpoint of your Id
/_plugins/_security/saml/acs/idpinitiated
```
Then add this endpoint to `server.xsrf.whitelist` in `opensearch_dashboards.yml`:
Then add this endpoint to `server.xsrf.allowlist` in `opensearch_dashboards.yml`:
```yml
server.xsrf.whitelist: ["/_plugins/_security/saml/acs/idpinitiated", "/_plugins/_security/saml/acs", "/_plugins/_security/saml/logout"]
server.xsrf.allowlist: ["/_plugins/_security/saml/acs/idpinitiated", "/_plugins/_security/saml/acs", "/_plugins/_security/saml/logout"]
```
View File
@ -9,12 +9,12 @@ nav_order: 20
The security plugin stores its configuration---including users, roles, and permissions---in an index on the OpenSearch cluster (`.opendistro_security`). Storing these settings in an index lets you change settings without restarting the cluster and eliminates the need to edit configuration files on every single node.
To initialize the `.opendistro_security` index, however, you must run `plugins/opensearch-security/tools/securityadmin.sh`. This script loads your initial configuration into the index using the configuration files in `plugins/opensearch-security/securityconfig`. After the `.opendistro_security` index is initialized, use OpenSearch Dashboards or the REST API to manage your users, roles, and permissions.
To initialize the `.opendistro_security` index, however, you must run `plugins/opensearch-security/tools/securityadmin.sh`. This script loads your initial configuration into the index using the configuration files in `config/opensearch-security`. After the `.opendistro_security` index is initialized, use OpenSearch Dashboards or the REST API to manage your users, roles, and permissions.
## A word of caution
If you make changes to the configuration files in `plugins/opensearch-security/securityconfig`, OpenSearch does _not_ automatically apply these changes. Instead, you must run `securityadmin.sh` to load the updated files into the index.
If you make changes to the configuration files in `config/opensearch-security`, OpenSearch does _not_ automatically apply these changes. Instead, you must run `securityadmin.sh` to load the updated files into the index.
Running `securityadmin.sh` **overwrites** one or more portions of the `.opendistro_security` index. Run it with extreme care to avoid losing your existing resources. Consider the following example:
@ -38,7 +38,7 @@ To avoid this situation, back up your current configuration before making change
If you use the `-f` argument rather than `-cd`, you can load a single YAML file into the index rather than the entire directory of YAML files. For example, if you create ten new roles, you can safely load `internal_users.yml` into the index without losing your roles; only the internal users get overwritten.
```bash
./securityadmin.sh -f ../securityconfig/internal_users.yml \
./securityadmin.sh -f ../../../config/opensearch-security/internal_users.yml \
-t internalusers \
-icl \
-nhnv \
@ -50,7 +50,7 @@ If you use the `-f` argument rather than `-cd`, you can load a single YAML file
To resolve all environment variables before applying the security configurations, use the `-rev` parameter.
```bash
./securityadmin.sh -cd ../securityconfig/ \
./securityadmin.sh -cd ../../../config/opensearch-security/ \
-rev \
-cacert ../../../root-ca.pem \
-cert ../../../kirk.pem \
@ -95,7 +95,7 @@ To print all available command line options, run the script with no arguments:
To load your initial configuration (all YAML files), you might use the following command:
```bash
./securityadmin.sh -cd ../securityconfig/ -icl -nhnv \
./securityadmin.sh -cd ../../../config/opensearch-security/ -icl -nhnv \
-cacert ../../../config/root-ca.pem \
-cert ../../../config/kirk.pem \
-key ../../../config/kirk-key.pem
@ -118,32 +118,32 @@ Name | Description
## Sample commands
Apply all YAML files in `securityconfig` using PEM certificates:
Apply all YAML files in `config/opensearch-security/` using PEM certificates:
```bash
/usr/share/opensearch/plugins/opensearch-security/tools/securityadmin.sh \
-cacert /etc/opensearch/root-ca.pem \
-cert /etc/opensearch/kirk.pem \
-key /etc/opensearch/kirk-key.pem \
-cd /usr/share/opensearch/plugins/opensearch-security/securityconfig/
-cd /usr/share/opensearch/config/opensearch-security/
```
Apply a single YAML file (`config.yml`) using PEM certificates:
```bash
./securityadmin.sh \
-f ../securityconfig/config.yml \
-f ../../../config/opensearch-security/config.yml \
-icl -nhnv -cert /etc/opensearch/kirk.pem \
-cacert /etc/opensearch/root-ca.pem \
-key /etc/opensearch/kirk-key.pem \
-t config
```
Apply all YAML files in `securityconfig` with keystore and truststore files:
Apply all YAML files in `config/opensearch-security/` with keystore and truststore files:
```bash
./securityadmin.sh \
-cd /usr/share/opensearch/plugins/opensearch-security/securityconfig/ \
-cd /usr/share/opensearch/config/opensearch-security/ \
-ks /path/to/keystore.jks \
-kspass changeit \
-ts /path/to/truststore.jks \
@ -158,7 +158,7 @@ Apply all YAML files in `securityconfig` with keystore and truststore files:
You can also use keystore files in JKS format in conjunction with `securityadmin.sh`:
```bash
./securityadmin.sh -cd ../securityconfig -icl -nhnv
./securityadmin.sh -cd ../../../config/opensearch-security -icl -nhnv
-ts <path/to/truststore> -tspass <truststore password>
-ks <path/to/keystore> -kspass <keystore password>
```
@ -216,13 +216,13 @@ Name | Description
To upload all configuration files in a directory, use this:
```bash
./securityadmin.sh -cd ../securityconfig -ts ... -tspass ... -ks ... -kspass ...
./securityadmin.sh -cd ../../../config/opensearch-security -ts ... -tspass ... -ks ... -kspass ...
```
If you want to push a single configuration file, use this:
```bash
./securityadmin.sh -f ../securityconfig/internal_users.yml -t internalusers \
./securityadmin.sh -f ../../../config/opensearch-security/internal_users.yml -t internalusers \
-ts ... -tspass ... -ks ... -kspass ...
```
@ -274,7 +274,7 @@ To upload the dumped files to another cluster:
To migrate configuration YAML files from the Open Distro for Elasticsearch 0.x.x format to the OpenSearch 1.x.x format:
```bash
./securityadmin.sh -migrate ../securityconfig -ts ... -tspass ... -ks ... -kspass ...
./securityadmin.sh -migrate ../../../config/opensearch-security -ts ... -tspass ... -ks ... -kspass ...
```
Name | Description
View File
@ -7,7 +7,7 @@ nav_order: 3
# YAML files
Before running `securityadmin.sh` to load the settings into the `.opendistro_security` index, configure the YAML files in `plugins/opensearch-security/securityconfig`. You might want to back up these files so that you can reuse them on other clusters.
Before running `securityadmin.sh` to load the settings into the `.opendistro_security` index, configure the YAML files in `config/opensearch-security`. You might want to back up these files so that you can reuse them on other clusters.
The best use of these YAML files is to configure [reserved and hidden resources]({{site.url}}{{site.baseurl}}/security-plugin/access-control/api#reserved-and-hidden-resources), such as the `admin` and `kibanaserver` users. You might find it easier to create other users, roles, mappings, action groups, and tenants using OpenSearch Dashboards or the REST API.
@ -124,16 +124,16 @@ plugins.security.restapi.password_validation_regex: '(?=.*[A-Z])(?=.*[^a-zA-Z\d]
plugins.security.restapi.password_validation_error_message: "Password must be minimum 8 characters long and must contain at least one uppercase letter, one lowercase letter, one digit, and one special character."
```
## whitelist.yml
## allowlist.yml
You can use `whitelist.yml` to add any endpoints and HTTP requests to a list of allowed endpoints and requests. If enabled, all users except the super admin are allowed access to only the specified endpoints and HTTP requests, and all other HTTP requests associated with the endpoint are denied. For example, if GET `_cluster/settings` is added to the allow list, users cannot submit PUT requests to `_cluster/settings` to update cluster settings.
You can use `allowlist.yml` to add any endpoints and HTTP requests to a list of allowed endpoints and requests. If enabled, all users except the super admin are allowed access to only the specified endpoints and HTTP requests, and all other HTTP requests associated with the endpoint are denied. For example, if GET `_cluster/settings` is added to the allow list, users cannot submit PUT requests to `_cluster/settings` to update cluster settings.
Note that while you can configure access to endpoints this way, for most cases, it is still best to configure permissions using the security plugin's users and roles, which have more granular settings.
```yml
---
_meta:
type: "whitelist"
type: "allowlist"
config_version: 2
# Description:
@ -165,7 +165,7 @@ requests:
- PUT
```
You can also add custom indices to the allow list. `whitelist.yml` doesn't support wildcards, so you must manually specify all of the indices you want to add.
You can also add custom indexes to the allow list. `allowlist.yml` doesn't support wildcards, so you must manually specify all of the indexes you want to add.
```yml
requests: # Only allow GET requests to /sample-index1/_doc/1 and /sample-index2/_doc/1
View File
@ -45,12 +45,12 @@ The endpoint the OpenSearch Dashboards security plugin provides is:
/_plugins/_security/saml/acs
```
Make sure that you have configured this endpoint correctly in your IdP. Some IdPs also require you to whitelist all endpoints that they send requests to. Ensure that the ACS endpoint is listed.
Make sure that you have configured this endpoint correctly in your IdP. Some IdPs also require that every endpoint they send requests to is on the allow list. Ensure that the ACS endpoint is listed.
OpenSearch Dashboards also requires you to whitelist this endpoint. Make sure you have the following entry in `opensearch_dashboards.yml`:
OpenSearch Dashboards also requires you to add this endpoint to the allow list. Make sure you have the following entry in `opensearch_dashboards.yml`:
```
server.xsrf.whitelist: [/_plugins/_security/saml/acs]
server.xsrf.allowlist: [/_plugins/_security/saml/acs]
```
View File
@ -100,7 +100,7 @@ You must use an admin certificate when executing the script. To learn more, see
For more information on why `securityadmin.sh` is not executing, add the `--diagnose` option:
```
./securityadmin.sh -diagnose -cd ../securityconfig/ -cacert ... -cert ... -key ... -keypass ...
./securityadmin.sh -diagnose -cd ../../../config/opensearch-security/ -cacert ... -cert ... -key ... -keypass ...
```
The script prints the location of the generated diagnostic file.
View File
@ -21,7 +21,7 @@ This page includes troubleshooting steps for configuring TLS certificates with t
## Validate YAML
`opensearch.yml` and the files in `opensearch_security/securityconfig/` are in the YAML format. A linter like [YAML Validator](https://codebeautify.org/yaml-validator) can help verify that you don't have any formatting errors.
`opensearch.yml` and the files in `config/opensearch-security/` are in the YAML format. A linter like [YAML Validator](https://codebeautify.org/yaml-validator) can help verify that you don't have any formatting errors.
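If you prefer the command line, a linter such as `yamllint` (a third-party tool, assumed here to be installed with `pip install yamllint`) performs the same check:

```bash
# Lint a single file; no output means no formatting errors were found
yamllint config/opensearch-security/config.yml

# Or lint every YAML file in the directory
yamllint config/opensearch-security/
```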
## View contents of PEM certificates
View File
@ -1,6 +1,9 @@
/* During build, DOC_VERSIONS is prefixed to convey all the versions available, informed by `_data/versions.json`
* Example:
* const DOC_VERSIONS = ["1.1","1.0"];
*
* DOC_VERSION_LATEST will pick `latest`, or in its absence the `current` version.
* const DOC_VERSION_LATEST = "2.0";
*/
const PREFIX = "OpenSearch ";
const tpl = `
@ -155,7 +158,7 @@ class VersionSelector extends HTMLElement {
frag.querySelector('#selected').textContent = `${PREFIX}${this.getAttribute('selected')}.x`;
const pathName = location.pathname.replace(/\/docs(\/((latest|\d+\.\d+)\/?)?)?/, '');
const versionsDOMText = DOC_VERSIONS.map((v, idx) => `<a href="/docs/${v}/${pathName}"${idx === 0 ? ' class="latest"' : ''}>${PREFIX}${v}.x</a>`)
const versionsDOMText = DOC_VERSIONS.map((v, idx) => `<a href="/docs/${v}/${pathName}"${v === DOC_VERSION_LATEST ? ' class="latest"' : ''}>${PREFIX}${v}.x</a>`)
.join('');
frag.querySelector('#dropdown').appendChild(this._makeFragment(versionsDOMText));
View File
@ -2,8 +2,17 @@
permalink: /assets/js/version-selector.js
---
(() => {
{% assign current_array = site.data.versions.current | split: '!' %}
{% assign all_versions = current_array | concat: site.data.versions.past %}
{% if site.data.versions.all contains site.data.versions.current %}
{% assign all_versions = site.data.versions.all %}
{% else %}
{% assign current_array = site.data.versions.current | split: '!' %}
{% assign all_versions = current_array | concat: site.data.versions.all %}
{% endif %}
const DOC_VERSIONS = {{ all_versions | jsonify }};
{% if site.data.versions.latest %}
const DOC_VERSION_LATEST = {{ site.data.versions.latest | jsonify }};
{% else %}
const DOC_VERSION_LATEST = {{ site.data.versions.current | jsonify }};
{% endif %}
{% include_relative _version-selector.js %}
})();
View File
@ -9,6 +9,7 @@ permalink: /version-history/
OpenSearch version | Release highlights | Release date
:--- | :--- | :---
[2.0.0-rc1](https://github.com/opensearch-project/opensearch-build/blob/main/release-notes/opensearch-release-notes-2.0.0-rc1.md) | The release candidate for 2.0.0. This version allows you to preview the upcoming 2.0.0 release before the GA release. The preview release adds document-level alerting, support for Lucene 9, and the ability to use term lookup queries in document-level security. | 3 May 2022
[1.3.1](https://github.com/opensearch-project/opensearch-build/blob/main/release-notes/opensearch-release-notes-1.3.1.md) | Bug fixes for document-level security, and updates ML Commons to use the latest RCF jar and protostuff for RCF model serialization. | 30 March 2022
[1.3.0](https://github.com/opensearch-project/opensearch-build/blob/main/release-notes/opensearch-release-notes-1.3.0.md) | Adds Model Type Validation to the Validate Detector API; continuous transforms, custom actions, an applied policy parameter to the Explain API, default action retries, and new rollover and transition conditions to Index Management; a new ML Commons plugin; a parse command to SQL; Application Analytics, Live Tail, Correlation, and an Events Flyout to Observability; and auto backport and support for OPENSEARCH_JAVA_HOME to Performance Analyzer. Bug fixes. | 17 March 2022
[1.2.4](https://github.com/opensearch-project/opensearch-build/blob/main/release-notes/opensearch-release-notes-1.2.4.md) | Updates Performance Analyzer, SQL, and Security plugins to Log4j 2.17.1, Alerting and Job Scheduler to cron-utils 9.1.6, and gson in Anomaly Detection and SQL. | 18 January 2022