Merge branch 'main' of https://github.com/opensearch-project/documentation-website into cross-cluster-replication

commit 214f82f1e3

@@ -27,12 +27,18 @@ PUT _cluster/settings
}
```

[Just like any other setting]({{site.url}}{{site.baseurl}}/opensearch/configuration/), the alternative is to add the following line to `opensearch.yml` on each node and then restart the node:

```yml
compatibility.override_main_response_version: true
```

## Downloads

You can download the OpenSearch output plugin for Logstash from [OpenSearch downloads](https://opensearch.org/downloads.html). The Logstash output plugin is compatible with OpenSearch and Elasticsearch OSS (7.10.2 or lower).

These are the latest versions of Beats OSS with OpenSearch compatibility. For more information, see the [compatibility matrices](#compatibility-matrices).

- [Filebeat OSS 7.12.1](https://www.elastic.co/downloads/past-releases/filebeat-oss-7-12-1)
- [Metricbeat OSS 7.12.1](https://www.elastic.co/downloads/past-releases/metricbeat-oss-7-12-1)
@@ -41,7 +47,7 @@ These versions of Beats offer the best compatibility with OpenSearch. For more i
- [Winlogbeat OSS 7.12.1](https://www.elastic.co/downloads/past-releases/winlogbeat-oss-7-12-1)
- [Auditbeat OSS 7.12.1](https://elastic.co/downloads/past-releases/auditbeat-oss-7-12-1)

Some users report compatibility issues with ingest pipelines on these versions of Beats. If you use ingest pipelines with OpenSearch, consider using the 7.10.2 versions of Beats instead.
{: .note }

@@ -0,0 +1,145 @@
---
layout: default
title: Go client
nav_order: 80
---

# Go client

The OpenSearch Go client lets you connect your Go application with the data in your OpenSearch cluster.

## Setup

If you're creating a new project, initialize a Go module first:

```bash
go mod init <module-path>
```

To add the client to your project, download it with `go get`:

```bash
go get github.com/opensearch-project/opensearch-go
```

## Sample code

This sample code creates a client, adds an index with non-default settings, inserts a document, searches for the document, deletes the document, and finally deletes the index:

```go
package main

import (
    "context"
    "crypto/tls"
    "fmt"
    "net/http"
    "os"
    "strings"

    opensearch "github.com/opensearch-project/opensearch-go"
    opensearchapi "github.com/opensearch-project/opensearch-go/opensearchapi"
)

const IndexName = "go-test-index1"

func main() {
    // Initialize the client with SSL/TLS enabled.
    client, err := opensearch.NewClient(opensearch.Config{
        Transport: &http.Transport{
            TLSClientConfig: &tls.Config{InsecureSkipVerify: true},
        },
        Addresses: []string{"https://localhost:9200"},
        Username:  "admin", // For testing only. Don't store credentials in code.
        Password:  "admin",
    })
    if err != nil {
        fmt.Println("cannot initialize", err)
        os.Exit(1)
    }

    // Print OpenSearch version information on console.
    fmt.Println(client.Info())

    // Define index mapping. Note that the body must be valid JSON
    // with double-quoted keys.
    mapping := strings.NewReader(`{
        "settings": {
            "index": {
                "number_of_shards": 4
            }
        }
    }`)

    // Create an index with non-default settings.
    createIndex := opensearchapi.IndicesCreateRequest{
        Index: IndexName,
        Body:  mapping,
    }
    createIndexResponse, err := createIndex.Do(context.Background(), client)
    if err != nil {
        fmt.Println("failed to create index ", err)
        os.Exit(1)
    }
    fmt.Println("creating index", createIndexResponse)

    // Add a document to the index.
    document := strings.NewReader(`{
        "title": "Moneyball",
        "director": "Bennett Miller",
        "year": "2011"
    }`)

    docId := "1"
    req := opensearchapi.IndexRequest{
        Index:      IndexName,
        DocumentID: docId,
        Body:       document,
    }
    insertResponse, err := req.Do(context.Background(), client)
    if err != nil {
        fmt.Println("failed to insert document ", err)
        os.Exit(1)
    }
    fmt.Println(insertResponse)

    // Search for the document.
    content := strings.NewReader(`{
        "size": 5,
        "query": {
            "multi_match": {
                "query": "miller",
                "fields": ["title^2", "director"]
            }
        }
    }`)

    search := opensearchapi.SearchRequest{
        Body: content,
    }

    searchResponse, err := search.Do(context.Background(), client)
    if err != nil {
        fmt.Println("failed to search document ", err)
        os.Exit(1)
    }
    fmt.Println(searchResponse)

    // Delete the document.
    delete := opensearchapi.DeleteRequest{
        Index:      IndexName,
        DocumentID: docId,
    }

    deleteResponse, err := delete.Do(context.Background(), client)
    if err != nil {
        fmt.Println("failed to delete document ", err)
        os.Exit(1)
    }
    fmt.Println("deleting document")
    fmt.Println(deleteResponse)

    // Delete the previously created index.
    deleteIndex := opensearchapi.IndicesDeleteRequest{
        Index: []string{IndexName},
    }

    deleteIndexResponse, err := deleteIndex.Do(context.Background(), client)
    if err != nil {
        fmt.Println("failed to delete index ", err)
        os.Exit(1)
    }
    fmt.Println("deleting index", deleteIndexResponse)
}
```

@@ -9,6 +9,20 @@ redirect_from:

# OpenSearch client compatibility

OpenSearch provides clients for several popular programming languages, with more coming. In general, clients are compatible with clusters running the same major version of OpenSearch (`major.minor.patch`).

For example, a 1.0.0 client works with an OpenSearch 1.1.0 cluster, but might not support any non-breaking API changes in OpenSearch 1.1.0. A 1.2.0 client works with the same cluster, but might allow you to pass unsupported options in certain functions. We recommend using the same version for both, but if your tests pass after a cluster upgrade, you don't necessarily need to upgrade your clients immediately.
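
To check which version a cluster is running before picking a client version, you can query the cluster root endpoint. This is a general sketch; the response shown here is abridged and the version number is only an example:

```json
GET /

{
  "version": {
    "number": "1.1.0",
    ...
  }
}
```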

{% comment %}
* [OpenSearch Java client]({{site.url}}{{site.baseurl}}/clients/java/)
{% endcomment %}
* [OpenSearch Python client]({{site.url}}{{site.baseurl}}/clients/python/)
* [OpenSearch JavaScript (Node.js) client]({{site.url}}{{site.baseurl}}/clients/javascript/)
* [OpenSearch Go client]({{site.url}}{{site.baseurl}}/clients/go/)


## Legacy clients

Most clients that work with Elasticsearch OSS 7.10.2 *should* work with OpenSearch, but the latest versions of those clients might include license or version checks that artificially break compatibility. This page includes recommendations around which versions of those clients to use for best compatibility with OpenSearch.

Client | Recommended version

@@ -18,7 +32,7 @@ Client | Recommended version
[Python Elasticsearch client](https://pypi.org/project/elasticsearch/7.13.4/) | 7.13.4
[Elasticsearch Node.js client](https://www.npmjs.com/package/@elastic/elasticsearch/v/7.13.0) | 7.13.0

If you test a legacy client and verify that it works, please [submit a PR](https://github.com/opensearch-project/documentation-website/pulls) and add it to this table.


{% comment %}

@@ -1,10 +1,10 @@
---
layout: default
title: Elasticsearch OSS Java high-level REST client
nav_order: 60
---

# Elasticsearch OSS Java high-level REST client

The Elasticsearch OSS Java high-level REST client allows you to interact with your OpenSearch clusters and indices through Java methods and data structures rather than HTTP methods and JSON.

@@ -22,7 +22,7 @@ To start using the Elasticsearch OSS Java high-level REST client, ensure that yo
</dependency>
```

You can now start your OpenSearch cluster. The 7.10.2 Elasticsearch OSS high-level REST client works with the 1.x versions of OpenSearch.

## Sample code

@@ -0,0 +1,141 @@
---
layout: default
title: JavaScript client
nav_order: 90
---

# JavaScript client

The OpenSearch JavaScript client provides a safer and easier way to interact with your OpenSearch cluster. Rather than using OpenSearch from the browser and potentially exposing your data to the public, you can build an OpenSearch client that takes care of sending requests to your cluster.

The client contains a library of APIs that let you perform different operations on your cluster and return a standard response body. The example here demonstrates some basic operations like creating an index, adding documents, and searching your data.

## Setup

To add the client to your project, install it from [npm](https://www.npmjs.com):

```bash
npm install @opensearch-project/opensearch
```

To install a specific major version of the client, run the following command:

```bash
npm install @opensearch-project/opensearch@<version>
```

If you prefer to add the client manually or just want to examine the source code, see [opensearch-js](https://github.com/opensearch-project/opensearch-js) on GitHub.

Then require the client:

```javascript
const { Client } = require("@opensearch-project/opensearch");
```

## Sample code

```javascript
"use strict";

var host = "localhost";
var protocol = "https";
var port = 9200;
var auth = "admin:admin"; // For testing only. Don't store credentials in code.
var ca_certs_path = "/full/path/to/root-ca.pem";

// Optional client certificates if you don't want to use HTTP basic authentication.
// var client_cert_path = '/full/path/to/client.pem'
// var client_key_path = '/full/path/to/client-key.pem'

// Create a client with SSL/TLS enabled.
var { Client } = require("@opensearch-project/opensearch");
var fs = require("fs");
var client = new Client({
  node: protocol + "://" + auth + "@" + host + ":" + port,
  ssl: {
    ca: fs.readFileSync(ca_certs_path),
    // You can turn off certificate verification (rejectUnauthorized: false) if you're using self-signed certificates with a hostname mismatch.
    // cert: fs.readFileSync(client_cert_path),
    // key: fs.readFileSync(client_key_path)
  },
});

async function search() {
  // Create an index with non-default settings.
  var index_name = "books";
  var settings = {
    settings: {
      index: {
        number_of_shards: 4,
        number_of_replicas: 3,
      },
    },
  };

  var response = await client.indices.create({
    index: index_name,
    body: settings,
  });

  console.log("Creating index:");
  console.log(response.body);

  // Add a document to the index.
  var document = {
    title: "The Outsider",
    author: "Stephen King",
    year: "2018",
    genre: "Crime fiction",
  };

  var id = "1";

  var response = await client.index({
    id: id,
    index: index_name,
    body: document,
    refresh: true,
  });

  console.log("Adding document:");
  console.log(response.body);

  // Search for the document.
  var query = {
    query: {
      match: {
        title: {
          query: "The Outsider",
        },
      },
    },
  };

  var response = await client.search({
    index: index_name,
    body: query,
  });

  console.log("Search results:");
  console.log(response.body.hits);

  // Delete the document.
  var response = await client.delete({
    index: index_name,
    id: id,
  });

  console.log("Deleting document:");
  console.log(response.body);

  // Delete the index.
  var response = await client.indices.delete({
    index: index_name,
  });

  console.log("Deleting index:");
  console.log(response.body);
}

search().catch(console.log);
```

@@ -0,0 +1,128 @@
---
layout: default
title: Python client
nav_order: 70
---

# Python client

The OpenSearch Python client provides a more natural syntax for interacting with your cluster. Rather than sending HTTP requests to a given URL, you can create an OpenSearch client for your cluster and call the client's built-in functions.

{% comment %}
`opensearch-py` is the lower-level of the two Python clients. If you want a general client for assorted operations, it's a great choice. If you want a higher-level client strictly for indexing and search operations, consider [opensearch-dsl-py]({{site.url}}{{site.baseurl}}/clients/python-dsl/).
{% endcomment %}


## Setup

To add the client to your project, install it using [pip](https://pip.pypa.io/):

```bash
pip install opensearch-py
```

Then import it like any other module:

```python
from opensearchpy import OpenSearch
```

If you prefer to add the client manually or just want to examine the source code, see [opensearch-py on GitHub](https://github.com/opensearch-project/opensearch-py).


## Sample code

```python
from opensearchpy import OpenSearch

host = 'localhost'
port = 9200
auth = ('admin', 'admin') # For testing only. Don't store credentials in code.
ca_certs_path = '/full/path/to/root-ca.pem' # Provide a CA bundle if you use intermediate CAs with your root CA.

# Optional client certificates if you don't want to use HTTP basic authentication.
# client_cert_path = '/full/path/to/client.pem'
# client_key_path = '/full/path/to/client-key.pem'

# Create the client with SSL/TLS enabled, but hostname verification disabled.
client = OpenSearch(
    hosts = [{'host': host, 'port': port}],
    http_compress = True, # enables gzip compression for request bodies
    http_auth = auth,
    # client_cert = client_cert_path,
    # client_key = client_key_path,
    use_ssl = True,
    verify_certs = True,
    ssl_assert_hostname = False,
    ssl_show_warn = False,
    ca_certs = ca_certs_path
)

# Create an index with non-default settings.
index_name = 'python-test-index'
index_body = {
  'settings': {
    'index': {
      'number_of_shards': 4
    }
  }
}

response = client.indices.create(index_name, body=index_body)
print('\nCreating index:')
print(response)

# Add a document to the index.
document = {
  'title': 'Moneyball',
  'director': 'Bennett Miller',
  'year': '2011'
}
id = '1'

response = client.index(
    index = index_name,
    body = document,
    id = id,
    refresh = True
)

print('\nAdding document:')
print(response)

# Search for the document.
q = 'miller'
query = {
  'size': 5,
  'query': {
    'multi_match': {
      'query': q,
      'fields': ['title^2', 'director']
    }
  }
}

response = client.search(
    body = query,
    index = index_name
)
print('\nSearch results:')
print(response)

# Delete the document.
response = client.delete(
    index = index_name,
    id = id
)

print('\nDeleting document:')
print(response)

# Delete the index.
response = client.indices.delete(
    index = index_name
)

print('\nDeleting index:')
print(response)
```

@@ -90,36 +90,36 @@ You can specify the following options.

Options | Description | Type | Required
:--- | :--- | :--- | :---
`source_index` | The name of the source index to roll up. | String | Yes
`target_index` | Specify the target index that the rolled up data is ingested into. You could either create a new target index or use an existing index. The target index cannot be a combination of raw and rolled up data. | String | Yes
`schedule` | Schedule of the index rollup job, which can be an interval or a cron expression. | Object | Yes
`schedule.interval` | Specify the frequency of execution of the rollup job. | Object | No
`schedule.interval.start_time` | Start time of the interval. | Timestamp | Yes
`schedule.interval.period` | Define the interval period. | String | Yes
`schedule.interval.unit` | Specify the time unit of the interval. | String | Yes
`schedule.interval.cron` | Optionally, specify a cron expression to define the rollup frequency. | List | No
`schedule.interval.cron.expression` | Specify a Unix cron expression. | String | Yes
`schedule.interval.cron.timezone` | Specify timezones as defined by the IANA Time Zone Database. Defaults to UTC. | String | No
`description` | Optionally, describe the rollup job. | String | No
`enabled` | When true, the index rollup job is scheduled. Default is true. | Boolean | Yes
`continuous` | Specify whether or not the index rollup job continuously rolls up data forever or just executes over the current data set once and stops. Default is false. | Boolean | Yes
`error_notification` | Set up a Mustache message template sent for error notifications. For example, if an index rollup job fails, the system sends a message to a Slack channel. | Object | No
`page_size` | Specify the number of buckets to paginate through at a time while rolling up. | Number | Yes
`delay` | The number of milliseconds to delay execution of the index rollup job. | Long | No
`dimensions` | Specify aggregations to create dimensions for the roll up time window. | Object | Yes
`dimensions.date_histogram` | Specify either fixed_interval or calendar_interval, but not both. Either one limits what you can query in the target index. | Object | No
`dimensions.date_histogram.fixed_interval` | Specify the fixed interval for aggregations in milliseconds, seconds, minutes, hours, or days. | String | No
`dimensions.date_histogram.calendar_interval` | Specify the calendar interval for aggregations in minutes, hours, days, weeks, months, quarters, or years. | String | No
`dimensions.date_histogram.field` | Specify the date field used in the date histogram aggregation. | String | No
`dimensions.date_histogram.timezone` | Specify the timezones as defined by the IANA Time Zone Database. The default is UTC. | String | No
`dimensions.terms` | Specify the term aggregations that you want to roll up. | Object | No
`dimensions.terms.fields` | Specify terms aggregation for compatible fields. | Object | No
`dimensions.histogram` | Specify the histogram aggregations that you want to roll up. | Object | No
`dimensions.histogram.field` | Add a field for histogram aggregations. | String | Yes
`dimensions.histogram.interval` | Specify the histogram aggregation interval for the field. | Long | Yes
`dimensions.metrics` | Specify a list of objects that represent the fields and metrics that you want to calculate. | Nested object | No
`dimensions.metrics.field` | Specify the field that you want to perform metric aggregations on. | String | No
`dimensions.metrics.field.metrics` | Specify the metric aggregations you want to calculate for the field. | Multiple strings | No


#### Sample response

@@ -55,6 +55,8 @@ PUT _plugins/_ism/policies/policy_id
}
```

If you have more than one template that matches an index pattern, ISM uses the priority value to determine which template to apply.

For an example ISM template policy, see [Sample policy with ISM template]({{site.url}}{{site.baseurl}}/im-plugin/ism/policies#sample-policy-with-ism-template).
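
As a rough sketch (the policy ID, index pattern, and single empty state here are placeholders, not a working lifecycle), a policy carries its template and priority in an `ism_template` object:

```json
PUT _plugins/_ism/policies/example_policy
{
  "policy": {
    "description": "Hypothetical policy attached through an ISM template",
    "default_state": "hot",
    "states": [
      {
        "name": "hot",
        "actions": [],
        "transitions": []
      }
    ],
    "ism_template": {
      "index_patterns": ["log-*"],
      "priority": 100
    }
  }
}
```

When two policies' templates both match a new index, the one with the higher `priority` wins.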

Older versions of the plugin include the `policy_id` in an index template, so when an index is created that matches the index template pattern, the index will have the policy attached to it:

@@ -89,6 +91,7 @@ Make sure that the alias that you enter already exists. For more information abo

After you attach a policy to an index, ISM creates a job that runs every 5 minutes by default to perform policy actions, check conditions, and transition the index into different states. To change the default time interval for this job, see [Settings]({{site.url}}{{site.baseurl}}/im-plugin/ism/settings/).
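
For example, assuming the `plugins.index_state_management.job_interval` setting described on that page, a cluster settings request along these lines changes the interval (in minutes):

```json
PUT _cluster/settings
{
  "persistent": {
    "plugins.index_state_management.job_interval": 1
  }
}
```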

ISM does not run jobs if the cluster state is red.

### Step 3: Manage indices

@@ -1,6 +1,6 @@
---
layout: default
title: Create PerfTop Dashboards
parent: Performance Analyzer
nav_order: 2
---

@@ -48,11 +48,11 @@ Otherwise, just specify the OpenSearch endpoint:
./opensearch-perf-top-macos --dashboard dashboards/<dashboard>.json --endpoint my-cluster.my-domain.com
```

PerfTop has four pre-built dashboards in the `dashboards` directory, but you can also [create your own]({{site.url}}{{site.baseurl}}/monitoring-plugins/pa/dashboards/).

You can also load the pre-built dashboards (ClusterOverview, ClusterNetworkMemoryAnalysis, ClusterThreadAnalysis, or NodeAnalysis) without the JSON files, such as `--dashboard ClusterThreadAnalysis`.

PerfTop has no interactivity. Start the application, monitor the dashboard, and press Esc, Q, or Ctrl + C to quit.
{: .note }

@@ -65,4 +65,12 @@ PUT _cluster/settings

You can find `opensearch.yml` in `/usr/share/opensearch/config/opensearch.yml` (Docker) or `/etc/opensearch/opensearch.yml` (most Linux distributions) on each node.

You don't mark settings in `opensearch.yml` as persistent or transient, and settings use the flat form:

```yml
cluster.name: my-application
action.auto_create_index: true
compatibility.override_main_response_version: true
```

The demo configuration includes a number of settings for the security plugin that you should modify before using OpenSearch for a production workload. To learn more, see [Security]({{site.url}}{{site.baseurl}}/security-plugin/).

@@ -31,16 +31,16 @@ The default Helm chart deploys a three-node cluster. We recommend that you have

## Install OpenSearch using Helm

1. Clone the [helm-charts](https://github.com/opensearch-project/helm-charts) repository:

   ```bash
   git clone https://github.com/opensearch-project/helm-charts
   ```

1. Change to the `opensearch` directory:

   ```bash
   cd charts/opensearch
   ```

1. Package the Helm chart:

@@ -111,7 +111,7 @@ In a tarball installation, Performance Analyzer collects data when it is enabled

1. Launch the agent CLI:

   ```bash
   OPENSEARCH_HOME="$PWD" ./bin/performance-analyzer-agent-cli
   ```

1. In a separate window, enable the Performance Analyzer plugin:

@@ -5,8 +5,11 @@ nav_order: 27
has_children: true
redirect_from:
  - /opensearch/query-dsl/
  - /docs/opensearch/query-dsl/
---

{%- comment -%}The `/docs/opensearch/query-dsl/` redirect is specifically to support the UI links in OpenSearch Dashboards 1.0.0.{%- endcomment -%}

# Query DSL

While you can use HTTP request parameters to perform simple searches, you can also use the OpenSearch query domain-specific language (DSL), which provides a wider range of search options. The query DSL uses the HTTP request body, so you can more easily customize your queries to get the exact results that you want.
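
For a sense of the difference, a URI search such as `GET my-index/_search?q=title:wind` (the index and field names here are hypothetical) maps to a request-body query like this:

```json
GET my-index/_search
{
  "query": {
    "match": {
      "title": "wind"
    }
  }
}
```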

@@ -156,28 +156,6 @@ POST _reindex
}
```

## Reindex sorted documents

You can sort documents by a specific field and copy only a subset of them.

This command copies the last 10 documents, sorted by the `timestamp` field in descending order:

```json
POST _reindex
{
  "size": 10,
  "source": {
    "index": "source",
    "sort": {
      "timestamp": "desc"
    }
  },
  "dest": {
    "index": "destination"
  }
}
```

## Transform documents during reindexing

You can transform your data during the reindexing process using the `script` option.
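
As a minimal sketch (the index names and the `year` field are hypothetical), a Painless script can modify each document as it's copied:

```json
POST _reindex
{
  "source": {
    "index": "source"
  },
  "dest": {
    "index": "destination"
  },
  "script": {
    "lang": "painless",
    "source": "ctx._source.year += 1"
  }
}
```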

@@ -272,7 +250,6 @@ Option | Valid values | Description | Required
`query` | Object | The search query to use for the reindex operation. | No
`size` | Integer | The number of documents to reindex. | No
`slice` | String | Specify manual or automatic slicing to parallelize reindexing. | No
`sort` | List | Sort specific fields in the document before reindexing. | No

## Destination index options


@@ -0,0 +1,103 @@
---
layout: default
title: Count
parent: REST API reference
nav_order: 150
---

# Count
Introduced 1.0
{: .label .label-purple }

The count API gives you quick access to the number of documents that match a query. You can also use it to check the document count of an index, data stream, or cluster.

## Example

To see the number of documents that match a query:

```json
GET opensearch_dashboards_sample_data_logs/_count
{
  "query": {
    "term": {
      "response": "200"
    }
  }
}
```

The following call to the search API produces equivalent results:

```json
GET opensearch_dashboards_sample_data_logs/_search
{
  "query": {
    "term": {
      "response": "200"
    }
  },
  "size": 0,
  "track_total_hits": true
}
```

To see the number of documents in an index:

```json
GET opensearch_dashboards_sample_data_logs/_count
```

To check for the number of documents in a [data stream]({{site.url}}{{site.baseurl}}/opensearch/data-streams/), replace the index name with the data stream name.
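
For example, with a hypothetical data stream named `logs-nginx`:

```json
GET logs-nginx/_count
```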

To see the number of documents in your cluster:

```json
GET _count
```

Alternatively, you could use the [cat indices]({{site.url}}{{site.baseurl}}/opensearch/rest-api/cat/cat-indices/) and [cat count]({{site.url}}{{site.baseurl}}/opensearch/rest-api/cat/cat-count/) APIs to see the number of documents per index or data stream.
{: .note }

## Path and HTTP methods

```
GET <target>/_count
POST <target>/_count
```

## URL parameters

All count parameters are optional.

Parameter | Type | Description
:--- | :--- | :---
`allow_no_indices` | Boolean | If false, the request returns an error if any wildcard expression or index alias targets any closed or missing indices. Default is false.
`analyzer` | String | The analyzer to use in the query string.
`analyze_wildcard` | Boolean | Specifies whether to analyze wildcard and prefix queries. Default is false.
`default_operator` | String | Indicates whether the default operator for a string query should be AND or OR. Default is OR.
`df` | String | The default field in case a field prefix is not provided in the query string.
`expand_wildcards` | String | Specifies the type of index that wildcard expressions can match. Supports comma-separated values. Valid values are `all` (match any index), `open` (match open, non-hidden indices), `closed` (match closed, non-hidden indices), `hidden` (match hidden indices), and `none` (deny wildcard expressions). Default is `open`.
`ignore_unavailable` | Boolean | Specifies whether to include missing or closed indices in the response. Default is false.
`lenient` | Boolean | Specifies whether OpenSearch should accept requests if queries have format errors (for example, querying a text field for an integer). Default is false.
`min_score` | Float | Include only documents with a minimum `_score` value in the result.
`routing` | String | Value used to route the operation to a specific shard.
`preference` | String | Specifies which shard or node OpenSearch should perform the count operation on.
`terminate_after` | Integer | The maximum number of documents OpenSearch should process before terminating the request.

## Response

```json
{
  "count" : 14074,
  "_shards" : {
    "total" : 1,
    "successful" : 1,
    "skipped" : 0,
    "failed" : 0
  }
}
```

@@ -96,10 +96,10 @@ index.auto_expand_replicas | Whether the cluster should automatically add replic
index.search.idle.after | Amount of time a shard should wait for a search or get request until it goes idle. Default is `30s`.
index.refresh_interval | How often the index should refresh, which publishes its most recent changes and makes them available for searching. Can be set to `-1` to disable refreshing. Default is `1s`.
index.max_result_window | The maximum value of `from` + `size` for searches to the index. `from` is the starting index to search from, and `size` is the amount of results to return. Default: 10000.
index.max_inner_result_window | Maximum value of `from` + `size` to return nested search hits and most relevant document aggregated during the query. `from` is the starting index to search from, and `size` is the amount of top hits to return. Default is 100.
index.max_rescore_window | The maximum value of `window_size` for rescore requests to the index. Rescore requests reorder the index's documents and return a new score, which can be more precise. Default is the same as index.max_inner_result_window or 10000 by default.
index.max_docvalue_fields_search | Maximum amount of `docvalue_fields` allowed in a query. Default is 100.
index.max_script_fields | Maximum amount of `script_fields` allowed in a query. Default is 32.
index.max_ngram_diff | Maximum difference between `min_gram` and `max_gram` values for `NGramTokenizer` and `NGramTokenFilter` fields. Default is 1.
index.max_shingle_diff | Maximum difference between `max_shingle_size` and `min_shingle_size` to feed into the `shingle` token filter. Default is 3.
index.max_refresh_listeners | Maximum amount of refresh listeners each shard is allowed to have.

@@ -109,12 +109,12 @@ index.max_terms_count | The maximum amount of terms a terms query can accept. De
index.max_regex_length | The maximum character length of regex that can be in a regexp query. Default is 1000.
index.query.default_field | A field or list of fields that OpenSearch uses in queries in case a field isn't specified in the parameters.
index.routing.allocation.enable | Specifies options for the index's shard allocation. Available options are all (allow allocation for all shards), primaries (allow allocation only for primary shards), new_primaries (allow allocation only for new primary shards), and none (do not allow allocation). Default is all.
index.routing.rebalance.enable | Enables shard rebalancing for the index. Available options are `all` (allow rebalancing for all shards), `primaries` (allow rebalancing only for primary shards), `replicas` (allow rebalancing only for replicas), and `none` (do not allow rebalancing). Default is `all`.
index.gc_deletes | Amount of time to retain a deleted document's version number. Default is `60s`.
index.default_pipeline | The default ingest node pipeline for the index. If the default pipeline is set and the pipeline does not exist, then index requests fail. The pipeline name `_none` specifies that the index does not have an ingest pipeline.
index.final_pipeline | The final ingest node pipeline for the index. If the final pipeline is set and the pipeline does not exist, then index requests fail. The pipeline name `_none` specifies that the index does not have an ingest pipeline.


### Mappings

Mappings define how a document and its fields are stored and indexed. If you're just starting to build out your cluster and data, you may not know exactly how your data should be stored. In those cases, you can use dynamic mappings, which tell OpenSearch to dynamically add data and their fields. However, if you know exactly what types your data falls under and want to enforce that standard, then you can use explicit mappings.
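
As a quick sketch of an explicit mapping (the index and field names are hypothetical), you declare field types up front when you create the index:

```json
PUT movies
{
  "mappings": {
    "properties": {
      "title": { "type": "text" },
      "year": { "type": "integer" }
    }
  }
}
```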


@@ -0,0 +1,121 @@
---
layout: default
title: Reindex
parent: Document APIs
grand_parent: REST API reference
nav_order: 60
---

# Reindex
Introduced 1.0
{: .label .label-purple}

The reindex API operation lets you copy all or a subset of your data from a source index into a destination index.

## Example

```json
POST /_reindex
{
  "source": {
    "index": "my-source-index"
  },
  "dest": {
    "index": "my-destination-index"
  }
}
```

## Path and HTTP methods

```
POST /_reindex
```

## URL parameters

All URL parameters are optional.

Parameter | Type | Description
:--- | :--- | :---
refresh | Boolean | If true, OpenSearch refreshes shards to make the reindex operation available to search results. Valid options are `true`, `false`, and `wait_for`, which tells OpenSearch to wait for a refresh before executing the operation. Default is `false`.
timeout | Time | How long to wait for a response from the cluster. Default is `30s`.
wait_for_active_shards | String | The number of active shards that must be available before OpenSearch processes the reindex request. Default is 1 (only the primary shard). Set to `all` or a positive integer. Values greater than 1 require replicas. For example, if you specify a value of 3, the index must have two replicas distributed across two additional nodes for the operation to succeed.
wait_for_completion | Boolean | Waits for the matching tasks to complete. Default is `false`.
requests_per_second | Integer | Specifies the request's throttling in sub-requests per second. Default is -1, which means no throttling.
require_alias | Boolean | Whether the destination index must be an index alias. Default is false.
scroll | Time | How long to keep the search context open. Default is `5m`.
slices | Integer | Number of sub-tasks OpenSearch should divide this task into. Default is 1, which means OpenSearch should not divide this task. Setting this parameter to `auto` indicates to OpenSearch that it should automatically decide how many slices to split the task into.
max_docs | Integer | The maximum number of documents the reindex operation should process. Default is all documents.

## Request body

Your request body must contain the names of the source index and destination index. All other fields are optional.

Field | Description
:--- | :---
conflicts | Indicates to OpenSearch what should happen if the reindex operation runs into a version conflict. Valid options are `abort` and `proceed`. Default is abort.
source | Information about the source index to include. Valid fields are `index`, `max_docs`, `query`, `remote`, `size`, `slice`, and `_source`.
index | The name of the source index to copy data from.
max_docs | The maximum number of documents to reindex.
query | The search query to use for the reindex operation.
remote | Information about a remote OpenSearch cluster to copy data from. Valid fields are `host`, `username`, `password`, `socket_timeout`, and `connect_timeout`.
host | Host URL of the OpenSearch cluster to copy data from.
username | Username to authenticate with the remote cluster.
password | Password to authenticate with the remote cluster.
socket_timeout | The wait time for socket reads. Default is 30s.
connect_timeout | The wait time for remote connection timeouts. Default is 30s.
size | The number of documents to reindex.
slice | Whether to manually or automatically slice the reindex operation so it executes in parallel.
_source | Whether to reindex source fields. Specify a list of fields to reindex or true to reindex all fields. Default is true.
id | The ID to associate with manual slicing.
max | Maximum number of slices.
dest | Information about the destination index. Valid values are `index`, `version_type`, and `op_type`.
index | Name of the destination index.
version_type | The indexing operation's version type. Valid values are `internal`, `external`, `external_gt` (retrieve the document if the specified version number is greater than the document's current version), and `external_gte` (retrieve the document if the specified version number is greater than or equal to the document's current version).
op_type | Whether to copy over documents that are missing in the destination index. Valid values are `create` (ignore documents with the same ID from the source index) and `index` (copy everything from the source index).
script | A script that OpenSearch uses to apply transformations to the data during the reindex operation.
source | The actual script that OpenSearch runs.
lang | The scripting language. Valid options are `painless`, `expression`, `mustache`, and `java`.
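
Putting several of these fields together, a sketch of a reindex from a remote cluster might look like the following (the host and index names are placeholders):

```json
POST /_reindex
{
  "source": {
    "remote": {
      "host": "https://remote-host:9200",
      "username": "admin",
      "password": "admin"
    },
    "index": "my-source-index"
  },
  "dest": {
    "index": "my-destination-index"
  }
}
```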

## Response

```json
{
  "took": 28829,
  "timed_out": false,
  "total": 111396,
  "updated": 0,
  "created": 111396,
  "deleted": 0,
  "batches": 112,
  "version_conflicts": 0,
  "noops": 0,
  "retries": {
    "bulk": 0,
    "search": 0
  },
  "throttled_millis": 0,
  "requests_per_second": -1.0,
  "throttled_until_millis": 0,
  "failures": []
}
```

## Response body fields

Field | Description
:--- | :---
took | How long the operation took in milliseconds.
timed_out | Whether the operation timed out.
total | The total number of documents processed.
updated | The number of documents updated in the destination index.
created | The number of documents created in the destination index.
deleted | The number of documents deleted.
batches | Number of scroll responses.
version_conflicts | Number of version conflicts.
noops | How many documents OpenSearch ignored during the operation.
retries | Number of bulk and search retry requests.
throttled_millis | Number of throttled milliseconds during the request.
requests_per_second | Number of requests executed per second during the operation.
throttled_until_millis | The amount of time until OpenSearch executes the next throttled request.
failures | Any failures that occurred during the operation.

@@ -80,7 +80,7 @@ wait_for_active_shards | String | The number of shards that must be active befor

## Request body

To update your indices and documents by query, you must include a [query]({{site.url}}{{site.baseurl}}/opensearch/query-dsl/index) and a script in the request body that OpenSearch can run to update your documents. If you don't specify a query, then every document in the index gets updated.

```json
{

@@ -0,0 +1,40 @@
---
layout: default
title: Remote cluster information
parent: REST API reference
nav_order: 25
---

# Remote cluster information
Introduced 1.0
{: .label .label-purple }

This operation provides connection information for any remote OpenSearch clusters that you've configured for the local cluster, such as the remote cluster alias, connection mode (`sniff` or `proxy`), IP addresses for seed nodes, and timeout settings.

The response is more comprehensive and useful than a call to `_cluster/settings`, which only includes the cluster alias and seed nodes.

## Path and HTTP methods

```
GET _remote/info
```

## Response

```json
{
  "opensearch-cluster2": {
    "connected": true,
    "mode": "sniff",
    "seeds": [
      "172.28.0.2:9300"
    ],
    "num_nodes_connected": 1,
    "max_connections_per_cluster": 3,
    "initial_connect_timeout": "30s",
    "skip_unavailable": false
  }
}
```

@@ -1,7 +1,7 @@
---
layout: default
title: API
parent: Access control
nav_order: 90
---

@ -1159,6 +1159,12 @@ Introduced 1.0
|
|||
|
||||
Updates the existing configuration using the REST API. This operation can easily break your existing configuration, so we recommend using `securityadmin.sh` instead, which is far safer. See [Access control for the API](#access-control-for-the-api) for how to enable this operation.
|
||||
|
||||
Before you can execute the operation, you must first add the following line to `opensearch.yml`:
|
||||
|
||||
```yml
|
||||
plugins.security.unsupported.restapi.allow_securityconfig_modification: true
|
||||
```
|
||||
|
||||
#### Request
|
||||
|
||||
```json
|
||||
|
@ -1179,6 +1185,106 @@ PATCH _plugins/_security/api/securityconfig
|
|||
}
|
||||
```
|
||||
|
||||
---
|
||||
|
||||
## Distinguished names
|
||||
|
||||
These REST APIs let a super admin add, retrieve, update, or delete any distinguished names from an allow list to enable communication between clusters and/or nodes.
|
||||
|
||||
Before you can use the REST API to configure the allow list, you must first add the following line to `opensearch.yml`:
|
||||
|
||||
```yml
|
||||
plugins.security.nodes_dn_dynamic_config_enabled: true
|
||||
```
|
||||
|
||||
|
||||
### Get distinguished names
|
||||
|
||||
Retrieves all distinguished names in the allow list.
|
||||
|
||||
#### Request
|
||||
|
||||
```
|
||||
GET _plugins/_security/api/nodesdn
|
||||
```
|
||||
|
||||
#### Sample response
|
||||
|
||||
```json
|
||||
{
|
||||
"cluster1": {
|
||||
"nodes_dn": [
|
||||
"CN=cluster1.example.com"
|
||||
]
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
To get the distinguished names from a specific cluster's or node's allow list, include the cluster's name in the request path.
|
||||
|
||||
#### Request
|
||||
|
||||
```
|
||||
GET _plugins/_security/api/nodesdn/<cluster-name>
|
||||
```
|
||||
|
||||
#### Sample response
|
||||
|
||||
```json
|
||||
{
|
||||
"cluster3": {
|
||||
"nodes_dn": [
|
||||
"CN=cluster3.example.com"
|
||||
]
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
|
||||
### Update distinguished names
|
||||
|
||||
Adds or updates the specified distinguished names in the cluster's or node's allow list.
|
||||
|
||||
#### Request
|
||||
|
||||
```json
|
||||
PUT _plugins/_security/api/nodesdn/<cluster-name>
|
||||
{
|
||||
"nodes_dn": [
|
||||
"CN=cluster3.example.com"
|
||||
]
|
||||
}
|
||||
```
|
||||
|
||||
#### Sample response
|
||||
|
||||
```json
|
||||
{
|
||||
"status": "CREATED",
|
||||
"message": "'cluster3' created."
|
||||
}
|
||||
```
|
||||
|
||||
|
||||
### Delete distinguished names
|
||||
|
||||
Deletes all distinguished names in the specified cluster's or node's allow list.
|
||||
|
||||
#### Request
|
||||
|
||||
```
|
||||
DELETE _plugins/_security/api/nodesdn/<cluster-name>
|
||||
```
|
||||
|
||||
#### Sample response
|
||||
|
||||
```json
|
||||
{
|
||||
"status": "OK",
|
||||
"message": "'cluster3' deleted."
|
||||
}
|
||||
```
|
||||
|
||||
|
||||
---
|
||||
|
||||
|
@@ -1188,101 +1294,38 @@ PATCH _plugins/_security/api/securityconfig
### Get configuration
Introduced 1.0
{: .label .label-purple }

Retrieves the current security plugin configuration in JSON format.

#### Request

```
GET _plugins/_security/api/securityconfig
```


### Update configuration
Introduced 1.0
{: .label .label-purple }

Creates or updates the existing configuration using the REST API rather than `securityadmin.sh`. This operation can easily break your existing configuration, so we recommend using `securityadmin.sh` instead. See [Access control for the API](#access-control-for-the-api) for how to enable this operation.

#### Request

```json
PUT _plugins/_security/api/securityconfig/config
{
  "dynamic": {
    "filtered_alias_mode": "warn",
    "disable_rest_auth": false,
    "disable_intertransport_auth": false,
    "respect_request_indices_options": false,
    "opensearch-dashboards": {
      "multitenancy_enabled": true,
      "server_username": "kibanaserver",
      "index": ".opensearch-dashboards"
    },
    "http": {
      "anonymous_auth_enabled": false
    },
    "authc": {
      "basic_internal_auth_domain": {
        "http_enabled": true,
        "transport_enabled": true,
        "order": 0,
        "http_authenticator": {
          "challenge": true,
          "type": "basic",
          "config": {}
        },
        "authentication_backend": {
          "type": "intern",
          "config": {}
        },
        "description": "Authenticate via HTTP Basic against internal users database"
      }
    },
    "auth_failure_listeners": {},
    "do_not_fail_on_forbidden": false,
    "multi_rolespan_enabled": true,
    "hosts_resolver_mode": "ip-only",
    "do_not_fail_on_forbidden_empty": false
  }
}
```

#### Sample response

```json
{
  "status": "OK",
  "message": "'config' updated."
}
```


### Certificates
Introduced 1.0
{: .label .label-purple }

Retrieves the cluster's security certificates.

#### Request

```
GET _opendistro/_security/api/ssl/certs
```

#### Sample response

```json
{
  "http_certificates_list": [
    {
      "issuer_dn": "CN=Example Com Inc. Root CA,OU=Example Com Inc. Root CA,O=Example Com Inc.,DC=example,DC=com",
      "subject_dn": "CN=node-0.example.com,OU=node,O=node,L=test,DC=de",
      "san": "[[8, 1.2.3.4.5.5], [2, node-0.example.com]",
      "not_before": "2018-04-22T03:43:47Z",
      "not_after": "2028-04-19T03:43:47Z"
    }
  ],
  "transport_certificates_list": [
    {
      "issuer_dn": "CN=Example Com Inc. Root CA,OU=Example Com Inc. Root CA,O=Example Com Inc.,DC=example,DC=com",
      "subject_dn": "CN=node-0.example.com,OU=node,O=node,L=test,DC=de",
      "san": "[[8, 1.2.3.4.5.5], [2, node-0.example.com]",
      "not_before": "2018-04-22T03:43:47Z",
      "not_after": "2028-04-19T03:43:47Z"
    }
  ]
}
```


### Patch configuration
Introduced 1.0
{: .label .label-purple }

Updates the existing configuration using the REST API rather than `securityadmin.sh`. This operation can easily break your existing configuration, so we recommend using `securityadmin.sh` instead. See [Access control for the API](#access-control-for-the-api) for how to enable this operation.

#### Request

```json
PATCH _plugins/_security/api/securityconfig
[
  {
    "op": "replace", "path": "/config/dynamic/authc/basic_internal_auth_domain/transport_enabled", "value": "true"
  }
]
```

#### Sample response

```json
{
  "status": "OK",
  "message": "Resource updated."
}
```

---

## Cache

@@ -1,7 +1,7 @@
---
layout: default
title: Cross-cluster search
parent: Access control
nav_order: 40
---

@@ -65,11 +65,11 @@ Save this file as `docker-compose.yml` and run `docker-compose up` to start two

```yml
version: '3'
services:
  opensearch-ccs-node1:
    image: opensearchproject/opensearch:{{site.opensearch_version}}
    container_name: opensearch-ccs-node1
    environment:
      - cluster.name=opensearch-ccs-cluster1
      - discovery.type=single-node
      - bootstrap.memory_lock=true # along with the memlock settings below, disables swapping
      - "OPENSEARCH_JAVA_OPTS=-Xms512m -Xmx512m" # minimum and maximum Java heap size, recommend setting both to 50% of system RAM
@ -85,11 +85,11 @@ services:
|
|||
networks:
|
||||
- opensearch-net
|
||||
|
||||
opensearch-node2:
|
||||
opensearch-ccs-node2:
|
||||
image: opensearchproject/opensearch:{{site.opensearch_version}}
|
||||
container_name: opensearch-node2
|
||||
container_name: opensearch-ccs-node2
|
||||
environment:
|
||||
- cluster.name=opensearch-cluster2
|
||||
- cluster.name=opensearch-ccs-cluster2
|
||||
- discovery.type=single-node
|
||||
- bootstrap.memory_lock=true # along with the memlock settings below, disables swapping
|
||||
- "OPENSEARCH_JAVA_OPTS=-Xms512m -Xmx512m" # minimum and maximum Java heap size, recommend setting both to 50% of system RAM
|
||||
|
@@ -118,26 +118,26 @@ After the clusters start, verify the names of each:
```json
curl -XGET -u 'admin:admin' -k 'https://localhost:9200'
{
-  "cluster_name" : "opensearch-cluster1",
+  "cluster_name" : "opensearch-ccs-cluster1",
  ...
}

curl -XGET -u 'admin:admin' -k 'https://localhost:9250'
{
-  "cluster_name" : "opensearch-cluster2",
+  "cluster_name" : "opensearch-ccs-cluster2",
  ...
}
```

-Both clusters run on `localhost`, so the important identifier is the port number. In this case, use port 9200 (`opensearch-node1`) as the remote cluster, and port 9250 (`opensearch-node2`) as the coordinating cluster.
+Both clusters run on `localhost`, so the important identifier is the port number. In this case, use port 9200 (`opensearch-ccs-node1`) as the remote cluster, and port 9250 (`opensearch-ccs-node2`) as the coordinating cluster.

To get the IP address for the remote cluster, first identify its container ID:

```bash
docker ps
CONTAINER ID    IMAGE                                                       PORTS                                                      NAMES
-6fe89ebc5a8e    opensearchproject/opensearch:{{site.opensearch_version}}   0.0.0.0:9200->9200/tcp, 0.0.0.0:9600->9600/tcp, 9300/tcp   opensearch-node1
-2da08b6c54d8    opensearchproject/opensearch:{{site.opensearch_version}}   9300/tcp, 0.0.0.0:9250->9200/tcp, 0.0.0.0:9700->9600/tcp   opensearch-node2
+6fe89ebc5a8e    opensearchproject/opensearch:{{site.opensearch_version}}   0.0.0.0:9200->9200/tcp, 0.0.0.0:9600->9600/tcp, 9300/tcp   opensearch-ccs-node1
+2da08b6c54d8    opensearchproject/opensearch:{{site.opensearch_version}}   9300/tcp, 0.0.0.0:9250->9200/tcp, 0.0.0.0:9700->9600/tcp   opensearch-ccs-node2
```

Then get that container's IP address:
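
One way to do that, using the container ID from the `docker ps` output above, is to look for `IPAddress` in the `docker inspect` output. The exact command is a sketch; any method of reading the container's network settings works:

```bash
docker inspect 6fe89ebc5a8e | grep IPAddress
```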

@@ -154,7 +154,7 @@ curl -k -XPUT -H 'Content-Type: application/json' -u 'admin:admin' 'https://loca
{
  "persistent": {
    "search.remote": {
-      "opensearch-cluster1": {
+      "opensearch-ccs-cluster1": {
        "seeds": ["172.31.0.3:9300"]
      }
    }
@@ -171,11 +171,11 @@ curl -XPUT -k -H 'Content-Type: application/json' -u 'admin:admin' 'https://loca
At this point, cross-cluster search works. You can test it using the `admin` user:

```bash
-curl -XGET -k -u 'admin:admin' 'https://localhost:9250/opensearch-cluster1:books/_search?pretty'
+curl -XGET -k -u 'admin:admin' 'https://localhost:9250/opensearch-ccs-cluster1:books/_search?pretty'
{
  ...
  "hits": [{
-    "_index": "opensearch-cluster1:books",
+    "_index": "opensearch-ccs-cluster1:books",
    "_type": "_doc",
    "_id": "1",
    "_score": 1.0,
@@ -196,7 +196,7 @@ curl -XPUT -k -u 'admin:admin' 'https://localhost:9250/_plugins/_security/api/in
Then run the same search as before with `booksuser`:

```json
-curl -XGET -k -u booksuser:password 'https://localhost:9250/opensearch-cluster1:books/_search?pretty'
+curl -XGET -k -u booksuser:password 'https://localhost:9250/opensearch-ccs-cluster1:books/_search?pretty'
{
  "error" : {
    "root_cause" : [
@@ -225,11 +225,11 @@ Both clusters must have the user, but only the remote cluster needs the role and
Finally, repeat the search:

```bash
-curl -XGET -k -u booksuser:password 'https://localhost:9250/opensearch-cluster1:books/_search?pretty'
+curl -XGET -k -u booksuser:password 'https://localhost:9250/opensearch-ccs-cluster1:books/_search?pretty'
{
  ...
  "hits": [{
-    "_index": "opensearch-cluster1:books",
+    "_index": "opensearch-ccs-cluster1:books",
    "_type": "_doc",
    "_id": "1",
    "_score": 1.0,
@@ -1,7 +1,7 @@
---
layout: default
-title: Default Action Groups
-parent: Access Control
+title: Default action groups
+parent: Access control
nav_order: 51
---

@@ -1,7 +1,7 @@
---
layout: default
-title: Document-Level Security
-parent: Access Control
+title: Document-level security
+parent: Access control
nav_order: 10
---

@@ -1,7 +1,7 @@
---
layout: default
-title: Field-Level Security
-parent: Access Control
+title: Field-level security
+parent: Access control
nav_order: 11
---

@@ -1,7 +1,7 @@
---
layout: default
-title: Field Masking
-parent: Access Control
+title: Field masking
+parent: Access control
nav_order: 12
---

@@ -1,7 +1,7 @@
---
layout: default
-title: User Impersonation
-parent: Access Control
+title: User impersonation
+parent: Access control
nav_order: 20
---

@@ -41,8 +41,8 @@ plugins.security.authcz.impersonation_dn:

## Impersonating Users

-To impersonate another user, submit a request to the system with the HTTP header `opensearch_security_impersonate_as` set to the name of the user to be impersonated. A good test is to make a GET request to the `_plugins/_security/authinfo` URI:
+To impersonate another user, submit a request to the system with the HTTP header `opendistro_security_impersonate_as` set to the name of the user to be impersonated. A good test is to make a GET request to the `_plugins/_security/authinfo` URI:

```bash
-curl -XGET -u 'admin:admin' -k -H "opensearch_security_impersonate_as: user_1" https://localhost:9200/_plugins/_security/authinfo?pretty
+curl -XGET -u 'admin:admin' -k -H "opendistro_security_impersonate_as: user_1" https://localhost:9200/_plugins/_security/authinfo?pretty
```
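
If the impersonation succeeds, the `authinfo` response describes the impersonated user rather than `admin`. A sketch of the relevant field, assuming `user_1` exists in the internal user database:

```json
{
  "user_name" : "user_1",
  ...
}
```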

@@ -1,6 +1,6 @@
---
layout: default
-title: Access Control
+title: Access control
nav_order: 10
has_children: true
has_toc: false

@@ -1,7 +1,7 @@
---
layout: default
-title: OpenSearch Dashboards Multi-Tenancy
-parent: Access Control
+title: OpenSearch Dashboards multi-tenancy
+parent: Access control
nav_order: 30
---

@@ -47,21 +47,21 @@ Setting | Description
opensearch.username: kibanaserver
opensearch.password: kibanaserver
opensearch.requestHeadersWhitelist: ["securitytenant","Authorization"]
-plugins.security.multitenancy.enabled: true
-plugins.security.multitenancy.tenants.enable_global: true
-plugins.security.multitenancy.tenants.enable_private: true
-plugins.security.multitenancy.tenants.preferred: ["Private", "Global"]
-plugins.security.multitenancy.enable_filter: false
+opensearch_security.multitenancy.enabled: true
+opensearch_security.multitenancy.tenants.enable_global: true
+opensearch_security.multitenancy.tenants.enable_private: true
+opensearch_security.multitenancy.tenants.preferred: ["Private", "Global"]
+opensearch_security.multitenancy.enable_filter: false
```

Setting | Description
:--- | :---
`opensearch.requestHeadersWhitelist` | OpenSearch Dashboards requires that you whitelist all HTTP headers that it passes to OpenSearch. Multi-tenancy uses a specific header, `securitytenant`, that must be present with the standard `Authorization` header. If the `securitytenant` header is not whitelisted, OpenSearch Dashboards starts with a red status.
-`plugins.security.multitenancy.enabled` | Enables or disables multi-tenancy in OpenSearch Dashboards. Default is true.
-`plugins.security.multitenancy.tenants.enable_global` | Enables or disables the global tenant. Default is true.
-`plugins.security.multitenancy.tenants.enable_private` | Enables or disables the private tenant. Default is true.
-`plugins.security.multitenancy.tenants.preferred` | Lets you change ordering in the **Tenants** tab of OpenSearch Dashboards. By default, the list starts with global and private (if enabled) and then proceeds alphabetically. You can add tenants here to move them to the top of the list.
-`plugins.security.multitenancy.enable_filter` | If you have many tenants, you can add a search bar to the top of the list. Default is false.
+`opensearch_security.multitenancy.enabled` | Enables or disables multi-tenancy in OpenSearch Dashboards. Default is true.
+`opensearch_security.multitenancy.tenants.enable_global` | Enables or disables the global tenant. Default is true.
+`opensearch_security.multitenancy.tenants.enable_private` | Enables or disables the private tenant. Default is true.
+`opensearch_security.multitenancy.tenants.preferred` | Lets you change ordering in the **Tenants** tab of OpenSearch Dashboards. By default, the list starts with global and private (if enabled) and then proceeds alphabetically. You can add tenants here to move them to the top of the list.
+`opensearch_security.multitenancy.enable_filter` | If you have many tenants, you can add a search bar to the top of the list. Default is false.


## Add tenants

@@ -1,64 +1,135 @@
---
layout: default
title: Permissions
-parent: Access Control
+parent: Access control
nav_order: 50
---

# Permissions

-This page is a complete list of available permissions in the security plugin. Each permission controls access to a data type or API.
+Each permission in the security plugin controls access to some action that the OpenSearch cluster can perform, such as indexing a document or checking cluster health.

-Rather than creating new action groups from individual permissions, you can often achieve your desired security posture using some combination of the default action groups. To learn more, see [Default Action Groups]({{site.url}}{{site.baseurl}}/security-plugin/access-control/default-action-groups/).
+Most permissions are self-describing. For example, `cluster:admin/ingest/pipeline/get` lets you retrieve information about ingest pipelines. _In many cases_, a permission correlates to a specific REST API operation, such as `GET _ingest/pipeline`.

Despite this correlation, permissions do **not** directly map to REST API operations. Operations such as `POST _bulk` and `GET _msearch` can access many indices and perform many actions in a single request. Even a simple request, such as `GET _cat/nodes`, performs several actions in order to generate its response.

In short, controlling access to the REST API is insufficient. Instead, the security plugin controls access to the underlying OpenSearch actions.

For example, consider the following `_bulk` request:

```json
POST _bulk
{ "delete": { "_index": "test-index", "_id": "tt2229499" } }
{ "index": { "_index": "test-index", "_id": "tt1979320" } }
{ "title": "Rush", "year": 2013 }
{ "create": { "_index": "test-index", "_id": "tt1392214" } }
{ "title": "Prisoners", "year": 2013 }
{ "update": { "_index": "test-index", "_id": "tt0816711" } }
{ "doc" : { "title": "World War Z" } }
```

For this request to succeed, you must have the following permissions for `test-index`:

- indices:data/write/bulk*
- indices:data/write/delete
- indices:data/write/index
- indices:data/write/update

These permissions also allow you to add, update, or delete documents (e.g. `PUT test-index/_doc/tt0816711`), because they govern the underlying OpenSearch actions of indexing and deleting documents rather than a specific API path and HTTP method.


## Test permissions

If you want a user to have the absolute minimum set of permissions necessary to perform some function---the [principle of least privilege](https://en.wikipedia.org/wiki/Principle_of_least_privilege)---the best way is to send representative requests to your cluster as a new test user. In the case of a permissions error, the security plugin is very explicit about which permissions are missing. Consider this request and response:

```json
GET _cat/shards?v

{
  "error": {
    "root_cause": [{
      "type": "security_exception",
      "reason": "no permissions for [indices:monitor/stats] and User [name=test-user, backend_roles=[], requestedTenant=null]"
    }]
  },
  "status": 403
}
```

[Create a user and a role]({{site.url}}{{site.baseurl}}/security-plugin/access-control/users-roles/), map the role to the user, and start sending signed requests using curl, Postman, or any other client. Then gradually add permissions to the role as you encounter errors. Even after you resolve one permissions error, the same request might generate new errors; the plugin only returns the first error it encounters, so keep trying until the request succeeds.
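
For example, a first pass at a role for the `GET _cat/shards?v` request above could grant exactly the permission named in the error message. This sketch uses the Create Role API; the role name and index pattern are illustrative:

```json
PUT _plugins/_security/api/roles/test-role
{
  "cluster_permissions": [],
  "index_permissions": [{
    "index_patterns": ["*"],
    "allowed_actions": ["indices:monitor/stats"]
  }]
}
```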

Rather than individual permissions, you can often achieve your desired security posture using a combination of the default action groups. See [Default action groups]({{site.url}}{{site.baseurl}}/security-plugin/access-control/default-action-groups/) for descriptions of the permissions that each group grants.
{: .tip }


-## Cluster
+## Cluster permissions

These permissions are for the cluster and can't be applied granularly. For example, you either have permissions to take snapshots (`cluster:admin/snapshot/create`) or you don't. You can't have permissions to take snapshots only for certain indices.

- cluster:admin/ingest/pipeline/delete
- cluster:admin/ingest/pipeline/get
- cluster:admin/ingest/pipeline/put
- cluster:admin/ingest/pipeline/simulate
- cluster:admin/ingest/processor/grok/get
- cluster:admin/opensearch/ad/detector/delete
- cluster:admin/opensearch/ad/detector/jobmanagement
- cluster:admin/opensearch/ad/detector/run
- cluster:admin/opensearch/ad/detector/search
- cluster:admin/opensearch/ad/detector/stats
- cluster:admin/opensearch/ad/detector/write
- cluster:admin/opensearch/ad/detectors/get
- cluster:admin/opensearch/ad/result/search
- cluster:admin/opensearch/alerting/alerts/ack
- cluster:admin/opensearch/alerting/alerts/get
- cluster:admin/opensearch/alerting/destination/delete
- cluster:admin/opensearch/alerting/destination/email_account/delete
- cluster:admin/opensearch/alerting/destination/email_account/get
- cluster:admin/opensearch/alerting/destination/email_account/search
- cluster:admin/opensearch/alerting/destination/email_account/write
- cluster:admin/opensearch/alerting/destination/email_group/delete
- cluster:admin/opensearch/alerting/destination/email_group/get
- cluster:admin/opensearch/alerting/destination/email_group/search
- cluster:admin/opensearch/alerting/destination/email_group/write
- cluster:admin/opensearch/alerting/destination/get
- cluster:admin/opensearch/alerting/destination/write
- cluster:admin/opensearch/alerting/monitor/delete
- cluster:admin/opensearch/alerting/monitor/execute
- cluster:admin/opensearch/alerting/monitor/get
- cluster:admin/opensearch/alerting/monitor/search
- cluster:admin/opensearch/alerting/monitor/write
- cluster:admin/opensearch/asynchronous_search/stats
- cluster:admin/opensearch/asynchronous_search/delete
- cluster:admin/opensearch/asynchronous_search/get
- cluster:admin/opensearch/asynchronous_search/submit
- cluster:admin/opensearch/reports/definition/create
- cluster:admin/opensearch/reports/definition/delete
- cluster:admin/opensearch/reports/definition/get
- cluster:admin/opensearch/reports/definition/list
- cluster:admin/opensearch/reports/definition/on_demand
- cluster:admin/opensearch/reports/definition/update
- cluster:admin/opensearch/reports/instance/get
- cluster:admin/opensearch/reports/instance/list
- cluster:admin/opensearch/reports/menu/download
- cluster:admin/opendistro/ad/detector/delete
- cluster:admin/opendistro/ad/detector/info
- cluster:admin/opendistro/ad/detector/jobmanagement
- cluster:admin/opendistro/ad/detector/preview
- cluster:admin/opendistro/ad/detector/run
- cluster:admin/opendistro/ad/detector/search
- cluster:admin/opendistro/ad/detector/stats
- cluster:admin/opendistro/ad/detector/write
- cluster:admin/opendistro/ad/detectors/get
- cluster:admin/opendistro/ad/result/search
- cluster:admin/opendistro/ad/tasks/search
- cluster:admin/opendistro/alerting/alerts/ack (acknowledge)
- cluster:admin/opendistro/alerting/alerts/get
- cluster:admin/opendistro/alerting/destination/delete
- cluster:admin/opendistro/alerting/destination/email_account/delete
- cluster:admin/opendistro/alerting/destination/email_account/get
- cluster:admin/opendistro/alerting/destination/email_account/search
- cluster:admin/opendistro/alerting/destination/email_account/write
- cluster:admin/opendistro/alerting/destination/email_group/delete
- cluster:admin/opendistro/alerting/destination/email_group/get
- cluster:admin/opendistro/alerting/destination/email_group/search
- cluster:admin/opendistro/alerting/destination/email_group/write
- cluster:admin/opendistro/alerting/destination/get
- cluster:admin/opendistro/alerting/destination/write
- cluster:admin/opendistro/alerting/monitor/delete
- cluster:admin/opendistro/alerting/monitor/execute
- cluster:admin/opendistro/alerting/monitor/get
- cluster:admin/opendistro/alerting/monitor/search
- cluster:admin/opendistro/alerting/monitor/write
- cluster:admin/opendistro/asynchronous_search/stats
- cluster:admin/opendistro/asynchronous_search/delete
- cluster:admin/opendistro/asynchronous_search/get
- cluster:admin/opendistro/asynchronous_search/submit
- cluster:admin/opendistro/ism/managedindex/add
- cluster:admin/opendistro/ism/managedindex/change
- cluster:admin/opendistro/ism/managedindex/remove
- cluster:admin/opendistro/ism/managedindex/explain
- cluster:admin/opendistro/ism/managedindex/retry
- cluster:admin/opendistro/ism/policy/write
- cluster:admin/opendistro/ism/policy/get
- cluster:admin/opendistro/ism/policy/search
- cluster:admin/opendistro/ism/policy/delete
- cluster:admin/opendistro/rollup/index
- cluster:admin/opendistro/rollup/get
- cluster:admin/opendistro/rollup/search
- cluster:admin/opendistro/rollup/delete
- cluster:admin/opendistro/rollup/start
- cluster:admin/opendistro/rollup/stop
- cluster:admin/opendistro/rollup/explain
- cluster:admin/opendistro/reports/definition/create
- cluster:admin/opendistro/reports/definition/update
- cluster:admin/opendistro/reports/definition/on_demand
- cluster:admin/opendistro/reports/definition/delete
- cluster:admin/opendistro/reports/definition/get
- cluster:admin/opendistro/reports/definition/list
- cluster:admin/opendistro/reports/instance/list
- cluster:admin/opendistro/reports/instance/get
- cluster:admin/opendistro/reports/menu/download
- cluster:admin/plugins/replication/autofollow/update
- cluster:admin/reindex/rethrottle
- cluster:admin/repository/delete
@@ -95,7 +166,9 @@ Rather than creating new action groups from individual permissions, you can ofte
- cluster:monitor/tasks/list


-## Indices
+## Index permissions

These permissions apply to an index or index pattern. You might want a user to have read access to all indices (i.e. `*`), but write access to only a few (e.g. `web-logs` and `product-catalog`); a role sketch after this list shows one way to express that split.

- indices:admin/aliases
- indices:admin/aliases/exists
@@ -103,13 +176,22 @@ Rather than creating new action groups from individual permissions, you can ofte
- indices:admin/analyze
- indices:admin/cache/clear
-- indices:admin/close
-- indices:admin/create
-- indices:admin/delete
+- indices:admin/close*
+- indices:admin/create (create indices)
+- indices:admin/data_stream/create
+- indices:admin/data_stream/delete
+- indices:admin/data_stream/get
+- indices:admin/delete (delete indices)
- indices:admin/exists
-- indices:admin/flush
+- indices:admin/flush*
- indices:admin/forcemerge
-- indices:admin/get
+- indices:admin/get (retrieve index and mapping)
+- indices:admin/index_template/delete
+- indices:admin/index_template/get
+- indices:admin/index_template/put
+- indices:admin/index_template/simulate
+- indices:admin/index_template/simulate_index
- indices:admin/mapping/put
- indices:admin/mappings/fields/get
- indices:admin/mappings/fields/get*
@@ -145,7 +227,7 @@ Rather than creating new action groups from individual permissions, you can ofte
- indices:data/read/mget*
- indices:data/read/msearch
- indices:data/read/msearch/template
-- indices:data/read/mtv
+- indices:data/read/mtv (multi-term vectors)
- indices:data/read/mtv*
- indices:data/read/plugins/replication/file_chunk
- indices:data/read/plugins/replication/changes
@@ -154,16 +236,17 @@ Rather than creating new action groups from individual permissions, you can ofte
- indices:data/read/search
- indices:data/read/search*
- indices:data/read/search/template
-- indices:data/read/tv
+- indices:data/read/tv (term vectors)
- indices:data/write/bulk
- indices:data/write/bulk*
-- indices:data/write/delete
+- indices:data/write/delete (delete documents)
- indices:data/write/delete/byquery
-- indices:data/write/index
+- indices:data/write/plugins/replication/changes
+- indices:data/write/index (add documents to existing indices)
- indices:data/write/reindex
- indices:data/write/update
- indices:data/write/update/byquery
- indices:monitor/data_stream/stats
- indices:monitor/recovery
- indices:monitor/segments
- indices:monitor/settings/get
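
As an illustration of how index permissions combine, this role sketch (the role name is illustrative) grants the `read` action group on all indices but the `write` action group only on `web-logs` and `product-catalog`, matching the read/write split described in the introduction to this list:

```json
PUT _plugins/_security/api/roles/mixed-access
{
  "index_permissions": [
    {
      "index_patterns": ["*"],
      "allowed_actions": ["read"]
    },
    {
      "index_patterns": ["web-logs", "product-catalog"],
      "allowed_actions": ["write"]
    }
  ]
}
```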
@@ -1,7 +1,7 @@
---
layout: default
-title: Users and Roles
-parent: Access Control
+title: Users and roles
+parent: Access control
nav_order: 1
---

@@ -111,7 +111,7 @@ Role | Description
`all_access` | Grants full access to the cluster: all cluster-wide operations, write to all indices, write to all tenants.
`cross_cluster_replication_follower_full_access` | Grants full access to perform cross-cluster replication actions on the follower cluster.
`cross_cluster_replication_leader_full_access` | Grants full access to perform cross-cluster replication actions on the leader cluster.
-`kibana_read_only` | A special role that prevents users from making changes to visualizations, dashboards, and other OpenSearch Dashboards objects. See `plugins.security.readonly_mode.roles` in `opensearch_dashboards.yml`. Pair with the `kibana_user` role.
+`kibana_read_only` | A special role that prevents users from making changes to visualizations, dashboards, and other OpenSearch Dashboards objects. See `opensearch_security.readonly_mode.roles` in `opensearch_dashboards.yml`. Pair with the `kibana_user` role.
`kibana_user` | Grants permissions to use OpenSearch Dashboards: cluster-wide searches, index monitoring, and write to various OpenSearch Dashboards indices.
`logstash` | Grants permissions for Logstash to interact with the cluster: cluster-wide searches, cluster monitoring, and write to the various Logstash indices.
`manage_snapshots` | Grants permissions to manage snapshot repositories, take snapshots, and restore snapshots.
@@ -42,10 +42,10 @@ You can optionally add the `-aes256` option to encrypt the key using the AES-256
Next, use the key to generate a self-signed certificate for the root CA:

```bash
-openssl req -new -x509 -sha256 -key root-ca-key.pem -out root-ca.pem -days 30
+openssl req -new -x509 -sha256 -key root-ca-key.pem -out root-ca.pem -days 730
```

-Change `-days 30` to 3650 (10 years) or some other number to set a non-default expiration date. The default value of 30 days is best for testing purposes.
+The default `-days` value of 30 is only useful for testing purposes. This sample command specifies 730 (two years) for the certificate expiration date, but use whatever value makes sense for your organization.

- The `-x509` option specifies that you want a self-signed certificate rather than a certificate request.
- The `-sha256` option sets the hash algorithm to SHA-256. SHA-256 is the default in later versions of OpenSSL, but earlier versions might use SHA-1.
@@ -78,7 +78,7 @@ Follow the prompts to fill in the details. You don't need to specify a challenge
Finally, generate the certificate itself:

```bash
-openssl x509 -req -in admin.csr -CA root-ca.pem -CAkey root-ca-key.pem -CAcreateserial -sha256 -out admin.pem -days 30
+openssl x509 -req -in admin.csr -CA root-ca.pem -CAkey root-ca-key.pem -CAcreateserial -sha256 -out admin.pem -days 730
```

Just like the root certificate, use the `-days` option to specify an expiration date of longer than 30 days.
@@ -91,7 +91,7 @@ Follow the steps in [Generate an admin certificate](#generate-an-admin-certifica
If you generate node certificates and have `plugins.security.ssl.transport.enforce_hostname_verification` set to `true` (default), be sure to specify a common name (CN) for the certificate that matches the hostname of the intended node. If you want to use the same node certificate on all nodes (not recommended), set hostname verification to `false`. For more information, see [Configure TLS certificates]({{site.url}}{{site.baseurl}}/security-plugin/configuration/tls#advanced-hostname-verification-and-dns-lookup).


-### Sample script
+## Sample script

If you already know the certificate details and don't want to specify them interactively, use the `-subj` option in your `root-ca.pem` and CSR commands. This script creates a root certificate, an admin certificate, two node certificates, and a client certificate, all with an expiration date of two years (730 days):
@@ -11,7 +11,6 @@ Active Directory and LDAP can be used for both authentication and authorization

In most cases, you want to configure both authentication and authorization. You can also use authentication only and map the users retrieved from LDAP directly to security plugin roles.

-{% comment %}

## Docker example

@@ -38,7 +37,7 @@ We provide a fully functional example that can help you understand how to use an
1. Index a document as `psantos`:

   ```bash
-   curl -XPUT https://localhost:9200/new-index/_doc/1 -H 'Content-Type: application/json' -d '{"title": "Spirited Away"}' -u psantos:password -k
+   curl -XPUT 'https://localhost:9200/new-index/_doc/1' -H 'Content-Type: application/json' -d '{"title": "Spirited Away"}' -u 'psantos:password' -k
   ```

   If you try the same request as `jroe`, it fails. The `Developers` group is mapped to the `readall`, `manage_snapshots`, and `kibana_user` roles and has no write permissions.

@@ -46,14 +45,13 @@ We provide a fully functional example that can help you understand how to use an
1. Search for the document as `jroe`:

   ```bash
-   curl -XGET https://localhost:9200/new-index/_search?pretty -u jroe:password -k
+   curl -XGET 'https://localhost:9200/new-index/_search?pretty' -u 'jroe:password' -k
   ```

   This request succeeds, because the `Developers` group is mapped to the `readall` role.

1. If you want to examine the contents of the various containers, run `docker ps` to find the container ID and then `docker exec -it <container-id> /bin/bash`.

-{% endcomment %}

## Connection settings

@@ -11,7 +11,6 @@ The security plugin supports user authentication through SAML single sign-on. Th

This profile is meant for use with web browsers. It is not a general-purpose way of authenticating users against the security plugin, so its primary use case is to support OpenSearch Dashboards single sign-on.

-{% comment %}

## Docker example

@@ -35,7 +34,6 @@ We provide a fully functional example that can help you understand how to use SA

In particular, you might find it helpful to review the contents of the `/var/www/simplesamlphp/config/` and `/var/www/simplesamlphp/metadata/` directories.

-{% endcomment %}

## Activating SAML

@@ -300,13 +298,13 @@ authc:

Because most of the SAML-specific configuration is done in the security plugin, just activate SAML in your `opensearch_dashboards.yml` by adding the following:

-```
-plugins.security.auth.type: "saml"
+```yml
+opensearch_security.auth.type: "saml"
```

In addition, the OpenSearch Dashboards endpoint for validating the SAML assertions must be whitelisted:

-```
+```yml
server.xsrf.whitelist: ["/_plugins/_security/saml/acs"]
```

@@ -126,7 +126,7 @@ plugins.security.restapi.password_validation_error_message: "Password must be mi

## whitelist.yml

-You can use `whitelist.yml` to allow list any endpoints and HTTP requests. If enabled, all users except the SuperAdmin are allowed access to only the specified endpoints and HTTP requests, and all other HTTP requests associated with the endpoint are denied. For example, if GET `_cluster/settings` is allow listed, users cannot submit PUT requests to `_cluster/settings` to update cluster settings.
+You can use `whitelist.yml` to add any endpoints and HTTP requests to a list of allowed endpoints and requests. If enabled, all users except the super admin are allowed access to only the specified endpoints and HTTP requests, and all other HTTP requests associated with the endpoint are denied. For example, if GET `_cluster/settings` is added to the allow list, users cannot submit PUT requests to `_cluster/settings` to update cluster settings.

Note that while you can configure access to endpoints this way, for most cases, it is still best to configure permissions using the security plugin's users and roles, which have more granular settings.
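
As a sketch of the file's overall shape, the following allow list restricts all non-super admin users to two read-only endpoints; the endpoints chosen here are illustrative:

```yml
---
_meta:
  type: "whitelist"
  config_version: 2

config:
  enabled: true
  requests:
    /_cluster/settings:
      - GET
    /_cat/nodes:
      - GET
```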

@@ -165,7 +165,7 @@ requests:
  - PUT
```

-You can also allow list custom indices. `whitelist.yml` doesn't support wildcards, so you must manually specify all of the indices you want to allow list.
+You can also add custom indices to the allow list. `whitelist.yml` doesn't support wildcards, so you must manually specify all of the indices you want to add.

```yml
requests: # Only allow GET requests to /sample-index1/_doc/1 and /sample-index2/_doc/1
@@ -315,6 +315,10 @@ _meta:

## tenants.yml

You can use this file to specify and add any number of OpenSearch Dashboards tenants to your OpenSearch cluster. For more information about tenants, see [OpenSearch Dashboards multi-tenancy]({{site.url}}{{site.baseurl}}/security-plugin/access-control/multi-tenancy).

Like all of the other YAML files, we recommend you use `tenants.yml` to add any tenants you must have in your cluster, and then use OpenSearch Dashboards or the [REST API]({{site.url}}{{site.baseurl}}/security-plugin/access-control/api/#tenants) if you need to further configure or create any other tenants.

```yml
---
_meta:

@@ -325,9 +329,12 @@ admin_tenant:
  description: "Demo tenant for admin user"
```


## nodes_dn.yml

`nodes_dn.yml` lets you add certificates' [distinguished names (DNs)]({{site.url}}{{site.baseurl}}/security-plugin/configuration/generate-certificates/#add-distinguished-names-to-opensearchyml) to an allow list to enable communication between any number of nodes or clusters. For example, a node that has the DN `CN=node1.example.com` in its allow list accepts communication from any other node or certificate that uses that DN.

The DNs get indexed into a [system index]({{site.url}}{{site.baseurl}}/security-plugin/configuration/system-indices) that only a super admin or an admin with a Transport Layer Security (TLS) certificate can access. If you want to programmatically add DNs to your allow lists, use the [REST API]({{site.url}}{{site.baseurl}}/security-plugin/access-control/api/#distinguished-names).

```yml
---
_meta:
@@ -11,16 +11,32 @@ redirect_from: /troubleshoot/
This page contains a list of common issues and workarounds.


## Java error during startup

You might see `[ERROR][c.a.o.s.s.t.OpenSearchSecuritySSLNettyTransport] [opensearch-node1] SSL Problem Insufficient buffer remaining for AEAD cipher fragment (2). Needs to be more than tag size (16)` when starting OpenSearch. This problem is a [known issue with Java](https://bugs.openjdk.java.net/browse/JDK-8221218) and doesn't affect the operation of the cluster.


## OpenSearch Dashboards fails to start

If you encounter the error `FATAL Error: Request Timeout after 30000ms` during startup, try running OpenSearch Dashboards on a more powerful machine. We recommend four CPU cores and 8 GB of RAM.


## Multi-tenancy issues in OpenSearch Dashboards

If you're testing multiple users in OpenSearch Dashboards and encounter unexpected changes in tenant, use Google Chrome in an Incognito window or Firefox in a Private window.


## Expired certificates

If your certificates have expired, you might receive the following error or something similar:

```
ERROR org.opensearch.security.ssl.transport.SecuritySSLNettyTransport - Exception during establishing a SSL connection: javax.net.ssl.SSLHandshakeException: PKIX path validation failed: java.security.cert.CertPathValidatorException: validity check failed
Caused by: java.security.cert.CertificateExpiredException: NotAfter: Thu Sep 16 11:27:55 PDT 2021
```

To check the expiration date for a certificate, run this command:

```bash
openssl x509 -enddate -noout -in <certificate>
```


## Encryption at rest

The operating system for each OpenSearch node handles encryption of data at rest. To enable encryption at rest in most Linux distributions, use the `cryptsetup` command:
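
As a sketch, with the key file and target partition as placeholders (formatting a partition with LUKS destroys any data already on it):

```bash
# Format the partition as an encrypted LUKS volume using a key file
cryptsetup luksFormat --key-file <key-file> <partition>
```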

@@ -85,8 +101,3 @@ The security plugin blocks the update by script operation (`POST <index>/_update
## Illegal reflective access operation in logs

This is a known issue with Performance Analyzer that shouldn't affect functionality.

-
-## Multi-tenancy issues in OpenSearch Dashboards
-
-If you're testing multiple users in OpenSearch Dashboards and encounter unexpected changes in tenant, use Google Chrome in an Incognito window or Firefox in a Private window.

@@ -144,6 +144,12 @@ If you are upgrading an Open Distro for Elasticsearch cluster, we recommend firs

1. Port your settings from `elasticsearch.yml` to `opensearch.yml`. Most settings use the same names. At a minimum, specify `cluster.name`, `node.name`, `discovery.seed_hosts`, and `cluster.initial_master_nodes`.

+1. (Optional) If you're actively connecting to the cluster with legacy clients that check for a particular version number, such as Logstash OSS, add a [compatibility setting]({{site.url}}{{site.baseurl}}/clients/agents-and-ingestion-tools/) to `opensearch.yml`:
+
+   ```yml
+   compatibility.override_main_response_version: true
+   ```
+
1. (Optional) Add your certificates to your `config` directory, add them to `opensearch.yml`, and initialize the security plugin.

1. Start OpenSearch on the node (rolling) or all nodes (cluster restart).

Binary file not shown.
Binary file not shown.

@@ -37,7 +37,7 @@ Component | Purpose
[Anomaly Detection]({{site.url}}{{site.baseurl}}/monitoring-plugins/ad/) | Identify atypical data and receive automatic notifications
[Asynchronous Search]({{site.url}}{{site.baseurl}}/search-plugins/async/) | Run search requests in the background

-Most of OpenSearch plugins have a corresponding OpenSearch Dashboards plugin that provide a convenient, unified user interface.
+Most OpenSearch plugins have corresponding OpenSearch Dashboards plugins that provide a convenient, unified user interface.

For specifics around the project, see the [FAQ](https://opensearch.org/faq/).