diff --git a/_ml-commons-plugin/api/model-apis/get-model.md b/_ml-commons-plugin/api/model-apis/get-model.md
index 454aff27..73513721 100644
--- a/_ml-commons-plugin/api/model-apis/get-model.md
+++ b/_ml-commons-plugin/api/model-apis/get-model.md
@@ -104,6 +104,86 @@ POST /_plugins/_ml/models/_search
 ```
 {% include copy-curl.html %}
 
+#### Example: Excluding model chunks
+
+```json
+GET /_plugins/_ml/models/_search
+{
+  "query": {
+    "bool": {
+      "must_not": {
+        "exists": {
+          "field": "chunk_number"
+        }
+      }
+    }
+  },
+  "sort": [
+    {
+      "created_time": {
+        "order": "desc"
+      }
+    }
+  ]
+}
+```
+{% include copy-curl.html %}
+
+#### Example: Searching for all model chunks
+
+The following query searches for all chunks of the model with the ID `9r9w9YwBjWKCe6KgyGST` and sorts the chunks in ascending order:
+
+```json
+GET /_plugins/_ml/models/_search
+{
+  "query": {
+    "bool": {
+      "filter": [
+        {
+          "term": {
+            "model_id": "9r9w9YwBjWKCe6KgyGST"
+          }
+        }
+      ]
+    }
+  },
+  "sort": [
+    {
+      "chunk_number": {
+        "order": "asc"
+      }
+    }
+  ]
+}
+```
+{% include copy-curl.html %}
+
+#### Example: Searching for a model by description
+
+```json
+GET /_plugins/_ml/models/_search
+{
+  "query": {
+    "bool": {
+      "should": [
+        {
+          "match": {
+            "description": "sentence transformer"
+          }
+        }
+      ],
+      "must_not": {
+        "exists": {
+          "field": "chunk_number"
+        }
+      }
+    }
+  },
+  "size": 1000
+}
+```
+{% include copy-curl.html %}
+
 #### Example response
 
 ```json
diff --git a/_ml-commons-plugin/cluster-settings.md b/_ml-commons-plugin/cluster-settings.md
index cae56eea..5bf1c135 100644
--- a/_ml-commons-plugin/cluster-settings.md
+++ b/_ml-commons-plugin/cluster-settings.md
@@ -20,10 +20,16 @@ By default, ML tasks and models only run on ML nodes. When configured without th
 node.roles: [ ml ]
 ```
 
+### Setting up a cluster with a dedicated ML node
+
+To set up a cluster with a dedicated ML node, see the sample [Docker Compose file](https://github.com/opensearch-project/ml-commons/blob/main/docs/docker/docker-compose.yml).
+
 ## Run tasks and models on ML nodes only
 
 If `true`, ML Commons tasks and models run ML tasks on ML nodes only. If `false`, tasks and models run on ML nodes first. If no ML nodes exist, tasks and models run on data nodes.
 
+We suggest running ML workloads on a dedicated ML node rather than on data nodes. Starting with OpenSearch 2.5, ML tasks run on ML nodes only by default. To test models on a data node, set `plugins.ml_commons.only_run_on_ml_node` to `false`.
+
 We recommend setting `plugins.ml_commons.only_run_on_ml_node` to `true` on production clusters.
 {: .tip}
 
@@ -220,6 +226,8 @@ Sets a circuit breaker that checks all system memory usage before running an ML
 
 Values are based on the percentage of memory available. When set to `0`, no ML tasks will run. When set to `100`, the circuit breaker closes and no threshold exists.
 
+Starting with OpenSearch 2.5, ML Commons uses a native memory circuit breaker to avoid out-of-memory errors when too many models are loaded. By default, the native memory threshold is 90%. If memory usage exceeds the threshold, ML Commons returns an error. For testing purposes, you can disable the circuit breaker by setting `plugins.ml_commons.native_memory_threshold` to `100`.
+
 ### Setting
 
 ```
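The two settings referenced in the `cluster-settings.md` changes above, `plugins.ml_commons.only_run_on_ml_node` and `plugins.ml_commons.native_memory_threshold`, are cluster settings and can be updated at runtime through the cluster settings API. The following is a minimal sketch for testing only, assuming both settings remain dynamically updatable; the values shown relax the ML-node restriction and disable the native memory circuit breaker, which is not recommended for production clusters:

```json
PUT _cluster/settings
{
  "persistent": {
    "plugins.ml_commons.only_run_on_ml_node": false,
    "plugins.ml_commons.native_memory_threshold": 100
  }
}
```
{% include copy-curl.html %}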