We discussed recently that the cluster state API should be considered "internal" and therefore our usual cast-iron stability guarantees do not hold for this API. However, there are a good number of REST tests that try to identify the master node. Today they call `GET /_cluster/state` API and extract the master node ID from the response. In fact many of these tests just want an arbitrary node ID (or perhaps a data node ID) so an alternative is to call `GET _nodes` or `GET _nodes/data:true` and obtain a node ID from the keys of the `nodes` map in the response. This change adds the ability for YAML-based REST tests to extract an arbitrary key from a map so that they can obtain a node ID from the nodes info API instead of using the master node ID from the cluster state API. Relates #40047.
This commit is contained in:
parent
23395a9b9f
commit
5a2ba34174
|
@ -12,10 +12,6 @@
|
||||||
|
|
||||||
- match: { acknowledged: true }
|
- match: { acknowledged: true }
|
||||||
|
|
||||||
- do:
|
|
||||||
cluster.state:
|
|
||||||
metric: [ master_node ]
|
|
||||||
|
|
||||||
- do:
|
- do:
|
||||||
cluster.allocation_explain:
|
cluster.allocation_explain:
|
||||||
body: { "index": "test", "shard": 0, "primary": true }
|
body: { "index": "test", "shard": 0, "primary": true }
|
||||||
|
@ -37,10 +33,6 @@
|
||||||
index: test
|
index: test
|
||||||
body: { "settings": { "index.number_of_shards": 1, "index.number_of_replicas": 9 } }
|
body: { "settings": { "index.number_of_shards": 1, "index.number_of_replicas": 9 } }
|
||||||
|
|
||||||
- do:
|
|
||||||
cluster.state:
|
|
||||||
metric: [ master_node ]
|
|
||||||
|
|
||||||
- do:
|
- do:
|
||||||
cluster.allocation_explain:
|
cluster.allocation_explain:
|
||||||
include_disk_info: true
|
include_disk_info: true
|
||||||
|
|
|
@ -25,12 +25,14 @@ setup:
|
||||||
|
|
||||||
---
|
---
|
||||||
"Explain API for non-existent node & shard":
|
"Explain API for non-existent node & shard":
|
||||||
|
- skip:
|
||||||
|
features: [arbitrary_key]
|
||||||
|
|
||||||
- do:
|
- do:
|
||||||
cluster.state:
|
nodes.info:
|
||||||
metric: [ master_node ]
|
node_id: data:true
|
||||||
|
- set:
|
||||||
- set: {master_node: node_id}
|
nodes._arbitrary_key_: node_id
|
||||||
|
|
||||||
- do:
|
- do:
|
||||||
cluster.reroute:
|
cluster.reroute:
|
||||||
|
|
|
@ -3,18 +3,20 @@
|
||||||
- skip:
|
- skip:
|
||||||
version: " - 6.9.99"
|
version: " - 6.9.99"
|
||||||
reason: expects warnings that pre-7.0.0 will not send
|
reason: expects warnings that pre-7.0.0 will not send
|
||||||
features: "warnings"
|
features: [warnings, arbitrary_key]
|
||||||
# creates an index with one document solely allocated on the master node
|
|
||||||
|
# creates an index with one document solely allocated on a particular data node
|
||||||
# and shrinks it into a new index with a single shard
|
# and shrinks it into a new index with a single shard
|
||||||
# we don't do the relocation to a single node after the index is created
|
# we don't do the relocation to a single node after the index is created
|
||||||
# here since in a mixed version cluster we can't identify
|
# here since in a mixed version cluster we can't identify
|
||||||
# which node is the one with the highest version and that is the only one that can safely
|
# which node is the one with the highest version and that is the only one that can safely
|
||||||
# be used to shrink the index.
|
# be used to shrink the index.
|
||||||
- do:
|
|
||||||
cluster.state: {}
|
|
||||||
# Get master node id
|
|
||||||
|
|
||||||
- set: { master_node: master }
|
- do:
|
||||||
|
nodes.info:
|
||||||
|
node_id: data:true
|
||||||
|
- set:
|
||||||
|
nodes._arbitrary_key_: node_id
|
||||||
|
|
||||||
- do:
|
- do:
|
||||||
indices.create:
|
indices.create:
|
||||||
|
@ -22,8 +24,8 @@
|
||||||
wait_for_active_shards: 1
|
wait_for_active_shards: 1
|
||||||
body:
|
body:
|
||||||
settings:
|
settings:
|
||||||
# ensure everything is allocated on a single node
|
# ensure everything is allocated on the same data node
|
||||||
index.routing.allocation.include._id: $master
|
index.routing.allocation.include._id: $node_id
|
||||||
index.number_of_shards: 2
|
index.number_of_shards: 2
|
||||||
index.number_of_replicas: 0
|
index.number_of_replicas: 0
|
||||||
- do:
|
- do:
|
||||||
|
|
|
@ -3,13 +3,13 @@
|
||||||
- skip:
|
- skip:
|
||||||
version: " - 6.9.99"
|
version: " - 6.9.99"
|
||||||
reason: expects warnings that pre-7.0.0 will not send
|
reason: expects warnings that pre-7.0.0 will not send
|
||||||
features: "warnings"
|
features: [warnings, arbitrary_key]
|
||||||
|
|
||||||
- do:
|
- do:
|
||||||
cluster.state: {}
|
nodes.info:
|
||||||
# Get master node id
|
node_id: data:true
|
||||||
|
- set:
|
||||||
- set: { master_node: master }
|
nodes._arbitrary_key_: node_id
|
||||||
|
|
||||||
# create index
|
# create index
|
||||||
- do:
|
- do:
|
||||||
|
@ -19,7 +19,7 @@
|
||||||
body:
|
body:
|
||||||
settings:
|
settings:
|
||||||
# ensure everything is allocated on a single node
|
# ensure everything is allocated on a single node
|
||||||
index.routing.allocation.include._id: $master
|
index.routing.allocation.include._id: $node_id
|
||||||
index.number_of_shards: 2
|
index.number_of_shards: 2
|
||||||
index.number_of_replicas: 0
|
index.number_of_replicas: 0
|
||||||
mappings:
|
mappings:
|
||||||
|
|
|
@ -3,13 +3,13 @@
|
||||||
- skip:
|
- skip:
|
||||||
version: " - 6.9.99"
|
version: " - 6.9.99"
|
||||||
reason: expects warnings that pre-7.0.0 will not send
|
reason: expects warnings that pre-7.0.0 will not send
|
||||||
features: "warnings"
|
features: [warnings, arbitrary_key]
|
||||||
|
|
||||||
- do:
|
- do:
|
||||||
cluster.state: {}
|
nodes.info:
|
||||||
|
node_id: data:true
|
||||||
# get master node id
|
- set:
|
||||||
- set: { master_node: master }
|
nodes._arbitrary_key_: node_id
|
||||||
|
|
||||||
- do:
|
- do:
|
||||||
indices.create:
|
indices.create:
|
||||||
|
@ -17,8 +17,8 @@
|
||||||
wait_for_active_shards: 1
|
wait_for_active_shards: 1
|
||||||
body:
|
body:
|
||||||
settings:
|
settings:
|
||||||
# ensure everything is allocated on the master node
|
# ensure everything is allocated on the same node
|
||||||
index.routing.allocation.include._id: $master
|
index.routing.allocation.include._id: $node_id
|
||||||
index.number_of_shards: 2
|
index.number_of_shards: 2
|
||||||
index.number_of_replicas: 0
|
index.number_of_replicas: 0
|
||||||
index.merge.scheduler.max_merge_count: 4
|
index.merge.scheduler.max_merge_count: 4
|
||||||
|
@ -63,7 +63,7 @@
|
||||||
- match: { copy-settings-target.settings.index.merge.scheduler.max_merge_count: "4" }
|
- match: { copy-settings-target.settings.index.merge.scheduler.max_merge_count: "4" }
|
||||||
- match: { copy-settings-target.settings.index.merge.scheduler.max_thread_count: "2" }
|
- match: { copy-settings-target.settings.index.merge.scheduler.max_thread_count: "2" }
|
||||||
- match: { copy-settings-target.settings.index.blocks.write: "true" }
|
- match: { copy-settings-target.settings.index.blocks.write: "true" }
|
||||||
- match: { copy-settings-target.settings.index.routing.allocation.include._id: $master }
|
- match: { copy-settings-target.settings.index.routing.allocation.include._id: $node_id }
|
||||||
|
|
||||||
# now we do an actual shrink and copy settings (by default)
|
# now we do an actual shrink and copy settings (by default)
|
||||||
- do:
|
- do:
|
||||||
|
@ -89,7 +89,7 @@
|
||||||
- match: { default-copy-settings-target.settings.index.merge.scheduler.max_merge_count: "4" }
|
- match: { default-copy-settings-target.settings.index.merge.scheduler.max_merge_count: "4" }
|
||||||
- match: { default-copy-settings-target.settings.index.merge.scheduler.max_thread_count: "2" }
|
- match: { default-copy-settings-target.settings.index.merge.scheduler.max_thread_count: "2" }
|
||||||
- match: { default-copy-settings-target.settings.index.blocks.write: "true" }
|
- match: { default-copy-settings-target.settings.index.blocks.write: "true" }
|
||||||
- match: { default-copy-settings-target.settings.index.routing.allocation.include._id: $master }
|
- match: { default-copy-settings-target.settings.index.routing.allocation.include._id: $node_id }
|
||||||
|
|
||||||
# now we do an actual shrink and try to set no copy settings
|
# now we do an actual shrink and try to set no copy settings
|
||||||
- do:
|
- do:
|
||||||
|
|
|
@ -3,13 +3,13 @@
|
||||||
- skip:
|
- skip:
|
||||||
version: " - 6.9.99"
|
version: " - 6.9.99"
|
||||||
reason: expects warnings that pre-7.0.0 will not send
|
reason: expects warnings that pre-7.0.0 will not send
|
||||||
features: "warnings"
|
features: [arbitrary_key, warnings]
|
||||||
|
|
||||||
- do:
|
- do:
|
||||||
cluster.state: {}
|
nodes.info:
|
||||||
|
node_id: data:true
|
||||||
# get master node id
|
- set:
|
||||||
- set: { master_node: master }
|
nodes._arbitrary_key_: node_id
|
||||||
|
|
||||||
- do:
|
- do:
|
||||||
indices.create:
|
indices.create:
|
||||||
|
@ -17,8 +17,8 @@
|
||||||
wait_for_active_shards: 1
|
wait_for_active_shards: 1
|
||||||
body:
|
body:
|
||||||
settings:
|
settings:
|
||||||
# ensure everything is allocated on the master node
|
# ensure everything is allocated on the same node
|
||||||
index.routing.allocation.include._id: $master
|
index.routing.allocation.include._id: $node_id
|
||||||
index.number_of_replicas: 0
|
index.number_of_replicas: 0
|
||||||
index.number_of_shards: 1
|
index.number_of_shards: 1
|
||||||
index.number_of_routing_shards: 4
|
index.number_of_routing_shards: 4
|
||||||
|
@ -66,7 +66,7 @@
|
||||||
- match: { copy-settings-target.settings.index.merge.scheduler.max_merge_count: "4" }
|
- match: { copy-settings-target.settings.index.merge.scheduler.max_merge_count: "4" }
|
||||||
- match: { copy-settings-target.settings.index.merge.scheduler.max_thread_count: "2" }
|
- match: { copy-settings-target.settings.index.merge.scheduler.max_thread_count: "2" }
|
||||||
- match: { copy-settings-target.settings.index.blocks.write: "true" }
|
- match: { copy-settings-target.settings.index.blocks.write: "true" }
|
||||||
- match: { copy-settings-target.settings.index.routing.allocation.include._id: $master }
|
- match: { copy-settings-target.settings.index.routing.allocation.include._id: $node_id }
|
||||||
|
|
||||||
# now we do an actual shrink and copy settings (by default)
|
# now we do an actual shrink and copy settings (by default)
|
||||||
- do:
|
- do:
|
||||||
|
@ -93,7 +93,7 @@
|
||||||
- match: { default-copy-settings-target.settings.index.merge.scheduler.max_merge_count: "4" }
|
- match: { default-copy-settings-target.settings.index.merge.scheduler.max_merge_count: "4" }
|
||||||
- match: { default-copy-settings-target.settings.index.merge.scheduler.max_thread_count: "2" }
|
- match: { default-copy-settings-target.settings.index.merge.scheduler.max_thread_count: "2" }
|
||||||
- match: { default-copy-settings-target.settings.index.blocks.write: "true" }
|
- match: { default-copy-settings-target.settings.index.blocks.write: "true" }
|
||||||
- match: { default-copy-settings-target.settings.index.routing.allocation.include._id: $master }
|
- match: { default-copy-settings-target.settings.index.routing.allocation.include._id: $node_id }
|
||||||
|
|
||||||
- do:
|
- do:
|
||||||
catch: /illegal_argument_exception/
|
catch: /illegal_argument_exception/
|
||||||
|
|
|
@ -1,14 +1,13 @@
|
||||||
|
setup:
|
||||||
|
- skip:
|
||||||
|
features: [arbitrary_key]
|
||||||
---
|
---
|
||||||
"node_info test":
|
"node_info test":
|
||||||
- do:
|
|
||||||
cluster.state: {}
|
|
||||||
|
|
||||||
# Get master node id
|
|
||||||
- set: { master_node: master }
|
|
||||||
|
|
||||||
- do:
|
- do:
|
||||||
nodes.info: {}
|
nodes.info: {}
|
||||||
|
- set:
|
||||||
|
nodes._arbitrary_key_: node_id
|
||||||
|
|
||||||
- is_true: nodes
|
- is_true: nodes
|
||||||
- is_true: cluster_name
|
- is_true: cluster_name
|
||||||
- is_true: nodes.$master.roles
|
- is_true: nodes.$node_id.roles
|
||||||
|
|
|
@ -2,15 +2,15 @@
|
||||||
|
|
||||||
"node_info test profile is empty":
|
"node_info test profile is empty":
|
||||||
- skip:
|
- skip:
|
||||||
features: stash_in_path
|
features: [stash_in_path, arbitrary_key]
|
||||||
|
|
||||||
- do:
|
- do:
|
||||||
cluster.state: {}
|
nodes.info: {}
|
||||||
|
- set:
|
||||||
- set: {master_node: master}
|
nodes._arbitrary_key_: node_id
|
||||||
|
|
||||||
- do:
|
- do:
|
||||||
nodes.info:
|
nodes.info:
|
||||||
metric: [ transport ]
|
metric: [ transport ]
|
||||||
|
|
||||||
- is_true: nodes.$master.transport.profiles
|
- is_true: nodes.$node_id.transport.profiles
|
||||||
|
|
|
@ -1,19 +1,22 @@
|
||||||
---
|
---
|
||||||
"node_info test flat_settings":
|
"node_info test flat_settings":
|
||||||
- do:
|
- skip:
|
||||||
cluster.state: {}
|
features: [arbitrary_key]
|
||||||
|
|
||||||
- set: { master_node: master }
|
- do:
|
||||||
|
nodes.info: {}
|
||||||
|
- set:
|
||||||
|
nodes._arbitrary_key_: node_id
|
||||||
|
|
||||||
- do:
|
- do:
|
||||||
nodes.info:
|
nodes.info:
|
||||||
metric: [ settings ]
|
metric: [ settings ]
|
||||||
|
|
||||||
- match : { nodes.$master.settings.client.type: node }
|
- match : { nodes.$node_id.settings.client.type: node }
|
||||||
|
|
||||||
- do:
|
- do:
|
||||||
nodes.info:
|
nodes.info:
|
||||||
metric: [ settings ]
|
metric: [ settings ]
|
||||||
flat_settings: true
|
flat_settings: true
|
||||||
|
|
||||||
- match : { nodes.$master.settings.client\.type: node }
|
- match : { nodes.$node_id.settings.client\.type: node }
|
||||||
|
|
|
@ -9,17 +9,20 @@
|
||||||
|
|
||||||
---
|
---
|
||||||
"Nodes stats level":
|
"Nodes stats level":
|
||||||
- do:
|
- skip:
|
||||||
cluster.state: {}
|
features: [arbitrary_key]
|
||||||
|
|
||||||
- set: { master_node: master }
|
- do:
|
||||||
|
nodes.info: {}
|
||||||
|
- set:
|
||||||
|
nodes._arbitrary_key_: node_id
|
||||||
|
|
||||||
- do:
|
- do:
|
||||||
nodes.stats:
|
nodes.stats:
|
||||||
metric: [ indices ]
|
metric: [ indices ]
|
||||||
level: "indices"
|
level: "indices"
|
||||||
|
|
||||||
- is_true: nodes.$master.indices.indices
|
- is_true: nodes.$node_id.indices.indices
|
||||||
|
|
||||||
---
|
---
|
||||||
"Nodes stats unrecognized parameter":
|
"Nodes stats unrecognized parameter":
|
||||||
|
|
|
@ -1,211 +1,227 @@
|
||||||
---
|
---
|
||||||
"Metric - blank":
|
"Metric - blank":
|
||||||
|
- skip:
|
||||||
|
features: [arbitrary_key]
|
||||||
- do:
|
- do:
|
||||||
cluster.state: {}
|
nodes.info: {}
|
||||||
|
- set:
|
||||||
- set: { master_node: master }
|
nodes._arbitrary_key_: node_id
|
||||||
|
|
||||||
- do:
|
- do:
|
||||||
nodes.stats: {}
|
nodes.stats: {}
|
||||||
|
|
||||||
- is_true: nodes.$master.indices.docs
|
- is_true: nodes.$node_id.indices.docs
|
||||||
- is_true: nodes.$master.indices.store
|
- is_true: nodes.$node_id.indices.store
|
||||||
- is_true: nodes.$master.indices.indexing
|
- is_true: nodes.$node_id.indices.indexing
|
||||||
- is_true: nodes.$master.indices.get
|
- is_true: nodes.$node_id.indices.get
|
||||||
- is_true: nodes.$master.indices.search
|
- is_true: nodes.$node_id.indices.search
|
||||||
- is_true: nodes.$master.indices.merges
|
- is_true: nodes.$node_id.indices.merges
|
||||||
- is_true: nodes.$master.indices.refresh
|
- is_true: nodes.$node_id.indices.refresh
|
||||||
- is_true: nodes.$master.indices.flush
|
- is_true: nodes.$node_id.indices.flush
|
||||||
- is_true: nodes.$master.indices.warmer
|
- is_true: nodes.$node_id.indices.warmer
|
||||||
- is_true: nodes.$master.indices.query_cache
|
- is_true: nodes.$node_id.indices.query_cache
|
||||||
- is_true: nodes.$master.indices.fielddata
|
- is_true: nodes.$node_id.indices.fielddata
|
||||||
- is_true: nodes.$master.indices.completion
|
- is_true: nodes.$node_id.indices.completion
|
||||||
- is_true: nodes.$master.indices.segments
|
- is_true: nodes.$node_id.indices.segments
|
||||||
- is_true: nodes.$master.indices.translog
|
- is_true: nodes.$node_id.indices.translog
|
||||||
- is_true: nodes.$master.indices.recovery
|
- is_true: nodes.$node_id.indices.recovery
|
||||||
|
|
||||||
---
|
---
|
||||||
"Metric - _all":
|
"Metric - _all":
|
||||||
|
- skip:
|
||||||
|
features: [arbitrary_key]
|
||||||
- do:
|
- do:
|
||||||
cluster.state: {}
|
nodes.info: {}
|
||||||
|
- set:
|
||||||
- set: { master_node: master }
|
nodes._arbitrary_key_: node_id
|
||||||
|
|
||||||
- do:
|
- do:
|
||||||
nodes.stats: { metric: _all }
|
nodes.stats: { metric: _all }
|
||||||
|
|
||||||
- is_true: nodes.$master.indices.docs
|
- is_true: nodes.$node_id.indices.docs
|
||||||
- is_true: nodes.$master.indices.store
|
- is_true: nodes.$node_id.indices.store
|
||||||
- is_true: nodes.$master.indices.indexing
|
- is_true: nodes.$node_id.indices.indexing
|
||||||
- is_true: nodes.$master.indices.get
|
- is_true: nodes.$node_id.indices.get
|
||||||
- is_true: nodes.$master.indices.search
|
- is_true: nodes.$node_id.indices.search
|
||||||
- is_true: nodes.$master.indices.merges
|
- is_true: nodes.$node_id.indices.merges
|
||||||
- is_true: nodes.$master.indices.refresh
|
- is_true: nodes.$node_id.indices.refresh
|
||||||
- is_true: nodes.$master.indices.flush
|
- is_true: nodes.$node_id.indices.flush
|
||||||
- is_true: nodes.$master.indices.warmer
|
- is_true: nodes.$node_id.indices.warmer
|
||||||
- is_true: nodes.$master.indices.query_cache
|
- is_true: nodes.$node_id.indices.query_cache
|
||||||
- is_true: nodes.$master.indices.fielddata
|
- is_true: nodes.$node_id.indices.fielddata
|
||||||
- is_true: nodes.$master.indices.completion
|
- is_true: nodes.$node_id.indices.completion
|
||||||
- is_true: nodes.$master.indices.segments
|
- is_true: nodes.$node_id.indices.segments
|
||||||
- is_true: nodes.$master.indices.translog
|
- is_true: nodes.$node_id.indices.translog
|
||||||
- is_true: nodes.$master.indices.recovery
|
- is_true: nodes.$node_id.indices.recovery
|
||||||
|
|
||||||
---
|
---
|
||||||
"Metric - indices _all":
|
"Metric - indices _all":
|
||||||
|
- skip:
|
||||||
|
features: [arbitrary_key]
|
||||||
- do:
|
- do:
|
||||||
cluster.state: {}
|
nodes.info: {}
|
||||||
|
- set:
|
||||||
- set: { master_node: master }
|
nodes._arbitrary_key_: node_id
|
||||||
|
|
||||||
- do:
|
- do:
|
||||||
nodes.stats: { metric: indices, index_metric: _all }
|
nodes.stats: { metric: indices, index_metric: _all }
|
||||||
|
|
||||||
- is_true: nodes.$master.indices.docs
|
- is_true: nodes.$node_id.indices.docs
|
||||||
- is_true: nodes.$master.indices.store
|
- is_true: nodes.$node_id.indices.store
|
||||||
- is_true: nodes.$master.indices.indexing
|
- is_true: nodes.$node_id.indices.indexing
|
||||||
- is_true: nodes.$master.indices.get
|
- is_true: nodes.$node_id.indices.get
|
||||||
- is_true: nodes.$master.indices.search
|
- is_true: nodes.$node_id.indices.search
|
||||||
- is_true: nodes.$master.indices.merges
|
- is_true: nodes.$node_id.indices.merges
|
||||||
- is_true: nodes.$master.indices.refresh
|
- is_true: nodes.$node_id.indices.refresh
|
||||||
- is_true: nodes.$master.indices.flush
|
- is_true: nodes.$node_id.indices.flush
|
||||||
- is_true: nodes.$master.indices.warmer
|
- is_true: nodes.$node_id.indices.warmer
|
||||||
- is_true: nodes.$master.indices.query_cache
|
- is_true: nodes.$node_id.indices.query_cache
|
||||||
- is_true: nodes.$master.indices.fielddata
|
- is_true: nodes.$node_id.indices.fielddata
|
||||||
- is_true: nodes.$master.indices.completion
|
- is_true: nodes.$node_id.indices.completion
|
||||||
- is_true: nodes.$master.indices.segments
|
- is_true: nodes.$node_id.indices.segments
|
||||||
- is_true: nodes.$master.indices.translog
|
- is_true: nodes.$node_id.indices.translog
|
||||||
- is_true: nodes.$master.indices.recovery
|
- is_true: nodes.$node_id.indices.recovery
|
||||||
|
|
||||||
---
|
---
|
||||||
"Metric - one":
|
"Metric - one":
|
||||||
|
- skip:
|
||||||
|
features: [arbitrary_key]
|
||||||
- do:
|
- do:
|
||||||
cluster.state: {}
|
nodes.info: {}
|
||||||
|
- set:
|
||||||
- set: { master_node: master }
|
nodes._arbitrary_key_: node_id
|
||||||
|
|
||||||
- do:
|
- do:
|
||||||
nodes.stats: { metric: indices, index_metric: docs }
|
nodes.stats: { metric: indices, index_metric: docs }
|
||||||
|
|
||||||
- is_true: nodes.$master.indices.docs
|
- is_true: nodes.$node_id.indices.docs
|
||||||
- is_false: nodes.$master.indices.store
|
- is_false: nodes.$node_id.indices.store
|
||||||
- is_false: nodes.$master.indices.indexing
|
- is_false: nodes.$node_id.indices.indexing
|
||||||
- is_false: nodes.$master.indices.get
|
- is_false: nodes.$node_id.indices.get
|
||||||
- is_false: nodes.$master.indices.search
|
- is_false: nodes.$node_id.indices.search
|
||||||
- is_false: nodes.$master.indices.merges
|
- is_false: nodes.$node_id.indices.merges
|
||||||
- is_false: nodes.$master.indices.refresh
|
- is_false: nodes.$node_id.indices.refresh
|
||||||
- is_false: nodes.$master.indices.flush
|
- is_false: nodes.$node_id.indices.flush
|
||||||
- is_false: nodes.$master.indices.warmer
|
- is_false: nodes.$node_id.indices.warmer
|
||||||
- is_false: nodes.$master.indices.query_cache
|
- is_false: nodes.$node_id.indices.query_cache
|
||||||
- is_false: nodes.$master.indices.fielddata
|
- is_false: nodes.$node_id.indices.fielddata
|
||||||
- is_false: nodes.$master.indices.completion
|
- is_false: nodes.$node_id.indices.completion
|
||||||
- is_false: nodes.$master.indices.segments
|
- is_false: nodes.$node_id.indices.segments
|
||||||
- is_false: nodes.$master.indices.translog
|
- is_false: nodes.$node_id.indices.translog
|
||||||
- is_false: nodes.$master.indices.recovery
|
- is_false: nodes.$node_id.indices.recovery
|
||||||
|
|
||||||
---
|
---
|
||||||
"Metric - multi":
|
"Metric - multi":
|
||||||
|
- skip:
|
||||||
|
features: [arbitrary_key]
|
||||||
- do:
|
- do:
|
||||||
cluster.state: {}
|
nodes.info: {}
|
||||||
|
- set:
|
||||||
- set: { master_node: master }
|
nodes._arbitrary_key_: node_id
|
||||||
|
|
||||||
- do:
|
- do:
|
||||||
nodes.stats: { metric: indices, index_metric: [ store, get, merge ] }
|
nodes.stats: { metric: indices, index_metric: [ store, get, merge ] }
|
||||||
|
|
||||||
- is_false: nodes.$master.indices.docs
|
- is_false: nodes.$node_id.indices.docs
|
||||||
- is_true: nodes.$master.indices.store
|
- is_true: nodes.$node_id.indices.store
|
||||||
- is_false: nodes.$master.indices.indexing
|
- is_false: nodes.$node_id.indices.indexing
|
||||||
- is_true: nodes.$master.indices.get
|
- is_true: nodes.$node_id.indices.get
|
||||||
- is_false: nodes.$master.indices.search
|
- is_false: nodes.$node_id.indices.search
|
||||||
- is_true: nodes.$master.indices.merges
|
- is_true: nodes.$node_id.indices.merges
|
||||||
- is_false: nodes.$master.indices.refresh
|
- is_false: nodes.$node_id.indices.refresh
|
||||||
- is_false: nodes.$master.indices.flush
|
- is_false: nodes.$node_id.indices.flush
|
||||||
- is_false: nodes.$master.indices.warmer
|
- is_false: nodes.$node_id.indices.warmer
|
||||||
- is_false: nodes.$master.indices.query_cache
|
- is_false: nodes.$node_id.indices.query_cache
|
||||||
- is_false: nodes.$master.indices.fielddata
|
- is_false: nodes.$node_id.indices.fielddata
|
||||||
- is_false: nodes.$master.indices.completion
|
- is_false: nodes.$node_id.indices.completion
|
||||||
- is_false: nodes.$master.indices.segments
|
- is_false: nodes.$node_id.indices.segments
|
||||||
- is_false: nodes.$master.indices.translog
|
- is_false: nodes.$node_id.indices.translog
|
||||||
- is_false: nodes.$master.indices.recovery
|
- is_false: nodes.$node_id.indices.recovery
|
||||||
|
|
||||||
|
|
||||||
---
|
---
|
||||||
"Metric - recovery":
|
"Metric - recovery":
|
||||||
|
- skip:
|
||||||
|
features: [arbitrary_key]
|
||||||
- do:
|
- do:
|
||||||
cluster.state: {}
|
nodes.info: {}
|
||||||
|
- set:
|
||||||
- set: { master_node: master }
|
nodes._arbitrary_key_: node_id
|
||||||
|
|
||||||
- do:
|
- do:
|
||||||
nodes.stats: { metric: indices, index_metric: [ recovery ] }
|
nodes.stats: { metric: indices, index_metric: [ recovery ] }
|
||||||
|
|
||||||
- is_false: nodes.$master.indices.docs
|
- is_false: nodes.$node_id.indices.docs
|
||||||
- is_false: nodes.$master.indices.store
|
- is_false: nodes.$node_id.indices.store
|
||||||
- is_false: nodes.$master.indices.indexing
|
- is_false: nodes.$node_id.indices.indexing
|
||||||
- is_false: nodes.$master.indices.get
|
- is_false: nodes.$node_id.indices.get
|
||||||
- is_false: nodes.$master.indices.search
|
- is_false: nodes.$node_id.indices.search
|
||||||
- is_false: nodes.$master.indices.merges
|
- is_false: nodes.$node_id.indices.merges
|
||||||
- is_false: nodes.$master.indices.refresh
|
- is_false: nodes.$node_id.indices.refresh
|
||||||
- is_false: nodes.$master.indices.flush
|
- is_false: nodes.$node_id.indices.flush
|
||||||
- is_false: nodes.$master.indices.warmer
|
- is_false: nodes.$node_id.indices.warmer
|
||||||
- is_false: nodes.$master.indices.query_cache
|
- is_false: nodes.$node_id.indices.query_cache
|
||||||
- is_false: nodes.$master.indices.fielddata
|
- is_false: nodes.$node_id.indices.fielddata
|
||||||
- is_false: nodes.$master.indices.completion
|
- is_false: nodes.$node_id.indices.completion
|
||||||
- is_false: nodes.$master.indices.segments
|
- is_false: nodes.$node_id.indices.segments
|
||||||
- is_false: nodes.$master.indices.translog
|
- is_false: nodes.$node_id.indices.translog
|
||||||
- is_true: nodes.$master.indices.recovery
|
- is_true: nodes.$node_id.indices.recovery
|
||||||
|
|
||||||
---
|
---
|
||||||
"Metric - _all include_segment_file_sizes":
|
"Metric - _all include_segment_file_sizes":
|
||||||
|
- skip:
|
||||||
|
features: [arbitrary_key]
|
||||||
- do:
|
- do:
|
||||||
cluster.state: {}
|
nodes.info: {}
|
||||||
|
- set:
|
||||||
- set: { master_node: master }
|
nodes._arbitrary_key_: node_id
|
||||||
|
|
||||||
- do:
|
- do:
|
||||||
nodes.stats: { metric: indices, index_metric: _all, include_segment_file_sizes: true }
|
nodes.stats: { metric: indices, index_metric: _all, include_segment_file_sizes: true }
|
||||||
|
|
||||||
- is_true: nodes.$master.indices.docs
|
- is_true: nodes.$node_id.indices.docs
|
||||||
- is_true: nodes.$master.indices.store
|
- is_true: nodes.$node_id.indices.store
|
||||||
- is_true: nodes.$master.indices.indexing
|
- is_true: nodes.$node_id.indices.indexing
|
||||||
- is_true: nodes.$master.indices.get
|
- is_true: nodes.$node_id.indices.get
|
||||||
- is_true: nodes.$master.indices.search
|
- is_true: nodes.$node_id.indices.search
|
||||||
- is_true: nodes.$master.indices.merges
|
- is_true: nodes.$node_id.indices.merges
|
||||||
- is_true: nodes.$master.indices.refresh
|
- is_true: nodes.$node_id.indices.refresh
|
||||||
- is_true: nodes.$master.indices.flush
|
- is_true: nodes.$node_id.indices.flush
|
||||||
- is_true: nodes.$master.indices.warmer
|
- is_true: nodes.$node_id.indices.warmer
|
||||||
- is_true: nodes.$master.indices.query_cache
|
- is_true: nodes.$node_id.indices.query_cache
|
||||||
- is_true: nodes.$master.indices.fielddata
|
- is_true: nodes.$node_id.indices.fielddata
|
||||||
- is_true: nodes.$master.indices.completion
|
- is_true: nodes.$node_id.indices.completion
|
||||||
- is_true: nodes.$master.indices.segments
|
- is_true: nodes.$node_id.indices.segments
|
||||||
- is_true: nodes.$master.indices.translog
|
- is_true: nodes.$node_id.indices.translog
|
||||||
- is_true: nodes.$master.indices.recovery
|
- is_true: nodes.$node_id.indices.recovery
|
||||||
- is_true: nodes.$master.indices.segments.file_sizes
|
- is_true: nodes.$node_id.indices.segments.file_sizes
|
||||||
|
|
||||||
---
|
---
|
||||||
"Metric - segments include_segment_file_sizes":
|
"Metric - segments include_segment_file_sizes":
|
||||||
|
- skip:
|
||||||
|
features: [arbitrary_key]
|
||||||
- do:
|
- do:
|
||||||
cluster.state: {}
|
nodes.info: {}
|
||||||
|
- set:
|
||||||
- set: { master_node: master }
|
nodes._arbitrary_key_: node_id
|
||||||
|
|
||||||
- do:
|
- do:
|
||||||
nodes.stats: { metric: indices, index_metric: segments, include_segment_file_sizes: true }
|
nodes.stats: { metric: indices, index_metric: segments, include_segment_file_sizes: true }
|
||||||
|
|
||||||
- is_false: nodes.$master.indices.docs
|
- is_false: nodes.$node_id.indices.docs
|
||||||
- is_false: nodes.$master.indices.store
|
- is_false: nodes.$node_id.indices.store
|
||||||
- is_false: nodes.$master.indices.indexing
|
- is_false: nodes.$node_id.indices.indexing
|
||||||
- is_false: nodes.$master.indices.get
|
- is_false: nodes.$node_id.indices.get
|
||||||
- is_false: nodes.$master.indices.search
|
- is_false: nodes.$node_id.indices.search
|
||||||
- is_false: nodes.$master.indices.merges
|
- is_false: nodes.$node_id.indices.merges
|
||||||
- is_false: nodes.$master.indices.refresh
|
- is_false: nodes.$node_id.indices.refresh
|
||||||
- is_false: nodes.$master.indices.flush
|
- is_false: nodes.$node_id.indices.flush
|
||||||
- is_false: nodes.$master.indices.warmer
|
- is_false: nodes.$node_id.indices.warmer
|
||||||
- is_false: nodes.$master.indices.query_cache
|
- is_false: nodes.$node_id.indices.query_cache
|
||||||
- is_false: nodes.$master.indices.fielddata
|
- is_false: nodes.$node_id.indices.fielddata
|
||||||
- is_false: nodes.$master.indices.completion
|
- is_false: nodes.$node_id.indices.completion
|
||||||
- is_true: nodes.$master.indices.segments
|
- is_true: nodes.$node_id.indices.segments
|
||||||
- is_false: nodes.$master.indices.translog
|
- is_false: nodes.$node_id.indices.translog
|
||||||
- is_false: nodes.$master.indices.recovery
|
- is_false: nodes.$node_id.indices.recovery
|
||||||
- is_true: nodes.$master.indices.segments.file_sizes
|
- is_true: nodes.$node_id.indices.segments.file_sizes
|
||||||
|
|
||||||
|
|
|
@ -1,10 +1,11 @@
|
||||||
---
|
---
|
||||||
"Nodes Stats with response filtering":
|
"Nodes Stats with response filtering":
|
||||||
|
- skip:
|
||||||
|
features: [arbitrary_key]
|
||||||
- do:
|
- do:
|
||||||
cluster.state: {}
|
nodes.info: {}
|
||||||
|
- set:
|
||||||
# Get master node id
|
nodes._arbitrary_key_: node_id
|
||||||
- set: { master_node: master }
|
|
||||||
|
|
||||||
# Nodes Stats with no filtering
|
# Nodes Stats with no filtering
|
||||||
- do:
|
- do:
|
||||||
|
@ -12,18 +13,18 @@
|
||||||
|
|
||||||
- is_true: cluster_name
|
- is_true: cluster_name
|
||||||
- is_true: nodes
|
- is_true: nodes
|
||||||
- is_true: nodes.$master.name
|
- is_true: nodes.$node_id.name
|
||||||
- is_true: nodes.$master.indices
|
- is_true: nodes.$node_id.indices
|
||||||
- is_true: nodes.$master.indices.docs
|
- is_true: nodes.$node_id.indices.docs
|
||||||
- gte: { nodes.$master.indices.docs.count: 0 }
|
- gte: { nodes.$node_id.indices.docs.count: 0 }
|
||||||
- is_true: nodes.$master.indices.segments
|
- is_true: nodes.$node_id.indices.segments
|
||||||
- gte: { nodes.$master.indices.segments.count: 0 }
|
- gte: { nodes.$node_id.indices.segments.count: 0 }
|
||||||
- is_true: nodes.$master.jvm
|
- is_true: nodes.$node_id.jvm
|
||||||
- is_true: nodes.$master.jvm.threads
|
- is_true: nodes.$node_id.jvm.threads
|
||||||
- gte: { nodes.$master.jvm.threads.count: 0 }
|
- gte: { nodes.$node_id.jvm.threads.count: 0 }
|
||||||
- is_true: nodes.$master.jvm.buffer_pools.direct
|
- is_true: nodes.$node_id.jvm.buffer_pools.direct
|
||||||
- gte: { nodes.$master.jvm.buffer_pools.direct.count: 0 }
|
- gte: { nodes.$node_id.jvm.buffer_pools.direct.count: 0 }
|
||||||
- gte: { nodes.$master.jvm.buffer_pools.direct.used_in_bytes: 0 }
|
- gte: { nodes.$node_id.jvm.buffer_pools.direct.used_in_bytes: 0 }
|
||||||
|
|
||||||
# Nodes Stats with only "cluster_name" field
|
# Nodes Stats with only "cluster_name" field
|
||||||
- do:
|
- do:
|
||||||
|
@ -32,9 +33,9 @@
|
||||||
|
|
||||||
- is_true: cluster_name
|
- is_true: cluster_name
|
||||||
- is_false: nodes
|
- is_false: nodes
|
||||||
- is_false: nodes.$master.name
|
- is_false: nodes.$node_id.name
|
||||||
- is_false: nodes.$master.indices
|
- is_false: nodes.$node_id.indices
|
||||||
- is_false: nodes.$master.jvm
|
- is_false: nodes.$node_id.jvm
|
||||||
|
|
||||||
# Nodes Stats with "nodes" field and sub-fields
|
# Nodes Stats with "nodes" field and sub-fields
|
||||||
- do:
|
- do:
|
||||||
|
@ -43,18 +44,18 @@
|
||||||
|
|
||||||
- is_false: cluster_name
|
- is_false: cluster_name
|
||||||
- is_true: nodes
|
- is_true: nodes
|
||||||
- is_true: nodes.$master.name
|
- is_true: nodes.$node_id.name
|
||||||
- is_true: nodes.$master.indices
|
- is_true: nodes.$node_id.indices
|
||||||
- is_true: nodes.$master.indices.docs
|
- is_true: nodes.$node_id.indices.docs
|
||||||
- gte: { nodes.$master.indices.docs.count: 0 }
|
- gte: { nodes.$node_id.indices.docs.count: 0 }
|
||||||
- is_true: nodes.$master.indices.segments
|
- is_true: nodes.$node_id.indices.segments
|
||||||
- gte: { nodes.$master.indices.segments.count: 0 }
|
- gte: { nodes.$node_id.indices.segments.count: 0 }
|
||||||
- is_true: nodes.$master.jvm
|
- is_true: nodes.$node_id.jvm
|
||||||
- is_true: nodes.$master.jvm.threads
|
- is_true: nodes.$node_id.jvm.threads
|
||||||
- gte: { nodes.$master.jvm.threads.count: 0 }
|
- gte: { nodes.$node_id.jvm.threads.count: 0 }
|
||||||
- is_true: nodes.$master.jvm.buffer_pools.direct
|
- is_true: nodes.$node_id.jvm.buffer_pools.direct
|
||||||
- gte: { nodes.$master.jvm.buffer_pools.direct.count: 0 }
|
- gte: { nodes.$node_id.jvm.buffer_pools.direct.count: 0 }
|
||||||
- gte: { nodes.$master.jvm.buffer_pools.direct.used_in_bytes: 0 }
|
- gte: { nodes.$node_id.jvm.buffer_pools.direct.used_in_bytes: 0 }
|
||||||
|
|
||||||
# Nodes Stats with "nodes.*.indices" field and sub-fields
|
# Nodes Stats with "nodes.*.indices" field and sub-fields
|
||||||
- do:
|
- do:
|
||||||
|
@ -63,13 +64,13 @@
|
||||||
|
|
||||||
- is_false: cluster_name
|
- is_false: cluster_name
|
||||||
- is_true: nodes
|
- is_true: nodes
|
||||||
- is_false: nodes.$master.name
|
- is_false: nodes.$node_id.name
|
||||||
- is_true: nodes.$master.indices
|
- is_true: nodes.$node_id.indices
|
||||||
- is_true: nodes.$master.indices.docs
|
- is_true: nodes.$node_id.indices.docs
|
||||||
- gte: { nodes.$master.indices.docs.count: 0 }
|
- gte: { nodes.$node_id.indices.docs.count: 0 }
|
||||||
- is_true: nodes.$master.indices.segments
|
- is_true: nodes.$node_id.indices.segments
|
||||||
- gte: { nodes.$master.indices.segments.count: 0 }
|
- gte: { nodes.$node_id.indices.segments.count: 0 }
|
||||||
- is_false: nodes.$master.jvm
|
- is_false: nodes.$node_id.jvm
|
||||||
|
|
||||||
# Nodes Stats with "nodes.*.name" and "nodes.*.indices.docs.count" fields
|
# Nodes Stats with "nodes.*.name" and "nodes.*.indices.docs.count" fields
|
||||||
- do:
|
- do:
|
||||||
|
@ -78,12 +79,12 @@
|
||||||
|
|
||||||
- is_false: cluster_name
|
- is_false: cluster_name
|
||||||
- is_true: nodes
|
- is_true: nodes
|
||||||
- is_true: nodes.$master.name
|
- is_true: nodes.$node_id.name
|
||||||
- is_true: nodes.$master.indices
|
- is_true: nodes.$node_id.indices
|
||||||
- is_true: nodes.$master.indices.docs
|
- is_true: nodes.$node_id.indices.docs
|
||||||
- gte: { nodes.$master.indices.docs.count: 0 }
|
- gte: { nodes.$node_id.indices.docs.count: 0 }
|
||||||
- is_false: nodes.$master.indices.segments
|
- is_false: nodes.$node_id.indices.segments
|
||||||
- is_false: nodes.$master.jvm
|
- is_false: nodes.$node_id.jvm
|
||||||
|
|
||||||
# Nodes Stats with all "count" fields
|
# Nodes Stats with all "count" fields
|
||||||
- do:
|
- do:
|
||||||
|
@ -92,18 +93,18 @@
|
||||||
|
|
||||||
- is_false: cluster_name
|
- is_false: cluster_name
|
||||||
- is_true: nodes
|
- is_true: nodes
|
||||||
- is_false: nodes.$master.name
|
- is_false: nodes.$node_id.name
|
||||||
- is_true: nodes.$master.indices
|
- is_true: nodes.$node_id.indices
|
||||||
- is_true: nodes.$master.indices.docs
|
- is_true: nodes.$node_id.indices.docs
|
||||||
- gte: { nodes.$master.indices.docs.count: 0 }
|
- gte: { nodes.$node_id.indices.docs.count: 0 }
|
||||||
- is_true: nodes.$master.indices.segments
|
- is_true: nodes.$node_id.indices.segments
|
||||||
- gte: { nodes.$master.indices.segments.count: 0 }
|
- gte: { nodes.$node_id.indices.segments.count: 0 }
|
||||||
- is_true: nodes.$master.jvm
|
- is_true: nodes.$node_id.jvm
|
||||||
- is_true: nodes.$master.jvm.threads
|
- is_true: nodes.$node_id.jvm.threads
|
||||||
- gte: { nodes.$master.jvm.threads.count: 0 }
|
- gte: { nodes.$node_id.jvm.threads.count: 0 }
|
||||||
- is_true: nodes.$master.jvm.buffer_pools.direct
|
- is_true: nodes.$node_id.jvm.buffer_pools.direct
|
||||||
- gte: { nodes.$master.jvm.buffer_pools.direct.count: 0 }
|
- gte: { nodes.$node_id.jvm.buffer_pools.direct.count: 0 }
|
||||||
- is_false: nodes.$master.jvm.buffer_pools.direct.used_in_bytes
|
- is_false: nodes.$node_id.jvm.buffer_pools.direct.used_in_bytes
|
||||||
|
|
||||||
# Nodes Stats with all "count" fields in sub-fields of "jvm" field
|
# Nodes Stats with all "count" fields in sub-fields of "jvm" field
|
||||||
- do:
|
- do:
|
||||||
|
@ -112,16 +113,16 @@
|
||||||
|
|
||||||
- is_false: cluster_name
|
- is_false: cluster_name
|
||||||
- is_true: nodes
|
- is_true: nodes
|
||||||
- is_false: nodes.$master.name
|
- is_false: nodes.$node_id.name
|
||||||
- is_false: nodes.$master.indices
|
- is_false: nodes.$node_id.indices
|
||||||
- is_false: nodes.$master.indices.docs.count
|
- is_false: nodes.$node_id.indices.docs.count
|
||||||
- is_false: nodes.$master.indices.segments.count
|
- is_false: nodes.$node_id.indices.segments.count
|
||||||
- is_true: nodes.$master.jvm
|
- is_true: nodes.$node_id.jvm
|
||||||
- is_true: nodes.$master.jvm.threads
|
- is_true: nodes.$node_id.jvm.threads
|
||||||
- gte: { nodes.$master.jvm.threads.count: 0 }
|
- gte: { nodes.$node_id.jvm.threads.count: 0 }
|
||||||
- is_true: nodes.$master.jvm.buffer_pools.direct
|
- is_true: nodes.$node_id.jvm.buffer_pools.direct
|
||||||
- gte: { nodes.$master.jvm.buffer_pools.direct.count: 0 }
|
- gte: { nodes.$node_id.jvm.buffer_pools.direct.count: 0 }
|
||||||
- is_false: nodes.$master.jvm.buffer_pools.direct.used_in_bytes
|
- is_false: nodes.$node_id.jvm.buffer_pools.direct.used_in_bytes
|
||||||
|
|
||||||
# Nodes Stats with "nodes.*.fs.data" fields
|
# Nodes Stats with "nodes.*.fs.data" fields
|
||||||
- do:
|
- do:
|
||||||
|
@ -130,13 +131,13 @@
|
||||||
|
|
||||||
- is_false: cluster_name
|
- is_false: cluster_name
|
||||||
- is_true: nodes
|
- is_true: nodes
|
||||||
- is_false: nodes.$master.name
|
- is_false: nodes.$node_id.name
|
||||||
- is_false: nodes.$master.indices
|
- is_false: nodes.$node_id.indices
|
||||||
- is_false: nodes.$master.jvm
|
- is_false: nodes.$node_id.jvm
|
||||||
- is_true: nodes.$master.fs.data
|
- is_true: nodes.$node_id.fs.data
|
||||||
- is_true: nodes.$master.fs.data.0.path
|
- is_true: nodes.$node_id.fs.data.0.path
|
||||||
- is_true: nodes.$master.fs.data.0.type
|
- is_true: nodes.$node_id.fs.data.0.type
|
||||||
- is_true: nodes.$master.fs.data.0.total_in_bytes
|
- is_true: nodes.$node_id.fs.data.0.total_in_bytes
|
||||||
|
|
||||||
# Nodes Stats with "nodes.*.fs.data.t*" fields
|
# Nodes Stats with "nodes.*.fs.data.t*" fields
|
||||||
- do:
|
- do:
|
||||||
|
@ -145,21 +146,22 @@
|
||||||
|
|
||||||
- is_false: cluster_name
|
- is_false: cluster_name
|
||||||
- is_true: nodes
|
- is_true: nodes
|
||||||
- is_false: nodes.$master.name
|
- is_false: nodes.$node_id.name
|
||||||
- is_false: nodes.$master.indices
|
- is_false: nodes.$node_id.indices
|
||||||
- is_false: nodes.$master.jvm
|
- is_false: nodes.$node_id.jvm
|
||||||
- is_true: nodes.$master.fs.data
|
- is_true: nodes.$node_id.fs.data
|
||||||
- is_false: nodes.$master.fs.data.0.path
|
- is_false: nodes.$node_id.fs.data.0.path
|
||||||
- is_true: nodes.$master.fs.data.0.type
|
- is_true: nodes.$node_id.fs.data.0.type
|
||||||
- is_true: nodes.$master.fs.data.0.total_in_bytes
|
- is_true: nodes.$node_id.fs.data.0.total_in_bytes
|
||||||
|
|
||||||
---
|
---
|
||||||
"Nodes Stats filtered using both includes and excludes filters":
|
"Nodes Stats filtered using both includes and excludes filters":
|
||||||
|
- skip:
|
||||||
|
features: [arbitrary_key]
|
||||||
- do:
|
- do:
|
||||||
cluster.state: {}
|
nodes.info: {}
|
||||||
|
- set:
|
||||||
# Get master node id
|
nodes._arbitrary_key_: node_id
|
||||||
- set: { master_node: master }
|
|
||||||
|
|
||||||
# Nodes Stats with "nodes" field but no JVM stats
|
# Nodes Stats with "nodes" field but no JVM stats
|
||||||
- do:
|
- do:
|
||||||
|
@ -168,10 +170,10 @@
|
||||||
|
|
||||||
- is_false: cluster_name
|
- is_false: cluster_name
|
||||||
- is_true: nodes
|
- is_true: nodes
|
||||||
- is_true: nodes.$master.name
|
- is_true: nodes.$node_id.name
|
||||||
- is_true: nodes.$master.os
|
- is_true: nodes.$node_id.os
|
||||||
- is_false: nodes.$master.indices
|
- is_false: nodes.$node_id.indices
|
||||||
- is_false: nodes.$master.jvm
|
- is_false: nodes.$node_id.jvm
|
||||||
|
|
||||||
# Nodes Stats with "nodes.*.indices" field and sub-fields but no indices segments
|
# Nodes Stats with "nodes.*.indices" field and sub-fields but no indices segments
|
||||||
- do:
|
- do:
|
||||||
|
@ -180,10 +182,10 @@
|
||||||
|
|
||||||
- is_false: cluster_name
|
- is_false: cluster_name
|
||||||
- is_true: nodes
|
- is_true: nodes
|
||||||
- is_false: nodes.$master.name
|
- is_false: nodes.$node_id.name
|
||||||
- is_true: nodes.$master.indices
|
- is_true: nodes.$node_id.indices
|
||||||
- is_true: nodes.$master.indices.docs
|
- is_true: nodes.$node_id.indices.docs
|
||||||
- is_false: nodes.$master.indices.segments
|
- is_false: nodes.$node_id.indices.segments
|
||||||
|
|
||||||
# Nodes Stats with "nodes.*.fs.data.t*" fields but no "type" field
|
# Nodes Stats with "nodes.*.fs.data.t*" fields but no "type" field
|
||||||
- do:
|
- do:
|
||||||
|
@ -192,9 +194,9 @@
|
||||||
|
|
||||||
- is_false: cluster_name
|
- is_false: cluster_name
|
||||||
- is_true: nodes
|
- is_true: nodes
|
||||||
- is_false: nodes.$master.name
|
- is_false: nodes.$node_id.name
|
||||||
- is_false: nodes.$master.indices
|
- is_false: nodes.$node_id.indices
|
||||||
- is_false: nodes.$master.jvm
|
- is_false: nodes.$node_id.jvm
|
||||||
- is_true: nodes.$master.fs.data
|
- is_true: nodes.$node_id.fs.data
|
||||||
- is_false: nodes.$master.fs.data.0.type
|
- is_false: nodes.$node_id.fs.data.0.type
|
||||||
- is_true: nodes.$master.fs.data.0.total_in_bytes
|
- is_true: nodes.$node_id.fs.data.0.total_in_bytes
|
||||||
|
|
|
@ -1,13 +1,13 @@
|
||||||
---
|
---
|
||||||
"Discovery stats":
|
"Discovery stats":
|
||||||
- skip:
|
- skip:
|
||||||
version: " - 6.0.99"
|
features: [arbitrary_key]
|
||||||
reason: "published_cluster_states_received arrived in 6.1.0"
|
|
||||||
- do:
|
|
||||||
cluster.state: {}
|
|
||||||
|
|
||||||
# Get master node id
|
- do:
|
||||||
- set: { master_node: master }
|
nodes.info:
|
||||||
|
node_id: _master
|
||||||
|
- set:
|
||||||
|
nodes._arbitrary_key_: master
|
||||||
|
|
||||||
- do:
|
- do:
|
||||||
nodes.stats:
|
nodes.stats:
|
||||||
|
|
|
@ -1,9 +1,6 @@
|
||||||
---
|
---
|
||||||
"get task test":
|
"get task test":
|
||||||
# Note that this gets much better testing in reindex's tests because it actually saves the task
|
# Note that this gets much better testing in reindex's tests because it actually saves the task
|
||||||
- do:
|
|
||||||
cluster.state: {}
|
|
||||||
|
|
||||||
- do:
|
- do:
|
||||||
catch: missing
|
catch: missing
|
||||||
tasks.get:
|
tasks.get:
|
||||||
|
|
|
@ -1,16 +1,18 @@
|
||||||
---
|
---
|
||||||
"tasks_list test":
|
"tasks_list test":
|
||||||
- do:
|
- skip:
|
||||||
cluster.state: {}
|
features: [arbitrary_key]
|
||||||
|
|
||||||
# Get master node id
|
- do:
|
||||||
- set: { master_node: master }
|
nodes.info: {}
|
||||||
|
- set:
|
||||||
|
nodes._arbitrary_key_: node_id
|
||||||
|
|
||||||
- do:
|
- do:
|
||||||
tasks.list: {}
|
tasks.list: {}
|
||||||
|
|
||||||
- is_true: nodes
|
- is_true: nodes
|
||||||
- is_true: nodes.$master.roles
|
- is_true: nodes.$node_id.roles
|
||||||
|
|
||||||
- do:
|
- do:
|
||||||
tasks.list:
|
tasks.list:
|
||||||
|
|
|
@ -47,7 +47,8 @@ public final class Features {
|
||||||
"warnings",
|
"warnings",
|
||||||
"yaml",
|
"yaml",
|
||||||
"contains",
|
"contains",
|
||||||
"transform_and_set"
|
"transform_and_set",
|
||||||
|
"arbitrary_key"
|
||||||
));
|
));
|
||||||
|
|
||||||
private Features() {
|
private Features() {
|
||||||
|
|
|
@ -102,7 +102,17 @@ public class ObjectPath {
|
||||||
}
|
}
|
||||||
|
|
||||||
if (object instanceof Map) {
|
if (object instanceof Map) {
|
||||||
return ((Map<String, Object>) object).get(key);
|
final Map<String, Object> objectAsMap = (Map<String, Object>) object;
|
||||||
|
if ("_arbitrary_key_".equals(key)) {
|
||||||
|
if (objectAsMap.isEmpty()) {
|
||||||
|
throw new IllegalArgumentException("requested [" + key + "] but the map was empty");
|
||||||
|
}
|
||||||
|
if (objectAsMap.containsKey(key)) {
|
||||||
|
throw new IllegalArgumentException("requested meta-key [" + key + "] but the map unexpectedly contains this key");
|
||||||
|
}
|
||||||
|
return objectAsMap.keySet().iterator().next();
|
||||||
|
}
|
||||||
|
return objectAsMap.get(key);
|
||||||
}
|
}
|
||||||
if (object instanceof List) {
|
if (object instanceof List) {
|
||||||
List<Object> list = (List<Object>) object;
|
List<Object> list = (List<Object>) object;
|
||||||
|
@ -149,7 +159,7 @@ public class ObjectPath {
|
||||||
list.add(current.toString());
|
list.add(current.toString());
|
||||||
}
|
}
|
||||||
|
|
||||||
return list.toArray(new String[list.size()]);
|
return list.toArray(new String[0]);
|
||||||
}
|
}
|
||||||
|
|
||||||
/**
|
/**
|
||||||
|
|
|
@ -34,6 +34,7 @@ import static org.hamcrest.Matchers.contains;
|
||||||
import static org.hamcrest.Matchers.containsString;
|
import static org.hamcrest.Matchers.containsString;
|
||||||
import static org.hamcrest.Matchers.equalTo;
|
import static org.hamcrest.Matchers.equalTo;
|
||||||
import static org.hamcrest.Matchers.instanceOf;
|
import static org.hamcrest.Matchers.instanceOf;
|
||||||
|
import static org.hamcrest.Matchers.isOneOf;
|
||||||
import static org.hamcrest.Matchers.notNullValue;
|
import static org.hamcrest.Matchers.notNullValue;
|
||||||
import static org.hamcrest.Matchers.nullValue;
|
import static org.hamcrest.Matchers.nullValue;
|
||||||
|
|
||||||
|
@ -181,6 +182,56 @@ public class ObjectPathTests extends ESTestCase {
|
||||||
assertThat(strings, contains("template_1", "template_2"));
|
assertThat(strings, contains("template_1", "template_2"));
|
||||||
}
|
}
|
||||||
|
|
||||||
|
public void testEvaluateArbitraryKey() throws Exception {
|
||||||
|
XContentBuilder xContentBuilder = randomXContentBuilder();
|
||||||
|
xContentBuilder.startObject();
|
||||||
|
xContentBuilder.startObject("metadata");
|
||||||
|
xContentBuilder.startObject("templates");
|
||||||
|
xContentBuilder.startObject("template_1");
|
||||||
|
xContentBuilder.field("field1", "value");
|
||||||
|
xContentBuilder.endObject();
|
||||||
|
xContentBuilder.startObject("template_2");
|
||||||
|
xContentBuilder.field("field2", "value");
|
||||||
|
xContentBuilder.field("field3", "value");
|
||||||
|
xContentBuilder.endObject();
|
||||||
|
xContentBuilder.startObject("template_3");
|
||||||
|
xContentBuilder.endObject();
|
||||||
|
xContentBuilder.startObject("template_4");
|
||||||
|
xContentBuilder.field("_arbitrary_key_", "value");
|
||||||
|
xContentBuilder.endObject();
|
||||||
|
xContentBuilder.endObject();
|
||||||
|
xContentBuilder.endObject();
|
||||||
|
xContentBuilder.endObject();
|
||||||
|
ObjectPath objectPath = ObjectPath.createFromXContent(xContentBuilder.contentType().xContent(),
|
||||||
|
BytesReference.bytes(xContentBuilder));
|
||||||
|
|
||||||
|
{
|
||||||
|
final Object object = objectPath.evaluate("metadata.templates.template_1._arbitrary_key_");
|
||||||
|
assertThat(object, instanceOf(String.class));
|
||||||
|
final String key = (String) object;
|
||||||
|
assertThat(key, equalTo("field1"));
|
||||||
|
}
|
||||||
|
|
||||||
|
{
|
||||||
|
final Object object = objectPath.evaluate("metadata.templates.template_2._arbitrary_key_");
|
||||||
|
assertThat(object, instanceOf(String.class));
|
||||||
|
final String key = (String) object;
|
||||||
|
assertThat(key, isOneOf("field2", "field3"));
|
||||||
|
}
|
||||||
|
|
||||||
|
{
|
||||||
|
final IllegalArgumentException exception
|
||||||
|
= expectThrows(IllegalArgumentException.class, () -> objectPath.evaluate("metadata.templates.template_3._arbitrary_key_"));
|
||||||
|
assertThat(exception.getMessage(), equalTo("requested [_arbitrary_key_] but the map was empty"));
|
||||||
|
}
|
||||||
|
|
||||||
|
{
|
||||||
|
final IllegalArgumentException exception
|
||||||
|
= expectThrows(IllegalArgumentException.class, () -> objectPath.evaluate("metadata.templates.template_4._arbitrary_key_"));
|
||||||
|
assertThat(exception.getMessage(), equalTo("requested meta-key [_arbitrary_key_] but the map unexpectedly contains this key"));
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
public void testEvaluateStashInPropertyName() throws Exception {
|
public void testEvaluateStashInPropertyName() throws Exception {
|
||||||
XContentBuilder xContentBuilder = randomXContentBuilder();
|
XContentBuilder xContentBuilder = randomXContentBuilder();
|
||||||
xContentBuilder.startObject();
|
xContentBuilder.startObject();
|
||||||
|
|
Loading…
Reference in New Issue