Get node ID from nodes info in REST tests (#40052) (#40532)

We recently discussed that the cluster state API should be considered
"internal", and therefore our usual cast-iron stability guarantees do not hold
for it.

However, a good number of REST tests try to identify the master node. Today
they call the `GET /_cluster/state` API and extract the master node ID from the
response. In fact, many of these tests just want an arbitrary node ID (or
perhaps a data node ID), so an alternative is to call `GET _nodes` or `GET
_nodes/data:true` and obtain a node ID from the keys of the `nodes` map in the
response.

This change adds the ability for YAML-based REST tests to extract an arbitrary
key from a map so that they can obtain a node ID from the nodes info API
instead of using the master node ID from the cluster state API.
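
For example, a test that previously fetched the master node ID via `GET
/_cluster/state` can instead stash the ID of any data node, along the lines of
the pattern applied in the diffs below (a sketch; it relies on the
`arbitrary_key` feature and the `_arbitrary_key_` meta-key introduced here):

  - skip:
      features: [arbitrary_key]
  - do:
      nodes.info:
        node_id: data:true         # node IDs are the keys of the response's `nodes` map
  - set:
      nodes._arbitrary_key_: node_id
  - do:
      nodes.stats: {}
  - is_true: nodes.$node_id.indices  # the stashed ID is used like any other stashed value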

Relates #40047.
David Turner 2019-03-27 23:08:10 +00:00 committed by GitHub
parent 23395a9b9f
commit 5a2ba34174
18 changed files with 406 additions and 326 deletions

View File

@ -12,10 +12,6 @@
- match: { acknowledged: true }
- do:
cluster.state:
metric: [ master_node ]
- do:
cluster.allocation_explain:
body: { "index": "test", "shard": 0, "primary": true }
@ -37,10 +33,6 @@
index: test
body: { "settings": { "index.number_of_shards": 1, "index.number_of_replicas": 9 } }
- do:
cluster.state:
metric: [ master_node ]
- do:
cluster.allocation_explain:
include_disk_info: true

View File

@ -25,12 +25,14 @@ setup:
---
"Explain API for non-existent node & shard":
- skip:
features: [arbitrary_key]
- do:
cluster.state:
metric: [ master_node ]
- set: {master_node: node_id}
nodes.info:
node_id: data:true
- set:
nodes._arbitrary_key_: node_id
- do:
cluster.reroute:

View File

@ -3,18 +3,20 @@
- skip:
version: " - 6.9.99"
reason: expects warnings that pre-7.0.0 will not send
features: "warnings"
# creates an index with one document solely allocated on the master node
features: [warnings, arbitrary_key]
# creates an index with one document solely allocated on a particular data node
# and shrinks it into a new index with a single shard
# we don't do the relocation to a single node after the index is created
# here since in a mixed version cluster we can't identify
# which node is the one with the highest version and that is the only one that can safely
# be used to shrink the index.
- do:
cluster.state: {}
# Get master node id
- set: { master_node: master }
- do:
nodes.info:
node_id: data:true
- set:
nodes._arbitrary_key_: node_id
- do:
indices.create:
@ -22,8 +24,8 @@
wait_for_active_shards: 1
body:
settings:
# ensure everything is allocated on a single node
index.routing.allocation.include._id: $master
# ensure everything is allocated on the same data node
index.routing.allocation.include._id: $node_id
index.number_of_shards: 2
index.number_of_replicas: 0
- do:

View File

@ -3,13 +3,13 @@
- skip:
version: " - 6.9.99"
reason: expects warnings that pre-7.0.0 will not send
features: "warnings"
features: [warnings, arbitrary_key]
- do:
cluster.state: {}
# Get master node id
- set: { master_node: master }
nodes.info:
node_id: data:true
- set:
nodes._arbitrary_key_: node_id
# create index
- do:
@ -19,7 +19,7 @@
body:
settings:
# ensure everything is allocated on a single node
index.routing.allocation.include._id: $master
index.routing.allocation.include._id: $node_id
index.number_of_shards: 2
index.number_of_replicas: 0
mappings:

View File

@ -3,13 +3,13 @@
- skip:
version: " - 6.9.99"
reason: expects warnings that pre-7.0.0 will not send
features: "warnings"
features: [warnings, arbitrary_key]
- do:
cluster.state: {}
# get master node id
- set: { master_node: master }
nodes.info:
node_id: data:true
- set:
nodes._arbitrary_key_: node_id
- do:
indices.create:
@ -17,8 +17,8 @@
wait_for_active_shards: 1
body:
settings:
# ensure everything is allocated on the master node
index.routing.allocation.include._id: $master
# ensure everything is allocated on the same node
index.routing.allocation.include._id: $node_id
index.number_of_shards: 2
index.number_of_replicas: 0
index.merge.scheduler.max_merge_count: 4
@ -63,7 +63,7 @@
- match: { copy-settings-target.settings.index.merge.scheduler.max_merge_count: "4" }
- match: { copy-settings-target.settings.index.merge.scheduler.max_thread_count: "2" }
- match: { copy-settings-target.settings.index.blocks.write: "true" }
- match: { copy-settings-target.settings.index.routing.allocation.include._id: $master }
- match: { copy-settings-target.settings.index.routing.allocation.include._id: $node_id }
# now we do an actual shrink and copy settings (by default)
- do:
@ -89,7 +89,7 @@
- match: { default-copy-settings-target.settings.index.merge.scheduler.max_merge_count: "4" }
- match: { default-copy-settings-target.settings.index.merge.scheduler.max_thread_count: "2" }
- match: { default-copy-settings-target.settings.index.blocks.write: "true" }
- match: { default-copy-settings-target.settings.index.routing.allocation.include._id: $master }
- match: { default-copy-settings-target.settings.index.routing.allocation.include._id: $node_id }
# now we do an actual shrink and try to set no copy settings
- do:

View File

@ -3,13 +3,13 @@
- skip:
version: " - 6.9.99"
reason: expects warnings that pre-7.0.0 will not send
features: "warnings"
features: [arbitrary_key, warnings]
- do:
cluster.state: {}
# get master node id
- set: { master_node: master }
nodes.info:
node_id: data:true
- set:
nodes._arbitrary_key_: node_id
- do:
indices.create:
@ -17,8 +17,8 @@
wait_for_active_shards: 1
body:
settings:
# ensure everything is allocated on the master node
index.routing.allocation.include._id: $master
# ensure everything is allocated on the same node
index.routing.allocation.include._id: $node_id
index.number_of_replicas: 0
index.number_of_shards: 1
index.number_of_routing_shards: 4
@ -66,7 +66,7 @@
- match: { copy-settings-target.settings.index.merge.scheduler.max_merge_count: "4" }
- match: { copy-settings-target.settings.index.merge.scheduler.max_thread_count: "2" }
- match: { copy-settings-target.settings.index.blocks.write: "true" }
- match: { copy-settings-target.settings.index.routing.allocation.include._id: $master }
- match: { copy-settings-target.settings.index.routing.allocation.include._id: $node_id }
# now we do an actual shrink and copy settings (by default)
- do:
@ -93,7 +93,7 @@
- match: { default-copy-settings-target.settings.index.merge.scheduler.max_merge_count: "4" }
- match: { default-copy-settings-target.settings.index.merge.scheduler.max_thread_count: "2" }
- match: { default-copy-settings-target.settings.index.blocks.write: "true" }
- match: { default-copy-settings-target.settings.index.routing.allocation.include._id: $master }
- match: { default-copy-settings-target.settings.index.routing.allocation.include._id: $node_id }
- do:
catch: /illegal_argument_exception/

View File

@ -1,14 +1,13 @@
setup:
- skip:
features: [arbitrary_key]
---
"node_info test":
- do:
cluster.state: {}
# Get master node id
- set: { master_node: master }
- do:
nodes.info: {}
- set:
nodes._arbitrary_key_: node_id
- is_true: nodes
- is_true: cluster_name
- is_true: nodes.$master.roles
- is_true: nodes.$node_id.roles

View File

@ -2,15 +2,15 @@
"node_info test profile is empty":
- skip:
features: stash_in_path
features: [stash_in_path, arbitrary_key]
- do:
cluster.state: {}
- set: {master_node: master}
nodes.info: {}
- set:
nodes._arbitrary_key_: node_id
- do:
nodes.info:
metric: [ transport ]
- is_true: nodes.$master.transport.profiles
- is_true: nodes.$node_id.transport.profiles

View File

@ -1,19 +1,22 @@
---
"node_info test flat_settings":
- do:
cluster.state: {}
- skip:
features: [arbitrary_key]
- set: { master_node: master }
- do:
nodes.info: {}
- set:
nodes._arbitrary_key_: node_id
- do:
nodes.info:
metric: [ settings ]
- match : { nodes.$master.settings.client.type: node }
- match : { nodes.$node_id.settings.client.type: node }
- do:
nodes.info:
metric: [ settings ]
flat_settings: true
- match : { nodes.$master.settings.client\.type: node }
- match : { nodes.$node_id.settings.client\.type: node }

View File

@ -9,17 +9,20 @@
---
"Nodes stats level":
- do:
cluster.state: {}
- skip:
features: [arbitrary_key]
- set: { master_node: master }
- do:
nodes.info: {}
- set:
nodes._arbitrary_key_: node_id
- do:
nodes.stats:
metric: [ indices ]
level: "indices"
- is_true: nodes.$master.indices.indices
- is_true: nodes.$node_id.indices.indices
---
"Nodes stats unrecognized parameter":

View File

@ -1,211 +1,227 @@
---
"Metric - blank":
- skip:
features: [arbitrary_key]
- do:
cluster.state: {}
- set: { master_node: master }
nodes.info: {}
- set:
nodes._arbitrary_key_: node_id
- do:
nodes.stats: {}
- is_true: nodes.$master.indices.docs
- is_true: nodes.$master.indices.store
- is_true: nodes.$master.indices.indexing
- is_true: nodes.$master.indices.get
- is_true: nodes.$master.indices.search
- is_true: nodes.$master.indices.merges
- is_true: nodes.$master.indices.refresh
- is_true: nodes.$master.indices.flush
- is_true: nodes.$master.indices.warmer
- is_true: nodes.$master.indices.query_cache
- is_true: nodes.$master.indices.fielddata
- is_true: nodes.$master.indices.completion
- is_true: nodes.$master.indices.segments
- is_true: nodes.$master.indices.translog
- is_true: nodes.$master.indices.recovery
- is_true: nodes.$node_id.indices.docs
- is_true: nodes.$node_id.indices.store
- is_true: nodes.$node_id.indices.indexing
- is_true: nodes.$node_id.indices.get
- is_true: nodes.$node_id.indices.search
- is_true: nodes.$node_id.indices.merges
- is_true: nodes.$node_id.indices.refresh
- is_true: nodes.$node_id.indices.flush
- is_true: nodes.$node_id.indices.warmer
- is_true: nodes.$node_id.indices.query_cache
- is_true: nodes.$node_id.indices.fielddata
- is_true: nodes.$node_id.indices.completion
- is_true: nodes.$node_id.indices.segments
- is_true: nodes.$node_id.indices.translog
- is_true: nodes.$node_id.indices.recovery
---
"Metric - _all":
- skip:
features: [arbitrary_key]
- do:
cluster.state: {}
- set: { master_node: master }
nodes.info: {}
- set:
nodes._arbitrary_key_: node_id
- do:
nodes.stats: { metric: _all }
- is_true: nodes.$master.indices.docs
- is_true: nodes.$master.indices.store
- is_true: nodes.$master.indices.indexing
- is_true: nodes.$master.indices.get
- is_true: nodes.$master.indices.search
- is_true: nodes.$master.indices.merges
- is_true: nodes.$master.indices.refresh
- is_true: nodes.$master.indices.flush
- is_true: nodes.$master.indices.warmer
- is_true: nodes.$master.indices.query_cache
- is_true: nodes.$master.indices.fielddata
- is_true: nodes.$master.indices.completion
- is_true: nodes.$master.indices.segments
- is_true: nodes.$master.indices.translog
- is_true: nodes.$master.indices.recovery
- is_true: nodes.$node_id.indices.docs
- is_true: nodes.$node_id.indices.store
- is_true: nodes.$node_id.indices.indexing
- is_true: nodes.$node_id.indices.get
- is_true: nodes.$node_id.indices.search
- is_true: nodes.$node_id.indices.merges
- is_true: nodes.$node_id.indices.refresh
- is_true: nodes.$node_id.indices.flush
- is_true: nodes.$node_id.indices.warmer
- is_true: nodes.$node_id.indices.query_cache
- is_true: nodes.$node_id.indices.fielddata
- is_true: nodes.$node_id.indices.completion
- is_true: nodes.$node_id.indices.segments
- is_true: nodes.$node_id.indices.translog
- is_true: nodes.$node_id.indices.recovery
---
"Metric - indices _all":
- skip:
features: [arbitrary_key]
- do:
cluster.state: {}
- set: { master_node: master }
nodes.info: {}
- set:
nodes._arbitrary_key_: node_id
- do:
nodes.stats: { metric: indices, index_metric: _all }
- is_true: nodes.$master.indices.docs
- is_true: nodes.$master.indices.store
- is_true: nodes.$master.indices.indexing
- is_true: nodes.$master.indices.get
- is_true: nodes.$master.indices.search
- is_true: nodes.$master.indices.merges
- is_true: nodes.$master.indices.refresh
- is_true: nodes.$master.indices.flush
- is_true: nodes.$master.indices.warmer
- is_true: nodes.$master.indices.query_cache
- is_true: nodes.$master.indices.fielddata
- is_true: nodes.$master.indices.completion
- is_true: nodes.$master.indices.segments
- is_true: nodes.$master.indices.translog
- is_true: nodes.$master.indices.recovery
- is_true: nodes.$node_id.indices.docs
- is_true: nodes.$node_id.indices.store
- is_true: nodes.$node_id.indices.indexing
- is_true: nodes.$node_id.indices.get
- is_true: nodes.$node_id.indices.search
- is_true: nodes.$node_id.indices.merges
- is_true: nodes.$node_id.indices.refresh
- is_true: nodes.$node_id.indices.flush
- is_true: nodes.$node_id.indices.warmer
- is_true: nodes.$node_id.indices.query_cache
- is_true: nodes.$node_id.indices.fielddata
- is_true: nodes.$node_id.indices.completion
- is_true: nodes.$node_id.indices.segments
- is_true: nodes.$node_id.indices.translog
- is_true: nodes.$node_id.indices.recovery
---
"Metric - one":
- skip:
features: [arbitrary_key]
- do:
cluster.state: {}
- set: { master_node: master }
nodes.info: {}
- set:
nodes._arbitrary_key_: node_id
- do:
nodes.stats: { metric: indices, index_metric: docs }
- is_true: nodes.$master.indices.docs
- is_false: nodes.$master.indices.store
- is_false: nodes.$master.indices.indexing
- is_false: nodes.$master.indices.get
- is_false: nodes.$master.indices.search
- is_false: nodes.$master.indices.merges
- is_false: nodes.$master.indices.refresh
- is_false: nodes.$master.indices.flush
- is_false: nodes.$master.indices.warmer
- is_false: nodes.$master.indices.query_cache
- is_false: nodes.$master.indices.fielddata
- is_false: nodes.$master.indices.completion
- is_false: nodes.$master.indices.segments
- is_false: nodes.$master.indices.translog
- is_false: nodes.$master.indices.recovery
- is_true: nodes.$node_id.indices.docs
- is_false: nodes.$node_id.indices.store
- is_false: nodes.$node_id.indices.indexing
- is_false: nodes.$node_id.indices.get
- is_false: nodes.$node_id.indices.search
- is_false: nodes.$node_id.indices.merges
- is_false: nodes.$node_id.indices.refresh
- is_false: nodes.$node_id.indices.flush
- is_false: nodes.$node_id.indices.warmer
- is_false: nodes.$node_id.indices.query_cache
- is_false: nodes.$node_id.indices.fielddata
- is_false: nodes.$node_id.indices.completion
- is_false: nodes.$node_id.indices.segments
- is_false: nodes.$node_id.indices.translog
- is_false: nodes.$node_id.indices.recovery
---
"Metric - multi":
- skip:
features: [arbitrary_key]
- do:
cluster.state: {}
- set: { master_node: master }
nodes.info: {}
- set:
nodes._arbitrary_key_: node_id
- do:
nodes.stats: { metric: indices, index_metric: [ store, get, merge ] }
- is_false: nodes.$master.indices.docs
- is_true: nodes.$master.indices.store
- is_false: nodes.$master.indices.indexing
- is_true: nodes.$master.indices.get
- is_false: nodes.$master.indices.search
- is_true: nodes.$master.indices.merges
- is_false: nodes.$master.indices.refresh
- is_false: nodes.$master.indices.flush
- is_false: nodes.$master.indices.warmer
- is_false: nodes.$master.indices.query_cache
- is_false: nodes.$master.indices.fielddata
- is_false: nodes.$master.indices.completion
- is_false: nodes.$master.indices.segments
- is_false: nodes.$master.indices.translog
- is_false: nodes.$master.indices.recovery
- is_false: nodes.$node_id.indices.docs
- is_true: nodes.$node_id.indices.store
- is_false: nodes.$node_id.indices.indexing
- is_true: nodes.$node_id.indices.get
- is_false: nodes.$node_id.indices.search
- is_true: nodes.$node_id.indices.merges
- is_false: nodes.$node_id.indices.refresh
- is_false: nodes.$node_id.indices.flush
- is_false: nodes.$node_id.indices.warmer
- is_false: nodes.$node_id.indices.query_cache
- is_false: nodes.$node_id.indices.fielddata
- is_false: nodes.$node_id.indices.completion
- is_false: nodes.$node_id.indices.segments
- is_false: nodes.$node_id.indices.translog
- is_false: nodes.$node_id.indices.recovery
---
"Metric - recovery":
- skip:
features: [arbitrary_key]
- do:
cluster.state: {}
- set: { master_node: master }
nodes.info: {}
- set:
nodes._arbitrary_key_: node_id
- do:
nodes.stats: { metric: indices, index_metric: [ recovery ] }
- is_false: nodes.$master.indices.docs
- is_false: nodes.$master.indices.store
- is_false: nodes.$master.indices.indexing
- is_false: nodes.$master.indices.get
- is_false: nodes.$master.indices.search
- is_false: nodes.$master.indices.merges
- is_false: nodes.$master.indices.refresh
- is_false: nodes.$master.indices.flush
- is_false: nodes.$master.indices.warmer
- is_false: nodes.$master.indices.query_cache
- is_false: nodes.$master.indices.fielddata
- is_false: nodes.$master.indices.completion
- is_false: nodes.$master.indices.segments
- is_false: nodes.$master.indices.translog
- is_true: nodes.$master.indices.recovery
- is_false: nodes.$node_id.indices.docs
- is_false: nodes.$node_id.indices.store
- is_false: nodes.$node_id.indices.indexing
- is_false: nodes.$node_id.indices.get
- is_false: nodes.$node_id.indices.search
- is_false: nodes.$node_id.indices.merges
- is_false: nodes.$node_id.indices.refresh
- is_false: nodes.$node_id.indices.flush
- is_false: nodes.$node_id.indices.warmer
- is_false: nodes.$node_id.indices.query_cache
- is_false: nodes.$node_id.indices.fielddata
- is_false: nodes.$node_id.indices.completion
- is_false: nodes.$node_id.indices.segments
- is_false: nodes.$node_id.indices.translog
- is_true: nodes.$node_id.indices.recovery
---
"Metric - _all include_segment_file_sizes":
- skip:
features: [arbitrary_key]
- do:
cluster.state: {}
- set: { master_node: master }
nodes.info: {}
- set:
nodes._arbitrary_key_: node_id
- do:
nodes.stats: { metric: indices, index_metric: _all, include_segment_file_sizes: true }
- is_true: nodes.$master.indices.docs
- is_true: nodes.$master.indices.store
- is_true: nodes.$master.indices.indexing
- is_true: nodes.$master.indices.get
- is_true: nodes.$master.indices.search
- is_true: nodes.$master.indices.merges
- is_true: nodes.$master.indices.refresh
- is_true: nodes.$master.indices.flush
- is_true: nodes.$master.indices.warmer
- is_true: nodes.$master.indices.query_cache
- is_true: nodes.$master.indices.fielddata
- is_true: nodes.$master.indices.completion
- is_true: nodes.$master.indices.segments
- is_true: nodes.$master.indices.translog
- is_true: nodes.$master.indices.recovery
- is_true: nodes.$master.indices.segments.file_sizes
- is_true: nodes.$node_id.indices.docs
- is_true: nodes.$node_id.indices.store
- is_true: nodes.$node_id.indices.indexing
- is_true: nodes.$node_id.indices.get
- is_true: nodes.$node_id.indices.search
- is_true: nodes.$node_id.indices.merges
- is_true: nodes.$node_id.indices.refresh
- is_true: nodes.$node_id.indices.flush
- is_true: nodes.$node_id.indices.warmer
- is_true: nodes.$node_id.indices.query_cache
- is_true: nodes.$node_id.indices.fielddata
- is_true: nodes.$node_id.indices.completion
- is_true: nodes.$node_id.indices.segments
- is_true: nodes.$node_id.indices.translog
- is_true: nodes.$node_id.indices.recovery
- is_true: nodes.$node_id.indices.segments.file_sizes
---
"Metric - segments include_segment_file_sizes":
- skip:
features: [arbitrary_key]
- do:
cluster.state: {}
- set: { master_node: master }
nodes.info: {}
- set:
nodes._arbitrary_key_: node_id
- do:
nodes.stats: { metric: indices, index_metric: segments, include_segment_file_sizes: true }
- is_false: nodes.$master.indices.docs
- is_false: nodes.$master.indices.store
- is_false: nodes.$master.indices.indexing
- is_false: nodes.$master.indices.get
- is_false: nodes.$master.indices.search
- is_false: nodes.$master.indices.merges
- is_false: nodes.$master.indices.refresh
- is_false: nodes.$master.indices.flush
- is_false: nodes.$master.indices.warmer
- is_false: nodes.$master.indices.query_cache
- is_false: nodes.$master.indices.fielddata
- is_false: nodes.$master.indices.completion
- is_true: nodes.$master.indices.segments
- is_false: nodes.$master.indices.translog
- is_false: nodes.$master.indices.recovery
- is_true: nodes.$master.indices.segments.file_sizes
- is_false: nodes.$node_id.indices.docs
- is_false: nodes.$node_id.indices.store
- is_false: nodes.$node_id.indices.indexing
- is_false: nodes.$node_id.indices.get
- is_false: nodes.$node_id.indices.search
- is_false: nodes.$node_id.indices.merges
- is_false: nodes.$node_id.indices.refresh
- is_false: nodes.$node_id.indices.flush
- is_false: nodes.$node_id.indices.warmer
- is_false: nodes.$node_id.indices.query_cache
- is_false: nodes.$node_id.indices.fielddata
- is_false: nodes.$node_id.indices.completion
- is_true: nodes.$node_id.indices.segments
- is_false: nodes.$node_id.indices.translog
- is_false: nodes.$node_id.indices.recovery
- is_true: nodes.$node_id.indices.segments.file_sizes

View File

@ -1,10 +1,11 @@
---
"Nodes Stats with response filtering":
- skip:
features: [arbitrary_key]
- do:
cluster.state: {}
# Get master node id
- set: { master_node: master }
nodes.info: {}
- set:
nodes._arbitrary_key_: node_id
# Nodes Stats with no filtering
- do:
@ -12,18 +13,18 @@
- is_true: cluster_name
- is_true: nodes
- is_true: nodes.$master.name
- is_true: nodes.$master.indices
- is_true: nodes.$master.indices.docs
- gte: { nodes.$master.indices.docs.count: 0 }
- is_true: nodes.$master.indices.segments
- gte: { nodes.$master.indices.segments.count: 0 }
- is_true: nodes.$master.jvm
- is_true: nodes.$master.jvm.threads
- gte: { nodes.$master.jvm.threads.count: 0 }
- is_true: nodes.$master.jvm.buffer_pools.direct
- gte: { nodes.$master.jvm.buffer_pools.direct.count: 0 }
- gte: { nodes.$master.jvm.buffer_pools.direct.used_in_bytes: 0 }
- is_true: nodes.$node_id.name
- is_true: nodes.$node_id.indices
- is_true: nodes.$node_id.indices.docs
- gte: { nodes.$node_id.indices.docs.count: 0 }
- is_true: nodes.$node_id.indices.segments
- gte: { nodes.$node_id.indices.segments.count: 0 }
- is_true: nodes.$node_id.jvm
- is_true: nodes.$node_id.jvm.threads
- gte: { nodes.$node_id.jvm.threads.count: 0 }
- is_true: nodes.$node_id.jvm.buffer_pools.direct
- gte: { nodes.$node_id.jvm.buffer_pools.direct.count: 0 }
- gte: { nodes.$node_id.jvm.buffer_pools.direct.used_in_bytes: 0 }
# Nodes Stats with only "cluster_name" field
- do:
@ -32,9 +33,9 @@
- is_true: cluster_name
- is_false: nodes
- is_false: nodes.$master.name
- is_false: nodes.$master.indices
- is_false: nodes.$master.jvm
- is_false: nodes.$node_id.name
- is_false: nodes.$node_id.indices
- is_false: nodes.$node_id.jvm
# Nodes Stats with "nodes" field and sub-fields
- do:
@ -43,18 +44,18 @@
- is_false: cluster_name
- is_true: nodes
- is_true: nodes.$master.name
- is_true: nodes.$master.indices
- is_true: nodes.$master.indices.docs
- gte: { nodes.$master.indices.docs.count: 0 }
- is_true: nodes.$master.indices.segments
- gte: { nodes.$master.indices.segments.count: 0 }
- is_true: nodes.$master.jvm
- is_true: nodes.$master.jvm.threads
- gte: { nodes.$master.jvm.threads.count: 0 }
- is_true: nodes.$master.jvm.buffer_pools.direct
- gte: { nodes.$master.jvm.buffer_pools.direct.count: 0 }
- gte: { nodes.$master.jvm.buffer_pools.direct.used_in_bytes: 0 }
- is_true: nodes.$node_id.name
- is_true: nodes.$node_id.indices
- is_true: nodes.$node_id.indices.docs
- gte: { nodes.$node_id.indices.docs.count: 0 }
- is_true: nodes.$node_id.indices.segments
- gte: { nodes.$node_id.indices.segments.count: 0 }
- is_true: nodes.$node_id.jvm
- is_true: nodes.$node_id.jvm.threads
- gte: { nodes.$node_id.jvm.threads.count: 0 }
- is_true: nodes.$node_id.jvm.buffer_pools.direct
- gte: { nodes.$node_id.jvm.buffer_pools.direct.count: 0 }
- gte: { nodes.$node_id.jvm.buffer_pools.direct.used_in_bytes: 0 }
# Nodes Stats with "nodes.*.indices" field and sub-fields
- do:
@ -63,13 +64,13 @@
- is_false: cluster_name
- is_true: nodes
- is_false: nodes.$master.name
- is_true: nodes.$master.indices
- is_true: nodes.$master.indices.docs
- gte: { nodes.$master.indices.docs.count: 0 }
- is_true: nodes.$master.indices.segments
- gte: { nodes.$master.indices.segments.count: 0 }
- is_false: nodes.$master.jvm
- is_false: nodes.$node_id.name
- is_true: nodes.$node_id.indices
- is_true: nodes.$node_id.indices.docs
- gte: { nodes.$node_id.indices.docs.count: 0 }
- is_true: nodes.$node_id.indices.segments
- gte: { nodes.$node_id.indices.segments.count: 0 }
- is_false: nodes.$node_id.jvm
# Nodes Stats with "nodes.*.name" and "nodes.*.indices.docs.count" fields
- do:
@ -78,12 +79,12 @@
- is_false: cluster_name
- is_true: nodes
- is_true: nodes.$master.name
- is_true: nodes.$master.indices
- is_true: nodes.$master.indices.docs
- gte: { nodes.$master.indices.docs.count: 0 }
- is_false: nodes.$master.indices.segments
- is_false: nodes.$master.jvm
- is_true: nodes.$node_id.name
- is_true: nodes.$node_id.indices
- is_true: nodes.$node_id.indices.docs
- gte: { nodes.$node_id.indices.docs.count: 0 }
- is_false: nodes.$node_id.indices.segments
- is_false: nodes.$node_id.jvm
# Nodes Stats with all "count" fields
- do:
@ -92,18 +93,18 @@
- is_false: cluster_name
- is_true: nodes
- is_false: nodes.$master.name
- is_true: nodes.$master.indices
- is_true: nodes.$master.indices.docs
- gte: { nodes.$master.indices.docs.count: 0 }
- is_true: nodes.$master.indices.segments
- gte: { nodes.$master.indices.segments.count: 0 }
- is_true: nodes.$master.jvm
- is_true: nodes.$master.jvm.threads
- gte: { nodes.$master.jvm.threads.count: 0 }
- is_true: nodes.$master.jvm.buffer_pools.direct
- gte: { nodes.$master.jvm.buffer_pools.direct.count: 0 }
- is_false: nodes.$master.jvm.buffer_pools.direct.used_in_bytes
- is_false: nodes.$node_id.name
- is_true: nodes.$node_id.indices
- is_true: nodes.$node_id.indices.docs
- gte: { nodes.$node_id.indices.docs.count: 0 }
- is_true: nodes.$node_id.indices.segments
- gte: { nodes.$node_id.indices.segments.count: 0 }
- is_true: nodes.$node_id.jvm
- is_true: nodes.$node_id.jvm.threads
- gte: { nodes.$node_id.jvm.threads.count: 0 }
- is_true: nodes.$node_id.jvm.buffer_pools.direct
- gte: { nodes.$node_id.jvm.buffer_pools.direct.count: 0 }
- is_false: nodes.$node_id.jvm.buffer_pools.direct.used_in_bytes
# Nodes Stats with all "count" fields in sub-fields of "jvm" field
- do:
@ -112,16 +113,16 @@
- is_false: cluster_name
- is_true: nodes
- is_false: nodes.$master.name
- is_false: nodes.$master.indices
- is_false: nodes.$master.indices.docs.count
- is_false: nodes.$master.indices.segments.count
- is_true: nodes.$master.jvm
- is_true: nodes.$master.jvm.threads
- gte: { nodes.$master.jvm.threads.count: 0 }
- is_true: nodes.$master.jvm.buffer_pools.direct
- gte: { nodes.$master.jvm.buffer_pools.direct.count: 0 }
- is_false: nodes.$master.jvm.buffer_pools.direct.used_in_bytes
- is_false: nodes.$node_id.name
- is_false: nodes.$node_id.indices
- is_false: nodes.$node_id.indices.docs.count
- is_false: nodes.$node_id.indices.segments.count
- is_true: nodes.$node_id.jvm
- is_true: nodes.$node_id.jvm.threads
- gte: { nodes.$node_id.jvm.threads.count: 0 }
- is_true: nodes.$node_id.jvm.buffer_pools.direct
- gte: { nodes.$node_id.jvm.buffer_pools.direct.count: 0 }
- is_false: nodes.$node_id.jvm.buffer_pools.direct.used_in_bytes
# Nodes Stats with "nodes.*.fs.data" fields
- do:
@ -130,13 +131,13 @@
- is_false: cluster_name
- is_true: nodes
- is_false: nodes.$master.name
- is_false: nodes.$master.indices
- is_false: nodes.$master.jvm
- is_true: nodes.$master.fs.data
- is_true: nodes.$master.fs.data.0.path
- is_true: nodes.$master.fs.data.0.type
- is_true: nodes.$master.fs.data.0.total_in_bytes
- is_false: nodes.$node_id.name
- is_false: nodes.$node_id.indices
- is_false: nodes.$node_id.jvm
- is_true: nodes.$node_id.fs.data
- is_true: nodes.$node_id.fs.data.0.path
- is_true: nodes.$node_id.fs.data.0.type
- is_true: nodes.$node_id.fs.data.0.total_in_bytes
# Nodes Stats with "nodes.*.fs.data.t*" fields
- do:
@ -145,21 +146,22 @@
- is_false: cluster_name
- is_true: nodes
- is_false: nodes.$master.name
- is_false: nodes.$master.indices
- is_false: nodes.$master.jvm
- is_true: nodes.$master.fs.data
- is_false: nodes.$master.fs.data.0.path
- is_true: nodes.$master.fs.data.0.type
- is_true: nodes.$master.fs.data.0.total_in_bytes
- is_false: nodes.$node_id.name
- is_false: nodes.$node_id.indices
- is_false: nodes.$node_id.jvm
- is_true: nodes.$node_id.fs.data
- is_false: nodes.$node_id.fs.data.0.path
- is_true: nodes.$node_id.fs.data.0.type
- is_true: nodes.$node_id.fs.data.0.total_in_bytes
---
"Nodes Stats filtered using both includes and excludes filters":
- skip:
features: [arbitrary_key]
- do:
cluster.state: {}
# Get master node id
- set: { master_node: master }
nodes.info: {}
- set:
nodes._arbitrary_key_: node_id
# Nodes Stats with "nodes" field but no JVM stats
- do:
@ -168,10 +170,10 @@
- is_false: cluster_name
- is_true: nodes
- is_true: nodes.$master.name
- is_true: nodes.$master.os
- is_false: nodes.$master.indices
- is_false: nodes.$master.jvm
- is_true: nodes.$node_id.name
- is_true: nodes.$node_id.os
- is_false: nodes.$node_id.indices
- is_false: nodes.$node_id.jvm
# Nodes Stats with "nodes.*.indices" field and sub-fields but no indices segments
- do:
@ -180,10 +182,10 @@
- is_false: cluster_name
- is_true: nodes
- is_false: nodes.$master.name
- is_true: nodes.$master.indices
- is_true: nodes.$master.indices.docs
- is_false: nodes.$master.indices.segments
- is_false: nodes.$node_id.name
- is_true: nodes.$node_id.indices
- is_true: nodes.$node_id.indices.docs
- is_false: nodes.$node_id.indices.segments
# Nodes Stats with "nodes.*.fs.data.t*" fields but no "type" field
- do:
@ -192,9 +194,9 @@
- is_false: cluster_name
- is_true: nodes
- is_false: nodes.$master.name
- is_false: nodes.$master.indices
- is_false: nodes.$master.jvm
- is_true: nodes.$master.fs.data
- is_false: nodes.$master.fs.data.0.type
- is_true: nodes.$master.fs.data.0.total_in_bytes
- is_false: nodes.$node_id.name
- is_false: nodes.$node_id.indices
- is_false: nodes.$node_id.jvm
- is_true: nodes.$node_id.fs.data
- is_false: nodes.$node_id.fs.data.0.type
- is_true: nodes.$node_id.fs.data.0.total_in_bytes

View File

@ -1,13 +1,13 @@
---
"Discovery stats":
- skip:
version: " - 6.0.99"
reason: "published_cluster_states_received arrived in 6.1.0"
- do:
cluster.state: {}
features: [arbitrary_key]
# Get master node id
- set: { master_node: master }
- do:
nodes.info:
node_id: _master
- set:
nodes._arbitrary_key_: master
- do:
nodes.stats:

View File

@ -1,9 +1,6 @@
---
"get task test":
# Note that this gets much better testing in reindex's tests because it actually saves the task
- do:
cluster.state: {}
- do:
catch: missing
tasks.get:

View File

@ -1,16 +1,18 @@
---
"tasks_list test":
- do:
cluster.state: {}
- skip:
features: [arbitrary_key]
# Get master node id
- set: { master_node: master }
- do:
nodes.info: {}
- set:
nodes._arbitrary_key_: node_id
- do:
tasks.list: {}
- is_true: nodes
- is_true: nodes.$master.roles
- is_true: nodes.$node_id.roles
- do:
tasks.list:

View File

@ -47,7 +47,8 @@ public final class Features {
"warnings",
"yaml",
"contains",
"transform_and_set"
"transform_and_set",
"arbitrary_key"
));
private Features() {

View File

@ -102,7 +102,17 @@ public class ObjectPath {
}
if (object instanceof Map) {
return ((Map<String, Object>) object).get(key);
final Map<String, Object> objectAsMap = (Map<String, Object>) object;
if ("_arbitrary_key_".equals(key)) {
if (objectAsMap.isEmpty()) {
throw new IllegalArgumentException("requested [" + key + "] but the map was empty");
}
if (objectAsMap.containsKey(key)) {
throw new IllegalArgumentException("requested meta-key [" + key + "] but the map unexpectedly contains this key");
}
return objectAsMap.keySet().iterator().next();
}
return objectAsMap.get(key);
}
if (object instanceof List) {
List<Object> list = (List<Object>) object;
@ -149,7 +159,7 @@ public class ObjectPath {
list.add(current.toString());
}
return list.toArray(new String[list.size()]);
return list.toArray(new String[0]);
}
/**

View File

@ -34,6 +34,7 @@ import static org.hamcrest.Matchers.contains;
import static org.hamcrest.Matchers.containsString;
import static org.hamcrest.Matchers.equalTo;
import static org.hamcrest.Matchers.instanceOf;
import static org.hamcrest.Matchers.isOneOf;
import static org.hamcrest.Matchers.notNullValue;
import static org.hamcrest.Matchers.nullValue;
@ -181,6 +182,56 @@ public class ObjectPathTests extends ESTestCase {
assertThat(strings, contains("template_1", "template_2"));
}
public void testEvaluateArbitraryKey() throws Exception {
XContentBuilder xContentBuilder = randomXContentBuilder();
xContentBuilder.startObject();
xContentBuilder.startObject("metadata");
xContentBuilder.startObject("templates");
xContentBuilder.startObject("template_1");
xContentBuilder.field("field1", "value");
xContentBuilder.endObject();
xContentBuilder.startObject("template_2");
xContentBuilder.field("field2", "value");
xContentBuilder.field("field3", "value");
xContentBuilder.endObject();
xContentBuilder.startObject("template_3");
xContentBuilder.endObject();
xContentBuilder.startObject("template_4");
xContentBuilder.field("_arbitrary_key_", "value");
xContentBuilder.endObject();
xContentBuilder.endObject();
xContentBuilder.endObject();
xContentBuilder.endObject();
ObjectPath objectPath = ObjectPath.createFromXContent(xContentBuilder.contentType().xContent(),
BytesReference.bytes(xContentBuilder));
{
final Object object = objectPath.evaluate("metadata.templates.template_1._arbitrary_key_");
assertThat(object, instanceOf(String.class));
final String key = (String) object;
assertThat(key, equalTo("field1"));
}
{
final Object object = objectPath.evaluate("metadata.templates.template_2._arbitrary_key_");
assertThat(object, instanceOf(String.class));
final String key = (String) object;
assertThat(key, isOneOf("field2", "field3"));
}
{
final IllegalArgumentException exception
= expectThrows(IllegalArgumentException.class, () -> objectPath.evaluate("metadata.templates.template_3._arbitrary_key_"));
assertThat(exception.getMessage(), equalTo("requested [_arbitrary_key_] but the map was empty"));
}
{
final IllegalArgumentException exception
= expectThrows(IllegalArgumentException.class, () -> objectPath.evaluate("metadata.templates.template_4._arbitrary_key_"));
assertThat(exception.getMessage(), equalTo("requested meta-key [_arbitrary_key_] but the map unexpectedly contains this key"));
}
}
public void testEvaluateStashInPropertyName() throws Exception {
XContentBuilder xContentBuilder = randomXContentBuilder();
xContentBuilder.startObject();