Tests: Replace YAML tests with ESRestTestCase to be able to wait for events (elastic/x-pack-elasticsearch#3252)

* Tests: Replace YAML tests with ESRestTestCase to be able to wait for events

The YAML tests had no way to wait for the watches to be created. Instead
they simulated a sleep by waiting for a number of nodes that never
occurred in the cluster, relying on a hard ten-second timeout that could
not be aborted.

This commit replaces those waiting YAML tests with ESRestTestCases that
use `assertBusy()` to exit early once the watches have been added. It also
allows the wait time to be increased if needed, as these tests tend to
fail on CI.
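
In short, the change swaps a forced timeout for a poll. A minimal sketch
(the full helper, `assertTotalWatchCount()`, is part of the new test below;
`assertBusy()`, `assertOK()` and `ObjectPath` come from the Elasticsearch
test framework):

```java
// Before (deleted YAML): simulate a sleep by waiting for a node count that
// never occurs in the cluster, so every run blocks for the full ten seconds:
//
//     - do:
//         catch: request_timeout
//         cluster.health:
//           wait_for_nodes: 99
//           timeout: 10s
//
// After (new Java test): poll, and return as soon as all five cluster
// alert watches have been written.
assertBusy(() -> {
    assertOK(client().performRequest("POST", ".watches/_refresh"));
    ObjectPath path = ObjectPath.createFromResponse(
            client().performRequest("POST", ".watches/_count"));
    assertThat(path.evaluate("count"), is(5));
});
```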

relates elastic/x-pack-elasticsearch#3217

Original commit: elastic/x-pack-elasticsearch@74b9945d88
Alexander Reelsen 2017-12-07 14:40:08 +01:00 committed by GitHub
parent e9d9199205
commit eaf67f8bc2
4 changed files with 118 additions and 191 deletions


@@ -1,24 +0,0 @@
/*
 * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
 * or more contributor license agreements. Licensed under the Elastic License;
 * you may not use this file except in compliance with the Elastic License.
 */
package org.elasticsearch.smoketest;

import com.carrotsearch.randomizedtesting.annotations.Name;
import com.carrotsearch.randomizedtesting.annotations.ParametersFactory;
import org.elasticsearch.test.rest.yaml.ClientYamlTestCandidate;
import org.elasticsearch.test.rest.yaml.ESClientYamlSuiteTestCase;

public class MonitoringWithWatcherClientYamlTestSuiteIT extends ESClientYamlSuiteTestCase {

    public MonitoringWithWatcherClientYamlTestSuiteIT(@Name("yaml") ClientYamlTestCandidate testCandidate) {
        super(testCandidate);
    }

    @ParametersFactory
    public static Iterable<Object[]> parameters() throws Exception {
        return ESClientYamlSuiteTestCase.createParameters();
    }
}


@@ -0,0 +1,118 @@
/*
 * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
 * or more contributor license agreements. Licensed under the Elastic License;
 * you may not use this file except in compliance with the Elastic License.
 */
package org.elasticsearch.smoketest;

import org.apache.http.entity.ContentType;
import org.apache.http.entity.StringEntity;
import org.elasticsearch.client.Response;
import org.elasticsearch.common.xcontent.XContentType;
import org.elasticsearch.test.rest.ESRestTestCase;
import org.elasticsearch.test.rest.yaml.ObjectPath;
import org.elasticsearch.xpack.watcher.actions.ActionBuilders;
import org.elasticsearch.xpack.watcher.client.WatchSourceBuilders;
import org.elasticsearch.xpack.watcher.trigger.TriggerBuilders;
import org.elasticsearch.xpack.watcher.trigger.schedule.IntervalSchedule;
import org.junit.After;

import java.io.IOException;
import java.util.Collections;

import static org.elasticsearch.common.xcontent.XContentFactory.jsonBuilder;
import static org.elasticsearch.xpack.watcher.input.InputBuilders.simpleInput;
import static org.elasticsearch.xpack.watcher.trigger.schedule.IntervalSchedule.Interval.Unit.MINUTES;
import static org.hamcrest.Matchers.is;

public class MonitoringWithWatcherRestIT extends ESRestTestCase {

    @After
    public void cleanExporters() throws Exception {
        // remove all configured exporters and delete the watcher indices,
        // so every test starts from a clean state
        String body = jsonBuilder().startObject().startObject("transient")
                .nullField("xpack.monitoring.exporters.*")
                .endObject().endObject().string();
        assertOK(adminClient().performRequest("PUT", "_cluster/settings", Collections.emptyMap(),
                new StringEntity(body, ContentType.APPLICATION_JSON)));

        assertOK(adminClient().performRequest("DELETE", ".watch*", Collections.emptyMap()));
    }

    public void testThatLocalExporterAddsWatches() throws Exception {
        String watchId = createMonitoringWatch();

        String body = jsonBuilder().startObject().startObject("transient")
                .field("xpack.monitoring.exporters.my_local_exporter.type", "local")
                .field("xpack.monitoring.exporters.my_local_exporter.cluster_alerts.management.enabled", true)
                .endObject().endObject().bytes().utf8ToString();

        adminClient().performRequest("PUT", "_cluster/settings", Collections.emptyMap(),
                new StringEntity(body, ContentType.APPLICATION_JSON));

        assertTotalWatchCount(5);
        assertMonitoringWatchHasBeenOverWritten(watchId);
    }

    public void testThatHttpExporterAddsWatches() throws Exception {
        String watchId = createMonitoringWatch();
        String httpHost = getHttpHost();

        String body = jsonBuilder().startObject().startObject("transient")
                .field("xpack.monitoring.exporters.my_http_exporter.type", "http")
                .field("xpack.monitoring.exporters.my_http_exporter.host", httpHost)
                .field("xpack.monitoring.exporters.my_http_exporter.cluster_alerts.management.enabled", true)
                .endObject().endObject().bytes().utf8ToString();

        adminClient().performRequest("PUT", "_cluster/settings", Collections.emptyMap(),
                new StringEntity(body, ContentType.APPLICATION_JSON));

        assertTotalWatchCount(5);
        assertMonitoringWatchHasBeenOverWritten(watchId);
    }

    private void assertMonitoringWatchHasBeenOverWritten(String watchId) throws Exception {
        // the watch created by createMonitoringWatch() uses a 1000m interval;
        // a 1m interval means the exporter has overwritten it
        ObjectPath path = ObjectPath.createFromResponse(client().performRequest("GET", "_xpack/watcher/watch/" + watchId));
        String interval = path.evaluate("watch.trigger.schedule.interval");
        assertThat(interval, is("1m"));
    }

    private void assertTotalWatchCount(int expectedWatches) throws Exception {
        // poll with assertBusy() instead of sleeping, so the test finishes
        // as soon as the exporter has added all watches
        assertBusy(() -> {
            assertOK(client().performRequest("POST", ".watches/_refresh"));
            ObjectPath path = ObjectPath.createFromResponse(client().performRequest("POST", ".watches/_count"));
            int count = path.evaluate("count");
            assertThat(count, is(expectedWatches));
        });
    }

    private String createMonitoringWatch() throws Exception {
        // install a watch under a cluster alert id, so it is overwritten
        // once the exporter kicks in
        String clusterUUID = getClusterUUID();
        String watchId = clusterUUID + "_kibana_version_mismatch";
        String sampleWatch = WatchSourceBuilders.watchBuilder()
                .trigger(TriggerBuilders.schedule(new IntervalSchedule(new IntervalSchedule.Interval(1000, MINUTES))))
                .input(simpleInput())
                .addAction("logme", ActionBuilders.loggingAction("foo"))
                .buildAsBytes(XContentType.JSON).utf8ToString();
        client().performRequest("PUT", "_xpack/watcher/watch/" + watchId, Collections.emptyMap(),
                new StringEntity(sampleWatch, ContentType.APPLICATION_JSON));
        return watchId;
    }

    private String getClusterUUID() throws Exception {
        Response response = client().performRequest("GET", "_cluster/state/metadata", Collections.emptyMap());
        ObjectPath objectPath = ObjectPath.createFromResponse(response);
        String clusterUUID = objectPath.evaluate("metadata.cluster_uuid");
        return clusterUUID;
    }

    public String getHttpHost() throws IOException {
        // find the publish address of the master node, so the http exporter
        // can send monitoring data back to this very cluster
        ObjectPath path = ObjectPath.createFromResponse(client().performRequest("GET", "_cluster/state", Collections.emptyMap()));
        String masterNodeId = path.evaluate("master_node");

        ObjectPath nodesPath = ObjectPath.createFromResponse(client().performRequest("GET", "_nodes", Collections.emptyMap()));
        String httpHost = nodesPath.evaluate("nodes." + masterNodeId + ".http.publish_address");
        return httpHost;
    }
}
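
Should the default `assertBusy()` timeout prove too short on CI, there is an
overload that takes an explicit limit while still returning early on success.
An illustrative sketch, not part of this commit (the 30-second value is made
up; assumes `java.util.concurrent.TimeUnit`):

```java
// Wait up to 30 seconds instead of the default before failing; the block
// is still retried and exits as soon as the assertions pass.
assertBusy(() -> {
    assertOK(client().performRequest("POST", ".watches/_refresh"));
    ObjectPath path = ObjectPath.createFromResponse(
            client().performRequest("POST", ".watches/_count"));
    assertThat(path.evaluate("count"), is(5));
}, 30, TimeUnit.SECONDS);
```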


@@ -1,80 +0,0 @@
---
teardown:
  - do:
      cluster.put_settings:
        body:
          transient:
            xpack.monitoring.exporters.*: null

  # delete all watcher indices, so we will start clean again
  - do:
      indices.delete:
        index: .watch*

---
"Watches are installed on startup with local exporter":

  - do:
      cluster.state:
        metric: [ metadata ]
  - set: { metadata.cluster_uuid: cluster_uuid }

  # install a watch that is going to be overwritten
  - do:
      xpack.watcher.put_watch:
        id: ${cluster_uuid}_kibana_version_mismatch
        body: >
          {
            "trigger" : {
              "schedule": {
                "interval" : "10m"
              }
            },
            "input" : {
              "simple" : {}
            },
            "actions" : {
              "logme" : {
                "logging" : {
                  "text" : "{{ctx}}"
                }
              }
            }
          }

  - do:
      cluster.put_settings:
        body:
          transient:
            xpack.monitoring.exporters.my_local_exporter.type: "local"
            xpack.monitoring.exporters.my_local_exporter.cluster_alerts.management.enabled: true
        flat_settings: true
  - match:
      transient:
        "xpack.monitoring.exporters.my_local_exporter.type": "local"
        "xpack.monitoring.exporters.my_local_exporter.cluster_alerts.management.enabled": "true"

  # simulate a sleep: waiting for a node count that never occurs in the
  # cluster blocks for the full ten-second timeout
  - do:
      catch: request_timeout
      cluster.health:
        wait_for_nodes: 99
        timeout: 10s
  - match: { "timed_out": true }

  - do:
      indices.refresh:
        index: [ ".watches" ]
  - do:
      search:
        index: .watches
  - match: { hits.total: 5 }

  - do:
      xpack.watcher.get_watch:
        id: ${cluster_uuid}_kibana_version_mismatch
  # a different interval than above means the watch was correctly replaced
  - match: { watch.trigger.schedule.interval: "1m" }


@@ -1,87 +0,0 @@
---
teardown:
  - do:
      cluster.put_settings:
        body:
          transient:
            xpack.monitoring.exporters.*: null

  # delete all watcher indices, so we will start clean again
  - do:
      indices.delete:
        index: .watch*

---
"Watches are installed on startup with http exporter":

  - do:
      cluster.state: {}
  - set: { metadata.cluster_uuid: cluster_uuid }
  - set: { master_node: master }

  # the http exporter needs the publish address of the master node
  - do:
      nodes.info: {}
  - set: { nodes.$master.http.publish_address: http_host }

  # install a watch that is going to be overwritten
  - do:
      xpack.watcher.put_watch:
        id: ${cluster_uuid}_elasticsearch_cluster_status
        body: >
          {
            "trigger" : {
              "schedule": {
                "interval" : "10m"
              }
            },
            "input" : {
              "simple" : {}
            },
            "actions" : {
              "logme" : {
                "logging" : {
                  "text" : "{{ctx}}"
                }
              }
            }
          }

  - do:
      cluster.put_settings:
        body:
          transient:
            xpack.monitoring.exporters.my_http_exporter.type: "http"
            xpack.monitoring.exporters.my_http_exporter.host: $http_host
            xpack.monitoring.exporters.my_http_exporter.cluster_alerts.management.enabled: true
        flat_settings: true
  - match:
      transient:
        "xpack.monitoring.exporters.my_http_exporter.type": "http"
        "xpack.monitoring.exporters.my_http_exporter.host": "$http_host"
        "xpack.monitoring.exporters.my_http_exporter.cluster_alerts.management.enabled": "true"

  # simulate a sleep: waiting for a node count that never occurs in the
  # cluster blocks for the full ten-second timeout
  - do:
      catch: request_timeout
      cluster.health:
        wait_for_nodes: 99
        timeout: 10s
  - match: { "timed_out": true }

  - do:
      indices.refresh:
        index: [ ".watches" ]
  - do:
      search:
        index: .watches
  - match: { hits.total: 5 }

  - do:
      xpack.watcher.get_watch:
        id: ${cluster_uuid}_elasticsearch_cluster_status
  # a different interval than above means the watch was correctly replaced
  - match: { watch.trigger.schedule.interval: "1m" }