Merge branch 'master' into feature/sql

Commit: bcd9934050
Original commit: elastic/x-pack-elasticsearch@e5809f0785
@@ -7,16 +7,21 @@ package org.elasticsearch.smoketest;
 import com.carrotsearch.randomizedtesting.annotations.Name;
 import com.carrotsearch.randomizedtesting.annotations.ParametersFactory;
 
 import org.elasticsearch.Version;
 import org.elasticsearch.client.RestClient;
 import org.elasticsearch.client.http.HttpHost;
 import org.elasticsearch.common.settings.SecureString;
 import org.elasticsearch.common.settings.Settings;
 import org.elasticsearch.common.util.concurrent.ThreadContext;
 import org.elasticsearch.test.rest.yaml.ClientYamlDocsTestClient;
 import org.elasticsearch.test.rest.yaml.ClientYamlTestCandidate;
 import org.elasticsearch.test.rest.yaml.ClientYamlTestClient;
 import org.elasticsearch.test.rest.yaml.ClientYamlTestResponse;
 import org.elasticsearch.test.rest.yaml.ESClientYamlSuiteTestCase;
 import org.elasticsearch.test.rest.yaml.restspec.ClientYamlSuiteRestSpec;
 import org.junit.After;
 
 import java.io.IOException;
 import java.util.List;
 import java.util.Map;
@@ -52,6 +57,12 @@ public class XDocsClientYamlTestSuiteIT extends ESClientYamlSuiteTestCase {
         return true;
     }
 
+    @Override
+    protected ClientYamlTestClient initClientYamlTestClient(ClientYamlSuiteRestSpec restSpec, RestClient restClient,
+                                                            List<HttpHost> hosts, Version esVersion) throws IOException {
+        return new ClientYamlDocsTestClient(restSpec, restClient, hosts, esVersion);
+    }
+
     /**
      * All tests run as an administrative user but use <code>es-shield-runas-user</code> to become a less privileged user.
      */

@@ -23,6 +23,8 @@ import org.elasticsearch.action.search.ShardSearchFailure;
 import org.elasticsearch.action.support.IndicesOptions;
 import org.elasticsearch.client.Client;
 import org.elasticsearch.cluster.ClusterState;
+import org.elasticsearch.cluster.block.ClusterBlock;
+import org.elasticsearch.cluster.block.ClusterBlockException;
 import org.elasticsearch.cluster.metadata.IndexMetaData;
 import org.elasticsearch.cluster.metadata.MappingMetaData;
 import org.elasticsearch.common.Nullable;
@@ -84,7 +86,6 @@ import java.util.Objects;
 import java.util.function.BiFunction;
 import java.util.function.Consumer;
 import java.util.function.Supplier;
 import java.util.stream.Collectors;
 
 public class JobProvider {
     private static final Logger LOGGER = Loggers.getLogger(JobProvider.class);
@@ -137,15 +138,42 @@ public class JobProvider {
                 .setQuery(QueryBuilders.termQuery(Job.ID.getPreferredName(), job.getId()))
                 .setSize(1);
 
+        MultiSearchRequestBuilder msearch = client.prepareMultiSearch()
+                .add(stateDocSearch)
+                .add(resultDocSearch)
+                .add(quantilesDocSearch);
+
         ActionListener<MultiSearchResponse> searchResponseActionListener = new ActionListener<MultiSearchResponse>() {
             @Override
-            public void onResponse(MultiSearchResponse searchResponse) {
-                List<SearchHit> searchHits = Arrays.stream(searchResponse.getResponses())
-                        .flatMap(item -> Arrays.stream(item.getResponse().getHits().getHits()))
-                        .collect(Collectors.toList());
+            public void onResponse(MultiSearchResponse response) {
+                List<SearchHit> searchHits = new ArrayList<>();
+                // Consider the possibility that some of the responses are exceptions
+                for (int i = 0; i < response.getResponses().length; i++) {
+                    MultiSearchResponse.Item itemResponse = response.getResponses()[i];
+                    if (itemResponse.isFailure()) {
+                        Exception e = itemResponse.getFailure();
+                        // There's a further complication, which is that msearch doesn't translate a
+                        // closed index cluster block exception into a friendlier index closed exception
+                        if (e instanceof ClusterBlockException) {
+                            for (ClusterBlock block : ((ClusterBlockException) e).blocks()) {
+                                if ("index closed".equals(block.description())) {
+                                    SearchRequest searchRequest = msearch.request().requests().get(i);
+                                    // Don't wrap the original exception, because then it would be the root cause
+                                    // and Kibana would display it in preference to the friendlier exception
+                                    e = ExceptionsHelper.badRequestException("Cannot create job [{}] as it requires closed index {}",
+                                            job.getId(), searchRequest.indices());
+                                }
+                            }
+                        }
+                        listener.onFailure(e);
+                        return;
+                    }
+                    searchHits.addAll(Arrays.asList(itemResponse.getResponse().getHits().getHits()));
+                }
 
-                if (searchHits.isEmpty() == false) {
+                if (searchHits.isEmpty()) {
+                    listener.onResponse(true);
+                } else {
                     int quantileDocCount = 0;
                     int categorizerStateDocCount = 0;
                     int resultDocCount = 0;
 
@@ -168,9 +196,6 @@ public class JobProvider {
                             "[" + resultDocCount + "] result and [" + (quantileDocCount + categorizerStateDocCount) +
                             "] state documents exist for a prior job with Id [" + job.getId() + "]. " +
                             "Please create the job with a different Id"));
-                    return;
-                } else {
-                    listener.onResponse(true);
                 }
             }
 
@@ -180,11 +205,7 @@ public class JobProvider {
             }
         };
 
-        client.prepareMultiSearch()
-                .add(stateDocSearch)
-                .add(resultDocSearch)
-                .add(quantilesDocSearch)
-                .execute(searchResponseActionListener);
+        msearch.execute(searchResponseActionListener);
     }
 
@@ -358,7 +379,7 @@ public class JobProvider {
             LOGGER.debug("Found 0 hits for [{}/{}]", searchRequest.indices(), searchRequest.types());
         } else if (hitsCount == 1) {
             parseAutodetectParamSearchHit(jobId, paramsBuilder, hits.getAt(0), errorHandler);
-        } else if (hitsCount > 1) {
+        } else {
             errorHandler.accept(new IllegalStateException("Expected hits count to be 0 or 1, but got ["
                     + hitsCount + "]"));
         }
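
The heart of the JobProvider change above is that msearch reports failures per item rather than for the request as a whole, so each MultiSearchResponse.Item has to be checked before its hits are consumed. A minimal standalone sketch of that pattern, using the same client API calls the diff itself uses; the helper class/method names and the IllegalStateException wrapping are illustrative, not part of the commit:

import org.elasticsearch.action.search.MultiSearchResponse;
import org.elasticsearch.cluster.block.ClusterBlockException;
import org.elasticsearch.search.SearchHit;

import java.util.ArrayList;
import java.util.Arrays;
import java.util.List;

final class MsearchHits {

    private MsearchHits() {}

    // Collect hits from every successful item. A single closed index fails only
    // its own sub-search, so a failed item must be surfaced explicitly rather
    // than dereferenced (item.getResponse() is null for failed items).
    static List<SearchHit> collectHits(MultiSearchResponse response) {
        List<SearchHit> hits = new ArrayList<>();
        for (MultiSearchResponse.Item item : response.getResponses()) {
            if (item.isFailure()) {
                Exception e = item.getFailure();
                // msearch surfaces a closed index as a ClusterBlockException,
                // not as a friendlier "index closed" exception
                if (e instanceof ClusterBlockException) {
                    throw new IllegalStateException("a target index is blocked (e.g. closed)", e);
                }
                throw new IllegalStateException("msearch item failed", e);
            }
            hits.addAll(Arrays.asList(item.getResponse().getHits().getHits()));
        }
        return hits;
    }
}
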
@@ -866,3 +866,57 @@
       "data_description" : {}
   }
 
+---
+"Test put job after closing results index":
+
+  - do:
+      indices.create:
+        index: ".ml-anomalies-shared"
+
+  - do:
+      indices.close:
+        index: ".ml-anomalies-shared"
+
+  - do:
+      catch: /Cannot create job \[closed-results-job\] as it requires closed index \[\.ml-anomalies-shared\]/
+      xpack.ml.put_job:
+        job_id: closed-results-job
+        body: >
+          {
+            "description":"Analysis of response time by airline",
+            "analysis_config" : {
+              "bucket_span": "1h",
+              "detectors" :[{"function":"metric","field_name":"responsetime","by_field_name":"airline"}]
+            },
+            "data_description" : {
+              "time_field":"time"
+            }
+          }
+
+---
+"Test put job after closing state index":
+
+  - do:
+      indices.create:
+        index: ".ml-state"
+
+  - do:
+      indices.close:
+        index: ".ml-state"
+
+  - do:
+      catch: /Cannot create job \[closed-results-job\] as it requires closed index \[\.ml-state\]/
+      xpack.ml.put_job:
+        job_id: closed-results-job
+        body: >
+          {
+            "description":"Analysis of response time by airline",
+            "analysis_config" : {
+              "bucket_span": "1h",
+              "detectors" :[{"function":"metric","field_name":"responsetime","by_field_name":"airline"}]
+            },
+            "data_description" : {
+              "time_field":"time"
+            }
+          }
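
The `catch:` regex in these tests is matched against the message built by ExceptionsHelper.badRequestException in the JobProvider hunk above; the brackets around the index name are presumably produced by the message formatter rendering the String[] returned by searchRequest.indices(). A quick plain-JDK check of that correspondence; the exact rendered message is an assumption inferred from the format string, not taken from the commit:

import java.util.regex.Pattern;

public class CatchRegexCheck {
    public static void main(String[] args) {
        // Assumed rendering of "Cannot create job [{}] as it requires closed index {}"
        // with job id "closed-results-job" and indices() == {".ml-anomalies-shared"}
        String message = "Cannot create job [closed-results-job] as it requires closed index [.ml-anomalies-shared]";
        Pattern catchRegex = Pattern.compile(
                "Cannot create job \\[closed-results-job\\] as it requires closed index \\[\\.ml-anomalies-shared\\]");
        System.out.println(catchRegex.matcher(message).find()); // prints: true
    }
}
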
@@ -6,13 +6,13 @@
 package org.elasticsearch.upgrades;
 
 import com.google.common.base.Charsets;
+import org.elasticsearch.Version;
+import org.elasticsearch.client.Response;
+import org.elasticsearch.client.RestClient;
 import org.elasticsearch.client.http.HttpHost;
 import org.elasticsearch.client.http.entity.ContentType;
 import org.elasticsearch.client.http.entity.StringEntity;
 import org.elasticsearch.client.http.util.EntityUtils;
-import org.elasticsearch.Version;
-import org.elasticsearch.client.Response;
-import org.elasticsearch.client.RestClient;
 import org.elasticsearch.common.CheckedConsumer;
 import org.elasticsearch.common.bytes.BytesReference;
 import org.elasticsearch.common.io.Streams;
@@ -21,6 +21,7 @@ import org.elasticsearch.common.util.concurrent.ThreadContext;
 import org.elasticsearch.common.xcontent.XContentHelper;
 import org.elasticsearch.common.xcontent.XContentType;
 import org.elasticsearch.common.xcontent.json.JsonXContent;
+import org.elasticsearch.test.junit.annotations.TestLogging;
 import org.elasticsearch.test.rest.ESRestTestCase;
 import org.elasticsearch.test.rest.yaml.ObjectPath;
 import org.elasticsearch.xpack.watcher.condition.AlwaysCondition;
@@ -52,6 +53,7 @@ import static org.hamcrest.Matchers.equalTo;
 import static org.hamcrest.Matchers.greaterThanOrEqualTo;
 import static org.hamcrest.Matchers.not;
 
+@TestLogging("org.elasticsearch.client:TRACE")
 public class WatchBackwardsCompatibilityIT extends ESRestTestCase {
 
     private Nodes nodes;
|
|||
}
|
||||
|
||||
public void testWatcherRestart() throws Exception {
|
||||
// TODO we should be able to run this against any node, once the bwc serialization issues are fixed
|
||||
executeAgainstMasterNode(client -> {
|
||||
assertOK(client.performRequest("POST", "/_xpack/watcher/_stop"));
|
||||
assertBusy(() -> {
|
||||
try (InputStream is = client.performRequest("GET", "_xpack/watcher/stats").getEntity().getContent()) {
|
||||
// TODO once the serialization fix is in here, we can check for concrete fields if the run against a 5.x or a 6.x node
|
||||
// using a checkedbiconsumer, that provides info against which node the request runs
|
||||
String responseBody = Streams.copyToString(new InputStreamReader(is, Charsets.UTF_8));
|
||||
assertThat(responseBody, not(containsString("\"watcher_state\":\"starting\"")));
|
||||
assertThat(responseBody, not(containsString("\"watcher_state\":\"started\"")));
|
||||
assertThat(responseBody, not(containsString("\"watcher_state\":\"stopping\"")));
|
||||
}
|
||||
});
|
||||
});
|
||||
executeAgainstRandomNode(client -> assertOK(client.performRequest("POST", "/_xpack/watcher/_stop")));
|
||||
executeAgainstMasterNode(client -> assertBusy(() -> {
|
||||
try (InputStream is = client.performRequest("GET", "_xpack/watcher/stats").getEntity().getContent()) {
|
||||
// TODO once the serialization fix is in here, we can check for concrete fields if the run against a 5.x or a 6.x node
|
||||
// using a checkedbiconsumer, that provides info against which node the request runs
|
||||
String responseBody = Streams.copyToString(new InputStreamReader(is, Charsets.UTF_8));
|
||||
assertThat(responseBody, not(containsString("\"watcher_state\":\"starting\"")));
|
||||
assertThat(responseBody, not(containsString("\"watcher_state\":\"started\"")));
|
||||
assertThat(responseBody, not(containsString("\"watcher_state\":\"stopping\"")));
|
||||
}
|
||||
}));
|
||||
|
||||
// TODO remove this again, as the upgrade API should take care of this
|
||||
// currently the triggered watches index is not checked by the upgrade API, resulting in an existing index
|
||||
// that has not configured the `index.format: 6`, resulting in watcher not starting
|
||||
Map<String, String> params = new HashMap<>();
|
||||
|
@ -160,20 +158,17 @@ public class WatchBackwardsCompatibilityIT extends ESRestTestCase {
|
|||
|
||||
executeUpgradeIfNeeded();
|
||||
|
||||
// TODO we should be able to run this against any node, once the bwc serialization issues are fixed
|
||||
executeAgainstMasterNode(client -> {
|
||||
assertOK(client.performRequest("POST", "/_xpack/watcher/_start"));
|
||||
assertBusy(() -> {
|
||||
try (InputStream is = client.performRequest("GET", "_xpack/watcher/stats").getEntity().getContent()) {
|
||||
// TODO once the serialization fix is in here, we can check for concrete fields if the run against a 5.x or a 6.x node
|
||||
// using a checkedbiconsumer, that provides info against which node the request runs
|
||||
String responseBody = Streams.copyToString(new InputStreamReader(is, Charsets.UTF_8));
|
||||
assertThat(responseBody, not(containsString("\"watcher_state\":\"starting\"")));
|
||||
assertThat(responseBody, not(containsString("\"watcher_state\":\"stopping\"")));
|
||||
assertThat(responseBody, not(containsString("\"watcher_state\":\"stopped\"")));
|
||||
}
|
||||
});
|
||||
});
|
||||
executeAgainstRandomNode(client -> assertOK(client.performRequest("POST", "/_xpack/watcher/_start")));
|
||||
executeAgainstMasterNode(client -> assertBusy(() -> {
|
||||
try (InputStream is = client.performRequest("GET", "_xpack/watcher/stats").getEntity().getContent()) {
|
||||
// TODO once the serialization fix is in here, we can check for concrete fields if the run against a 5.x or a 6.x node
|
||||
// using a checkedbiconsumer, that provides info against which node the request runs
|
||||
String responseBody = Streams.copyToString(new InputStreamReader(is, Charsets.UTF_8));
|
||||
assertThat(responseBody, not(containsString("\"watcher_state\":\"starting\"")));
|
||||
assertThat(responseBody, not(containsString("\"watcher_state\":\"stopping\"")));
|
||||
assertThat(responseBody, not(containsString("\"watcher_state\":\"stopped\"")));
|
||||
}
|
||||
}));
|
||||
}
|
||||
|
||||
public void testWatchCrudApis() throws IOException {
|
||||
|
@ -234,7 +229,7 @@ public class WatchBackwardsCompatibilityIT extends ESRestTestCase {
|
|||
}
|
||||
|
||||
private void executeAgainstMasterNode(CheckedConsumer<RestClient, Exception> consumer) throws Exception {
|
||||
try (RestClient client = buildClient(restClientSettings(), new HttpHost[] { this.nodes.getMaster().publishAddress })) {
|
||||
try (RestClient client = buildClient(restClientSettings(), new HttpHost[]{this.nodes.getMaster().publishAddress})) {
|
||||
consumer.accept(client);
|
||||
}
|
||||
}
|
||||
|
@ -318,14 +313,6 @@ public class WatchBackwardsCompatibilityIT extends ESRestTestCase {
|
|||
return Version.fromId(values().stream().map(node -> node.getVersion().id).min(Integer::compareTo).get());
|
||||
}
|
||||
|
||||
public Node getSafe(String id) {
|
||||
Node node = get(id);
|
||||
if (node == null) {
|
||||
throw new IllegalArgumentException("node with id [" + id + "] not found");
|
||||
}
|
||||
return node;
|
||||
}
|
||||
|
||||
@Override
|
||||
public String toString() {
|
||||
return "Nodes{" +
|
||||
|
|
|
@@ -35,6 +35,8 @@ integTestRunner {
     'ml/jobs_crud/Test cannot create job with existing result document',
     'ml/jobs_crud/Test cannot create job with model snapshot id set',
     'ml/jobs_crud/Test get job API with non existing job id',
+    'ml/jobs_crud/Test put job after closing results index',
+    'ml/jobs_crud/Test put job after closing state index',
     'ml/jobs_crud/Test put job with inconsistent body/param ids',
     'ml/jobs_crud/Test put job with time field in analysis_config',
     'ml/jobs_get/Test get job given missing job_id',