From c96f2d7bf7981b2641c4dbc3499ecbc1331320b4 Mon Sep 17 00:00:00 2001 From: Yannick Welsch Date: Mon, 14 May 2018 12:14:37 +0200 Subject: [PATCH 01/31] Document woes between auto-expand-replicas and allocation filtering (#30531) Relates to #2869 --- docs/reference/index-modules.asciidoc | 9 +++++++-- 1 file changed, 7 insertions(+), 2 deletions(-) diff --git a/docs/reference/index-modules.asciidoc b/docs/reference/index-modules.asciidoc index ed0077a629d..54c0c1c1b15 100644 --- a/docs/reference/index-modules.asciidoc +++ b/docs/reference/index-modules.asciidoc @@ -103,9 +103,14 @@ specific index module: `index.auto_expand_replicas`:: - Auto-expand the number of replicas based on the number of available nodes. + Auto-expand the number of replicas based on the number of data nodes in the cluster. Set to a dash delimited lower and upper bound (e.g. `0-5`) or use `all` - for the upper bound (e.g. `0-all`). Defaults to `false` (i.e. disabled). + for the upper bound (e.g. `0-all`). Defaults to `false` (i.e. disabled). + Note that the auto-expanded number of replicas does not take any other allocation + rules into account, such as <>, + <> or <>, + and this can lead to the cluster health becoming `YELLOW` if the applicable rules + prevent all the replicas from being allocated. `index.search.idle.after`:: How long a shard can not receive a search or get request until it's considered From 6a8aa99e3f7e388fc6b43c409a8f9009ee36fa68 Mon Sep 17 00:00:00 2001 From: David Roberts Date: Mon, 14 May 2018 12:49:37 +0100 Subject: [PATCH 02/31] [TEST] Mute ML test that needs updating to following ml-cpp changes Relates #30399 --- .../java/org/elasticsearch/xpack/ml/integration/ForecastIT.java | 1 + 1 file changed, 1 insertion(+) diff --git a/x-pack/qa/ml-native-tests/src/test/java/org/elasticsearch/xpack/ml/integration/ForecastIT.java b/x-pack/qa/ml-native-tests/src/test/java/org/elasticsearch/xpack/ml/integration/ForecastIT.java index a5fc1575f48..14bdd533c6b 100644 --- a/x-pack/qa/ml-native-tests/src/test/java/org/elasticsearch/xpack/ml/integration/ForecastIT.java +++ b/x-pack/qa/ml-native-tests/src/test/java/org/elasticsearch/xpack/ml/integration/ForecastIT.java @@ -206,6 +206,7 @@ public class ForecastIT extends MlNativeAutodetectIntegTestCase { assertThat(e.getMessage(), equalTo("Cannot run forecast: Forecast cannot be executed as model memory status is not OK")); } + @AwaitsFix(bugUrl = "https://github.com/elastic/elasticsearch/pull/30399") public void testMemoryLimit() throws Exception { Detector.Builder detector = new Detector.Builder("mean", "value"); detector.setByFieldName("clientIP"); From 9a5555963bcf949c54960e4e2995fc3d1da4db08 Mon Sep 17 00:00:00 2001 From: Alpar Torok Date: Mon, 14 May 2018 16:06:56 +0300 Subject: [PATCH 03/31] Add missing dependencies on testClasses (#30527) --- .../src/main/groovy/org/elasticsearch/gradle/BuildPlugin.groovy | 1 + modules/repository-url/build.gradle | 1 + 2 files changed, 2 insertions(+) diff --git a/buildSrc/src/main/groovy/org/elasticsearch/gradle/BuildPlugin.groovy b/buildSrc/src/main/groovy/org/elasticsearch/gradle/BuildPlugin.groovy index 9cc5bb82552..85fe712fd8d 100644 --- a/buildSrc/src/main/groovy/org/elasticsearch/gradle/BuildPlugin.groovy +++ b/buildSrc/src/main/groovy/org/elasticsearch/gradle/BuildPlugin.groovy @@ -744,6 +744,7 @@ class BuildPlugin implements Plugin { additionalTest.testClassesDir = test.testClassesDir additionalTest.configure(commonTestConfig(project)) additionalTest.configure(config) + 
additionalTest.dependsOn(project.tasks.testClasses) test.dependsOn(additionalTest) }); return test diff --git a/modules/repository-url/build.gradle b/modules/repository-url/build.gradle index 79fe5e7aaef..62aad486ad8 100644 --- a/modules/repository-url/build.gradle +++ b/modules/repository-url/build.gradle @@ -34,6 +34,7 @@ File repositoryDir = new File(project.buildDir, "shared-repository") /** A task to start the URLFixture which exposes the repositoryDir over HTTP **/ task urlFixture(type: AntFixture) { + dependsOn testClasses doFirst { repositoryDir.mkdirs() } From 1a7110524f48115e3c61e26b41d3a58bc82a4b45 Mon Sep 17 00:00:00 2001 From: Zachary Tong Date: Mon, 14 May 2018 13:19:39 +0000 Subject: [PATCH 04/31] [TEST] Fix typo in MovAvgIT test The second set of assertions was accidentally using the count's moving average for the error delta in the value's moving average assertion. This fixes the typo, and unmutes the test. Closes #29456 --- .../search/aggregations/pipeline/moving/avg/MovAvgIT.java | 6 +----- 1 file changed, 1 insertion(+), 5 deletions(-) diff --git a/server/src/test/java/org/elasticsearch/search/aggregations/pipeline/moving/avg/MovAvgIT.java b/server/src/test/java/org/elasticsearch/search/aggregations/pipeline/moving/avg/MovAvgIT.java index 43c7010d4b0..73a3c553b4d 100644 --- a/server/src/test/java/org/elasticsearch/search/aggregations/pipeline/moving/avg/MovAvgIT.java +++ b/server/src/test/java/org/elasticsearch/search/aggregations/pipeline/moving/avg/MovAvgIT.java @@ -19,9 +19,7 @@ package org.elasticsearch.search.aggregations.pipeline.moving.avg; -import org.apache.lucene.util.LuceneTestCase; import org.elasticsearch.action.bulk.BulkRequestBuilder; -import org.elasticsearch.action.bulk.BulkResponse; import org.elasticsearch.action.index.IndexRequestBuilder; import org.elasticsearch.action.search.SearchPhaseExecutionException; import org.elasticsearch.action.search.SearchResponse; @@ -45,7 +43,6 @@ import org.elasticsearch.search.aggregations.support.ValuesSourceAggregationBuil import org.elasticsearch.test.ESIntegTestCase; import org.hamcrest.Matchers; -import java.io.IOException; import java.util.ArrayList; import java.util.Arrays; import java.util.Collection; @@ -69,7 +66,6 @@ import static org.hamcrest.core.IsNull.notNullValue; import static org.hamcrest.core.IsNull.nullValue; @ESIntegTestCase.SuiteScopeTestCase -@LuceneTestCase.AwaitsFix(bugUrl = "https://github.com/elastic/elasticsearch/issues/29456") public class MovAvgIT extends ESIntegTestCase { private static final String INTERVAL_FIELD = "l_value"; private static final String VALUE_FIELD = "v_value"; @@ -1308,7 +1304,7 @@ public class MovAvgIT extends ESIntegTestCase { } else { assertThat("[value] movavg is null", valuesMovAvg, notNullValue()); assertEquals("[value] movavg does not match expected [" + valuesMovAvg.value() + " vs " + expectedValue + "]", - valuesMovAvg.value(), expectedValue, 0.1 * Math.abs(countMovAvg.value())); + valuesMovAvg.value(), expectedValue, 0.1 * Math.abs(valuesMovAvg.value())); } } From b8bf4807426cd60b49514a97ecd5f5126025eb58 Mon Sep 17 00:00:00 2001 From: Nik Everett Date: Mon, 14 May 2018 10:37:53 -0400 Subject: [PATCH 05/31] Clients: Switch to new performRequest (#30543) Switch several calls in the client projects from the deprecated `performRequest` calls to the new version. 
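The shape of this change is the same at every call site in the series: build a `Request` from a method and an endpoint, attach any body with `setJsonEntity`, and hand the single `Request` object to `performRequest`. A minimal before/after sketch (the endpoint and body are illustrative, and `restClient` is assumed to be an existing `org.elasticsearch.client.RestClient`):

    // before: the deprecated multi-argument variant
    HttpEntity entity = new NStringEntity("{ \"field\": \"value\" }", ContentType.APPLICATION_JSON);
    Response oldStyle = restClient.performRequest("PUT", "/index/type/id", Collections.emptyMap(), entity);

    // after: the single-argument variant used throughout this patch
    Request request = new Request("PUT", "/index/type/id");
    request.setJsonEntity("{ \"field\": \"value\" }");
    Response newStyle = restClient.performRequest(request);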
--- .../benchmark/rest/RestClientBenchmark.java | 20 +++------ .../elasticsearch/client/BulkProcessorIT.java | 10 ++--- .../java/org/elasticsearch/client/CrudIT.java | 43 ++++++++----------- .../documentation/CRUDDocumentationIT.java | 13 +++--- .../RestClientSingleHostIntegTests.java | 5 ++- .../client/RestClientSingleHostTests.java | 2 - 6 files changed, 38 insertions(+), 55 deletions(-) diff --git a/client/benchmark/src/main/java/org/elasticsearch/client/benchmark/rest/RestClientBenchmark.java b/client/benchmark/src/main/java/org/elasticsearch/client/benchmark/rest/RestClientBenchmark.java index 9210526e7c8..d32c37dc2c4 100644 --- a/client/benchmark/src/main/java/org/elasticsearch/client/benchmark/rest/RestClientBenchmark.java +++ b/client/benchmark/src/main/java/org/elasticsearch/client/benchmark/rest/RestClientBenchmark.java @@ -18,27 +18,19 @@ */ package org.elasticsearch.client.benchmark.rest; -import org.apache.http.HttpEntity; import org.apache.http.HttpHeaders; import org.apache.http.HttpHost; import org.apache.http.HttpStatus; -import org.apache.http.client.config.RequestConfig; -import org.apache.http.conn.ConnectionKeepAliveStrategy; -import org.apache.http.entity.ContentType; -import org.apache.http.entity.StringEntity; -import org.apache.http.impl.nio.client.HttpAsyncClientBuilder; import org.apache.http.message.BasicHeader; -import org.apache.http.nio.entity.NStringEntity; import org.elasticsearch.ElasticsearchException; +import org.elasticsearch.client.Request; import org.elasticsearch.client.Response; import org.elasticsearch.client.RestClient; -import org.elasticsearch.client.RestClientBuilder; import org.elasticsearch.client.benchmark.AbstractBenchmark; import org.elasticsearch.client.benchmark.ops.bulk.BulkRequestExecutor; import org.elasticsearch.client.benchmark.ops.search.SearchRequestExecutor; import java.io.IOException; -import java.nio.charset.StandardCharsets; import java.util.Collections; import java.util.List; import java.util.Locale; @@ -86,9 +78,10 @@ public final class RestClientBenchmark extends AbstractBenchmark { bulkRequestBody.append(bulkItem); bulkRequestBody.append("\n"); } - HttpEntity entity = new NStringEntity(bulkRequestBody.toString(), ContentType.APPLICATION_JSON); + Request request = new Request("POST", "/geonames/type/_noop_bulk"); + request.setJsonEntity(bulkRequestBody.toString()); try { - Response response = client.performRequest("POST", "/geonames/type/_noop_bulk", Collections.emptyMap(), entity); + Response response = client.performRequest(request); return response.getStatusLine().getStatusCode() == HttpStatus.SC_OK; } catch (Exception e) { throw new ElasticsearchException(e); @@ -107,9 +100,10 @@ public final class RestClientBenchmark extends AbstractBenchmark { @Override public boolean search(String source) { - HttpEntity searchBody = new NStringEntity(source, StandardCharsets.UTF_8); + Request request = new Request("GET", endpoint); + request.setJsonEntity(source); try { - Response response = client.performRequest("GET", endpoint, Collections.emptyMap(), searchBody); + Response response = client.performRequest(request); return response.getStatusLine().getStatusCode() == HttpStatus.SC_OK; } catch (IOException e) { throw new ElasticsearchException(e); diff --git a/client/rest-high-level/src/test/java/org/elasticsearch/client/BulkProcessorIT.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/BulkProcessorIT.java index 7f59fcc8312..9782b1016b4 100644 --- 
a/client/rest-high-level/src/test/java/org/elasticsearch/client/BulkProcessorIT.java +++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/BulkProcessorIT.java @@ -194,18 +194,16 @@ public class BulkProcessorIT extends ESRestHighLevelClientTestCase { } public void testBulkProcessorConcurrentRequestsReadOnlyIndex() throws Exception { - - String createIndexBody = "{\n" + + Request request = new Request("PUT", "/test-ro"); + request.setJsonEntity("{\n" + " \"settings\" : {\n" + " \"index\" : {\n" + " \"blocks.write\" : true\n" + " }\n" + " }\n" + " \n" + - "}"; - - NStringEntity entity = new NStringEntity(createIndexBody, ContentType.APPLICATION_JSON); - Response response = client().performRequest("PUT", "/test-ro", Collections.emptyMap(), entity); + "}"); + Response response = client().performRequest(request); assertThat(response.getStatusLine().getStatusCode(), equalTo(200)); int bulkActions = randomIntBetween(10, 100); diff --git a/client/rest-high-level/src/test/java/org/elasticsearch/client/CrudIT.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/CrudIT.java index ee820871dbb..f384e5706b0 100644 --- a/client/rest-high-level/src/test/java/org/elasticsearch/client/CrudIT.java +++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/CrudIT.java @@ -19,9 +19,6 @@ package org.elasticsearch.client; -import org.apache.http.client.methods.HttpPut; -import org.apache.http.entity.ContentType; -import org.apache.http.entity.StringEntity; import org.elasticsearch.ElasticsearchException; import org.elasticsearch.ElasticsearchStatusException; import org.elasticsearch.action.DocWriteRequest; @@ -39,6 +36,7 @@ import org.elasticsearch.action.get.MultiGetRequest; import org.elasticsearch.action.get.MultiGetResponse; import org.elasticsearch.action.index.IndexRequest; import org.elasticsearch.action.index.IndexResponse; +import org.elasticsearch.action.support.WriteRequest.RefreshPolicy; import org.elasticsearch.action.update.UpdateRequest; import org.elasticsearch.action.update.UpdateResponse; import org.elasticsearch.common.Strings; @@ -147,11 +145,10 @@ public class CrudIT extends ESRestHighLevelClientTestCase { GetRequest getRequest = new GetRequest("index", "type", "id"); assertFalse(execute(getRequest, highLevelClient()::exists, highLevelClient()::existsAsync)); } - String document = "{\"field1\":\"value1\",\"field2\":\"value2\"}"; - StringEntity stringEntity = new StringEntity(document, ContentType.APPLICATION_JSON); - Response response = client().performRequest(HttpPut.METHOD_NAME, "/index/type/id", Collections.singletonMap("refresh", "wait_for"), - stringEntity); - assertEquals(201, response.getStatusLine().getStatusCode()); + IndexRequest index = new IndexRequest("index", "type", "id"); + index.source("{\"field1\":\"value1\",\"field2\":\"value2\"}", XContentType.JSON); + index.setRefreshPolicy(RefreshPolicy.IMMEDIATE); + highLevelClient().index(index); { GetRequest getRequest = new GetRequest("index", "type", "id"); assertTrue(execute(getRequest, highLevelClient()::exists, highLevelClient()::existsAsync)); @@ -175,12 +172,11 @@ public class CrudIT extends ESRestHighLevelClientTestCase { assertEquals("Elasticsearch exception [type=index_not_found_exception, reason=no such index]", exception.getMessage()); assertEquals("index", exception.getMetadata("es.index").get(0)); } - + IndexRequest index = new IndexRequest("index", "type", "id"); String document = "{\"field1\":\"value1\",\"field2\":\"value2\"}"; - StringEntity stringEntity = new 
StringEntity(document, ContentType.APPLICATION_JSON); - Response response = client().performRequest(HttpPut.METHOD_NAME, "/index/type/id", Collections.singletonMap("refresh", "wait_for"), - stringEntity); - assertEquals(201, response.getStatusLine().getStatusCode()); + index.source(document, XContentType.JSON); + index.setRefreshPolicy(RefreshPolicy.IMMEDIATE); + highLevelClient().index(index); { GetRequest getRequest = new GetRequest("index", "type", "id").version(2); ElasticsearchException exception = expectThrows(ElasticsearchException.class, @@ -271,18 +267,15 @@ public class CrudIT extends ESRestHighLevelClientTestCase { assertEquals("Elasticsearch exception [type=index_not_found_exception, reason=no such index]", response.getResponses()[1].getFailure().getFailure().getMessage()); } - - String document = "{\"field\":\"value1\"}"; - StringEntity stringEntity = new StringEntity(document, ContentType.APPLICATION_JSON); - Response r = client().performRequest(HttpPut.METHOD_NAME, "/index/type/id1", Collections.singletonMap("refresh", "true"), - stringEntity); - assertEquals(201, r.getStatusLine().getStatusCode()); - - document = "{\"field\":\"value2\"}"; - stringEntity = new StringEntity(document, ContentType.APPLICATION_JSON); - r = client().performRequest(HttpPut.METHOD_NAME, "/index/type/id2", Collections.singletonMap("refresh", "true"), stringEntity); - assertEquals(201, r.getStatusLine().getStatusCode()); - + BulkRequest bulk = new BulkRequest(); + bulk.setRefreshPolicy(RefreshPolicy.IMMEDIATE); + IndexRequest index = new IndexRequest("index", "type", "id1"); + index.source("{\"field\":\"value1\"}", XContentType.JSON); + bulk.add(index); + index = new IndexRequest("index", "type", "id2"); + index.source("{\"field\":\"value2\"}", XContentType.JSON); + bulk.add(index); + highLevelClient().bulk(bulk); { MultiGetRequest multiGetRequest = new MultiGetRequest(); multiGetRequest.add("index", "type", "id1"); diff --git a/client/rest-high-level/src/test/java/org/elasticsearch/client/documentation/CRUDDocumentationIT.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/documentation/CRUDDocumentationIT.java index 9b0b1ab83a4..6641aa2fc7d 100644 --- a/client/rest-high-level/src/test/java/org/elasticsearch/client/documentation/CRUDDocumentationIT.java +++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/documentation/CRUDDocumentationIT.java @@ -19,8 +19,6 @@ package org.elasticsearch.client.documentation; -import org.apache.http.HttpEntity; -import org.apache.http.client.methods.HttpPost; import org.apache.http.entity.ContentType; import org.apache.http.nio.entity.NStringEntity; import org.elasticsearch.ElasticsearchException; @@ -49,6 +47,7 @@ import org.elasticsearch.action.support.replication.ReplicationResponse; import org.elasticsearch.action.update.UpdateRequest; import org.elasticsearch.action.update.UpdateResponse; import org.elasticsearch.client.ESRestHighLevelClientTestCase; +import org.elasticsearch.client.Request; import org.elasticsearch.client.Response; import org.elasticsearch.client.RestHighLevelClient; import org.elasticsearch.common.Strings; @@ -58,6 +57,7 @@ import org.elasticsearch.common.unit.TimeValue; import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.common.xcontent.XContentFactory; import org.elasticsearch.common.xcontent.XContentType; +import org.elasticsearch.common.xcontent.json.JsonXContent; import org.elasticsearch.index.VersionType; import org.elasticsearch.index.get.GetResult; import 
org.elasticsearch.rest.RestStatus; @@ -271,16 +271,15 @@ public class CRUDDocumentationIT extends ESRestHighLevelClientTestCase { IndexResponse indexResponse = client.index(indexRequest); assertSame(indexResponse.status(), RestStatus.CREATED); - XContentType xContentType = XContentType.JSON; - String script = Strings.toString(XContentBuilder.builder(xContentType.xContent()) + Request request = new Request("POST", "/_scripts/increment-field"); + request.setJsonEntity(Strings.toString(JsonXContent.contentBuilder() .startObject() .startObject("script") .field("lang", "painless") .field("code", "ctx._source.field += params.count") .endObject() - .endObject()); - HttpEntity body = new NStringEntity(script, ContentType.create(xContentType.mediaType())); - Response response = client().performRequest(HttpPost.METHOD_NAME, "/_scripts/increment-field", emptyMap(), body); + .endObject())); + Response response = client().performRequest(request); assertEquals(response.getStatusLine().getStatusCode(), RestStatus.OK.getStatus()); } { diff --git a/client/rest/src/test/java/org/elasticsearch/client/RestClientSingleHostIntegTests.java b/client/rest/src/test/java/org/elasticsearch/client/RestClientSingleHostIntegTests.java index 5979c508de2..667e38a5167 100644 --- a/client/rest/src/test/java/org/elasticsearch/client/RestClientSingleHostIntegTests.java +++ b/client/rest/src/test/java/org/elasticsearch/client/RestClientSingleHostIntegTests.java @@ -351,11 +351,12 @@ public class RestClientSingleHostIntegTests extends RestClientTestCase { private Response bodyTest(final RestClient restClient, final String method) throws IOException { String requestBody = "{ \"field\": \"value\" }"; - StringEntity entity = new StringEntity(requestBody, ContentType.APPLICATION_JSON); int statusCode = randomStatusCode(getRandom()); + Request request = new Request(method, "/" + statusCode); + request.setJsonEntity(requestBody); Response esResponse; try { - esResponse = restClient.performRequest(method, "/" + statusCode, Collections.emptyMap(), entity); + esResponse = restClient.performRequest(request); } catch(ResponseException e) { esResponse = e.getResponse(); } diff --git a/client/rest/src/test/java/org/elasticsearch/client/RestClientSingleHostTests.java b/client/rest/src/test/java/org/elasticsearch/client/RestClientSingleHostTests.java index 2d419b213d6..714d2e57e6d 100644 --- a/client/rest/src/test/java/org/elasticsearch/client/RestClientSingleHostTests.java +++ b/client/rest/src/test/java/org/elasticsearch/client/RestClientSingleHostTests.java @@ -58,11 +58,9 @@ import java.net.SocketTimeoutException; import java.net.URI; import java.util.Arrays; import java.util.Collections; -import java.util.HashMap; import java.util.HashSet; import java.util.Map; import java.util.Set; -import java.util.TreeMap; import java.util.concurrent.ExecutorService; import java.util.concurrent.Executors; import java.util.concurrent.Future; From 41148e4bb11ddcfcd24e5a95607aee5c85d1843d Mon Sep 17 00:00:00 2001 From: Nik Everett Date: Mon, 14 May 2018 11:11:27 -0400 Subject: [PATCH 06/31] Docs: Update HighLevelRestClient migration docs (#30544) The High Level REST Client's documentation suggested that users should use the Low Level REST Client for index management activities. This change removes that suggestion because the high level REST client supports those APIs now. This also changes the examples in the migration docs that still use the Low Level REST Client to use the non-deprecated variants of `performRequest`.
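Condensed, the cluster-health example now documented in the migration guide follows this pattern; the sketch below restates the MigrationDocumentationIT code from this patch with exception handling elided, and assumes `client` is a `RestHighLevelClient`:

    Request request = new Request("GET", "/_cluster/health");
    request.addParameter("wait_for_status", "green");
    Response response = client.getLowLevelClient().performRequest(request);
    ClusterHealthStatus healthStatus;
    try (InputStream is = response.getEntity().getContent()) {
        Map<String, Object> map = XContentHelper.convertToMap(XContentType.JSON.xContent(), is, true);
        healthStatus = ClusterHealthStatus.fromString((String) map.get("status"));
    }
    if (healthStatus != ClusterHealthStatus.GREEN) {
        // handle a cluster that is not yet green
    }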
--- .../MigrationDocumentationIT.java | 53 ++---------- docs/java-rest/high-level/migration.asciidoc | 82 ++----------------- 2 files changed, 16 insertions(+), 119 deletions(-) diff --git a/client/rest-high-level/src/test/java/org/elasticsearch/client/documentation/MigrationDocumentationIT.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/documentation/MigrationDocumentationIT.java index 650ab882c36..489d4d9b1ed 100644 --- a/client/rest-high-level/src/test/java/org/elasticsearch/client/documentation/MigrationDocumentationIT.java +++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/documentation/MigrationDocumentationIT.java @@ -30,6 +30,7 @@ import org.elasticsearch.action.get.GetRequest; import org.elasticsearch.action.index.IndexRequest; import org.elasticsearch.action.index.IndexResponse; import org.elasticsearch.client.ESRestHighLevelClientTestCase; +import org.elasticsearch.client.Request; import org.elasticsearch.client.Response; import org.elasticsearch.client.RestHighLevelClient; import org.elasticsearch.cluster.health.ClusterHealthStatus; @@ -66,58 +67,22 @@ import static org.elasticsearch.cluster.metadata.IndexMetaData.SETTING_NUMBER_OF * -------------------------------------------------- */ public class MigrationDocumentationIT extends ESRestHighLevelClientTestCase { - - public void testCreateIndex() throws IOException { - RestHighLevelClient client = highLevelClient(); - { - //tag::migration-create-index - Settings indexSettings = Settings.builder() // <1> - .put(SETTING_NUMBER_OF_SHARDS, 1) - .put(SETTING_NUMBER_OF_REPLICAS, 0) - .build(); - - String payload = Strings.toString(XContentFactory.jsonBuilder() // <2> - .startObject() - .startObject("settings") // <3> - .value(indexSettings) - .endObject() - .startObject("mappings") // <4> - .startObject("doc") - .startObject("properties") - .startObject("time") - .field("type", "date") - .endObject() - .endObject() - .endObject() - .endObject() - .endObject()); - - HttpEntity entity = new NStringEntity(payload, ContentType.APPLICATION_JSON); // <5> - - Response response = client.getLowLevelClient().performRequest("PUT", "my-index", emptyMap(), entity); // <6> - if (response.getStatusLine().getStatusCode() != HttpStatus.SC_OK) { - // <7> - } - //end::migration-create-index - assertEquals(200, response.getStatusLine().getStatusCode()); - } - } - public void testClusterHealth() throws IOException { RestHighLevelClient client = highLevelClient(); { //tag::migration-cluster-health - Map parameters = singletonMap("wait_for_status", "green"); - Response response = client.getLowLevelClient().performRequest("GET", "/_cluster/health", parameters); // <1> + Request request = new Request("GET", "/_cluster/health"); + request.addParameter("wait_for_status", "green"); // <1> + Response response = client.getLowLevelClient().performRequest(request); // <2> ClusterHealthStatus healthStatus; - try (InputStream is = response.getEntity().getContent()) { // <2> - Map map = XContentHelper.convertToMap(XContentType.JSON.xContent(), is, true); // <3> - healthStatus = ClusterHealthStatus.fromString((String) map.get("status")); // <4> + try (InputStream is = response.getEntity().getContent()) { // <3> + Map map = XContentHelper.convertToMap(XContentType.JSON.xContent(), is, true); // <4> + healthStatus = ClusterHealthStatus.fromString((String) map.get("status")); // <5> } - if (healthStatus == ClusterHealthStatus.GREEN) { - // <5> + if (healthStatus != ClusterHealthStatus.GREEN) { + // <6> } 
//end::migration-cluster-health assertSame(ClusterHealthStatus.GREEN, healthStatus); diff --git a/docs/java-rest/high-level/migration.asciidoc b/docs/java-rest/high-level/migration.asciidoc index 1349ccb35fe..ad4e0613fc1 100644 --- a/docs/java-rest/high-level/migration.asciidoc +++ b/docs/java-rest/high-level/migration.asciidoc @@ -2,7 +2,7 @@ == Migration Guide This section describes how to migrate existing code from the `TransportClient` -to the new Java High Level REST Client released with the version 5.6.0 +to the Java High Level REST Client released with the version 5.6.0 of Elasticsearch. === Motivations around a new Java client @@ -107,9 +107,6 @@ More importantly, the high-level client: request constructors like `new IndexRequest()` to create requests objects. The requests are then executed using synchronous or asynchronous dedicated methods like `client.index()` or `client.indexAsync()`. -- does not provide indices or cluster management APIs. Management -operations can be executed by external scripts or -<>. ==== How to migrate the way requests are built @@ -241,71 +238,6 @@ returned by the cluster. <4> The `onFailure()` method is called when an error occurs during the execution of the request. -[[java-rest-high-level-migration-manage-indices]] -==== Manage Indices using the Low-Level REST Client - -The low-level client is able to execute any kind of HTTP requests, and can -therefore be used to call the APIs that are not yet supported by the high level client. - -For example, creating a new index with the `TransportClient` may look like this: - -[source,java] --------------------------------------------------- -Settings settings = Settings.builder() // <1> - .put(SETTING_NUMBER_OF_SHARDS, 1) - .put(SETTING_NUMBER_OF_REPLICAS, 0) - .build(); - -String mappings = XContentFactory.jsonBuilder() // <2> - .startObject() - .startObject("doc") - .startObject("properties") - .startObject("time") - .field("type", "date") - .endObject() - .endObject() - .endObject() - .endObject() - .string(); - -CreateIndexResponse response = transportClient.admin().indices() // <3> - .prepareCreate("my-index") - .setSettings(indexSettings) - .addMapping("doc", docMapping, XContentType.JSON) - .get(); - -if (response.isAcknowledged() == false) { - // <4> -} --------------------------------------------------- -<1> Define the settings of the index -<2> Define the mapping for document of type `doc` using a -`XContentBuilder` -<3> Create the index with the previous settings and mapping -using the `prepareCreate()` method. The execution is synchronous -and blocks on the `get()` method until the remote cluster returns -a response. -<4> Handle the situation where the index has not been created - -The same operation executed with the low-level client could be: - -["source","java",subs="attributes,callouts,macros"] --------------------------------------------------- -include-tagged::{doc-tests}/MigrationDocumentationIT.java[migration-create-index] --------------------------------------------------- -<1> Define the settings of the index -<2> Define the body of the HTTP request using a `XContentBuilder` with JSON format -<3> Include the settings in the request body -<4> Include the mappings in the request body -<5> Convert the request body from `String` to a `HttpEntity` and -set its content type (here, JSON) -<6> Execute the request using the low-level client. The execution is synchronous -and blocks on the `performRequest()` method until the remote cluster returns -a response. 
The low-level client can be retrieved from an existing `RestHighLevelClient` -instance through the `getLowLevelClient` getter method. -<7> Handle the situation where the index has not been created - - [[java-rest-high-level-migration-cluster-health]] ==== Checking Cluster Health using the Low-Level REST Client @@ -331,18 +263,18 @@ With the low-level client, the code can be changed to: -------------------------------------------------- include-tagged::{doc-tests}/MigrationDocumentationIT.java[migration-cluster-health] -------------------------------------------------- -<1> Call the cluster's health REST endpoint and wait for the cluster health to become green, -then get back a `Response` object. -<2> Retrieve an `InputStream` object in order to read the response's content -<3> Parse the response's content using Elasticsearch's helper class `XContentHelper`. This +<1> Set up the request to wait for the cluster's health to become green if it isn't already. +<2> Make the request and get back a `Response` object. +<3> Retrieve an `InputStream` object in order to read the response's content +<4> Parse the response's content using Elasticsearch's helper class `XContentHelper`. This helper requires the content type of the response to be passed as an argument and returns a `Map` of objects. Values in the map can be of any type, including inner `Map` that are used to represent the JSON object hierarchy. -<4> Retrieve the value of the `status` field in the response map, casts it as a a `String` +<5> Retrieve the value of the `status` field in the response map, cast it as a `String` object and use the `ClusterHealthStatus.fromString()` method to convert it as a `ClusterHealthStatus` object. This method throws an exception if the value does not correspond to a valid cluster health status. -<5> Handle the situation where the cluster's health is not green +<6> Handle the situation where the cluster's health is not green Note that for convenience this example uses Elasticsearch's helpers to parse the JSON response body, but any other JSON parser could have been used instead. From cc93131318057f53ef59efe6beceb50e4d139406 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Christoph=20B=C3=BCscher?= Date: Mon, 14 May 2018 17:36:26 +0200 Subject: [PATCH 07/31] Forbid expensive query parts in ranking evaluation (#30151) Currently the ranking evaluation API accepts the full query syntax for the queries specified in the evaluation set and executes them via multi search. This potentially runs costly aggregations and suggestions too. This change adds checks that forbid using aggregations, suggesters, highlighters and the explain and profile options in the queries that are run as part of the ranking evaluation since they are irrelevant in the context of this API.
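From the caller's side, the new checks surface as an `IllegalArgumentException` as soon as a rated request is built with one of the forbidden sections. A minimal sketch mirroring the new unit tests below (the index, document id and field names are illustrative only):

    List<RatedDocument> ratedDocs = Arrays.asList(new RatedDocument("index1", "id1", 1));
    SearchSourceBuilder source = new SearchSourceBuilder();
    source.aggregation(AggregationBuilders.terms("grades").field("grade"));
    // throws IllegalArgumentException: "Query in rated requests should not contain aggregations."
    RatedRequest ratedRequest = new RatedRequest("id", ratedDocs, source);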
--- .../index/rankeval/RankEvalSpec.java | 6 +- .../index/rankeval/RatedRequest.java | 101 ++++++++++++------ .../rankeval/TransportRankEvalAction.java | 17 +-- .../index/rankeval/RatedRequestsTests.java | 60 +++++++++-- .../rankeval/SmokeMultipleTemplatesIT.java | 38 +++++++ 5 files changed, 171 insertions(+), 51 deletions(-) diff --git a/modules/rank-eval/src/main/java/org/elasticsearch/index/rankeval/RankEvalSpec.java b/modules/rank-eval/src/main/java/org/elasticsearch/index/rankeval/RankEvalSpec.java index 8e0828fcfca..22875139c9b 100644 --- a/modules/rank-eval/src/main/java/org/elasticsearch/index/rankeval/RankEvalSpec.java +++ b/modules/rank-eval/src/main/java/org/elasticsearch/index/rankeval/RankEvalSpec.java @@ -57,7 +57,7 @@ public class RankEvalSpec implements Writeable, ToXContentObject { /** Default max number of requests. */ private static final int MAX_CONCURRENT_SEARCHES = 10; /** optional: Templates to base test requests on */ - private Map templates = new HashMap<>(); + private final Map templates = new HashMap<>(); public RankEvalSpec(List ratedRequests, EvaluationMetric metric, Collection templates) { this.metric = Objects.requireNonNull(metric, "Cannot evaluate ranking if no evaluation metric is provided."); @@ -68,8 +68,8 @@ public class RankEvalSpec implements Writeable, ToXContentObject { this.ratedRequests = ratedRequests; if (templates == null || templates.isEmpty()) { for (RatedRequest request : ratedRequests) { - if (request.getTestRequest() == null) { - throw new IllegalStateException("Cannot evaluate ranking if neither template nor test request is " + if (request.getEvaluationRequest() == null) { + throw new IllegalStateException("Cannot evaluate ranking if neither template nor evaluation request is " + "provided. Seen for request id: " + request.getId()); } } diff --git a/modules/rank-eval/src/main/java/org/elasticsearch/index/rankeval/RatedRequest.java b/modules/rank-eval/src/main/java/org/elasticsearch/index/rankeval/RatedRequest.java index 392ce5d0633..79dd693b3ac 100644 --- a/modules/rank-eval/src/main/java/org/elasticsearch/index/rankeval/RatedRequest.java +++ b/modules/rank-eval/src/main/java/org/elasticsearch/index/rankeval/RatedRequest.java @@ -75,9 +75,12 @@ public class RatedRequest implements Writeable, ToXContentObject { private final String id; private final List summaryFields; private final List ratedDocs; - // Search request to execute for this rated request. This can be null if template and corresponding parameters are supplied. + /** + * Search request to execute for this rated request. This can be null in + * case the query is supplied as a template with corresponding parameters + */ @Nullable - private SearchSourceBuilder testRequest; + private final SearchSourceBuilder evaluationRequest; /** * Map of parameters to use for filling a query template, can be used * instead of providing testRequest. @@ -86,27 +89,49 @@ public class RatedRequest implements Writeable, ToXContentObject { @Nullable private String templateId; - private RatedRequest(String id, List ratedDocs, SearchSourceBuilder testRequest, + /** + * Create a rated request with template ids and parameters. 
+ * + * @param id a unique name for this rated request + * @param ratedDocs a list of document ratings + * @param params template parameters + * @param templateId a template id + */ + public RatedRequest(String id, List ratedDocs, Map params, + String templateId) { + this(id, ratedDocs, null, params, templateId); + } + + /** + * Create a rated request using a {@link SearchSourceBuilder} to define the + * evaluated query. + * + * @param id a unique name for this rated request + * @param ratedDocs a list of document ratings + * @param evaluatedQuery the query that is evaluated + */ + public RatedRequest(String id, List ratedDocs, SearchSourceBuilder evaluatedQuery) { + this(id, ratedDocs, evaluatedQuery, new HashMap<>(), null); + } + + private RatedRequest(String id, List ratedDocs, SearchSourceBuilder evaluatedQuery, Map params, String templateId) { - if (params != null && (params.size() > 0 && testRequest != null)) { + if (params != null && (params.size() > 0 && evaluatedQuery != null)) { throw new IllegalArgumentException( - "Ambiguous rated request: Set both, verbatim test request and test request " - + "template parameters."); + "Ambiguous rated request: Set both, verbatim test request and test request " + "template parameters."); } - if (templateId != null && testRequest != null) { + if (templateId != null && evaluatedQuery != null) { throw new IllegalArgumentException( - "Ambiguous rated request: Set both, verbatim test request and test request " - + "template parameters."); + "Ambiguous rated request: Set both, verbatim test request and test request " + "template parameters."); } - if ((params == null || params.size() < 1) && testRequest == null) { - throw new IllegalArgumentException( - "Need to set at least test request or test request template parameters."); + if ((params == null || params.size() < 1) && evaluatedQuery == null) { + throw new IllegalArgumentException("Need to set at least test request or test request template parameters."); } if ((params != null && params.size() > 0) && templateId == null) { - throw new IllegalArgumentException( - "If template parameters are supplied need to set id of template to apply " - + "them to too."); + throw new IllegalArgumentException("If template parameters are supplied need to set id of template to apply " + "them to too."); } + validateEvaluatedQuery(evaluatedQuery); + // check that no two documents with same _index/id are specified Set docKeys = new HashSet<>(); for (RatedDocument doc : ratedDocs) { @@ -118,7 +143,7 @@ public class RatedRequest implements Writeable, ToXContentObject { } this.id = id; - this.testRequest = testRequest; + this.evaluationRequest = evaluatedQuery; this.ratedDocs = new ArrayList<>(ratedDocs); if (params != null) { this.params = new HashMap<>(params); @@ -129,18 +154,30 @@ public class RatedRequest implements Writeable, ToXContentObject { this.summaryFields = new ArrayList<>(); } - public RatedRequest(String id, List ratedDocs, Map params, - String templateId) { - this(id, ratedDocs, null, params, templateId); + static void validateEvaluatedQuery(SearchSourceBuilder evaluationRequest) { + // ensure that testRequest, if set, does not contain aggregation, suggest or highlighting section + if (evaluationRequest != null) { + if (evaluationRequest.suggest() != null) { + throw new IllegalArgumentException("Query in rated requests should not contain a suggest section."); + } + if (evaluationRequest.aggregations() != null) { + throw new IllegalArgumentException("Query in rated requests should not contain
aggregations."); + } + if (evaluationRequest.highlighter() != null) { + throw new IllegalArgumentException("Query in rated requests should not contain a highlighter section."); + } + if (evaluationRequest.explain() != null && evaluationRequest.explain()) { + throw new IllegalArgumentException("Query in rated requests should not use explain."); + } + if (evaluationRequest.profile()) { + throw new IllegalArgumentException("Query in rated requests should not use profile."); + } + } } - public RatedRequest(String id, List ratedDocs, SearchSourceBuilder testRequest) { - this(id, ratedDocs, testRequest, new HashMap<>(), null); - } - - public RatedRequest(StreamInput in) throws IOException { + RatedRequest(StreamInput in) throws IOException { this.id = in.readString(); - testRequest = in.readOptionalWriteable(SearchSourceBuilder::new); + evaluationRequest = in.readOptionalWriteable(SearchSourceBuilder::new); int intentSize = in.readInt(); ratedDocs = new ArrayList<>(intentSize); @@ -159,7 +196,7 @@ public class RatedRequest implements Writeable, ToXContentObject { @Override public void writeTo(StreamOutput out) throws IOException { out.writeString(id); - out.writeOptionalWriteable(testRequest); + out.writeOptionalWriteable(evaluationRequest); out.writeInt(ratedDocs.size()); for (RatedDocument ratedDoc : ratedDocs) { @@ -173,8 +210,8 @@ public class RatedRequest implements Writeable, ToXContentObject { out.writeOptionalString(this.templateId); } - public SearchSourceBuilder getTestRequest() { - return testRequest; + public SearchSourceBuilder getEvaluationRequest() { + return evaluationRequest; } /** return the user supplied request id */ @@ -240,8 +277,8 @@ public class RatedRequest implements Writeable, ToXContentObject { public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { builder.startObject(); builder.field(ID_FIELD.getPreferredName(), this.id); - if (testRequest != null) { - builder.field(REQUEST_FIELD.getPreferredName(), this.testRequest); + if (evaluationRequest != null) { + builder.field(REQUEST_FIELD.getPreferredName(), this.evaluationRequest); } builder.startArray(RATINGS_FIELD.getPreferredName()); for (RatedDocument doc : this.ratedDocs) { @@ -285,7 +322,7 @@ public class RatedRequest implements Writeable, ToXContentObject { RatedRequest other = (RatedRequest) obj; - return Objects.equals(id, other.id) && Objects.equals(testRequest, other.testRequest) + return Objects.equals(id, other.id) && Objects.equals(evaluationRequest, other.evaluationRequest) && Objects.equals(summaryFields, other.summaryFields) && Objects.equals(ratedDocs, other.ratedDocs) && Objects.equals(params, other.params) @@ -294,7 +331,7 @@ public class RatedRequest implements Writeable, ToXContentObject { @Override public final int hashCode() { - return Objects.hash(id, testRequest, summaryFields, ratedDocs, params, + return Objects.hash(id, evaluationRequest, summaryFields, ratedDocs, params, templateId); } } diff --git a/modules/rank-eval/src/main/java/org/elasticsearch/index/rankeval/TransportRankEvalAction.java b/modules/rank-eval/src/main/java/org/elasticsearch/index/rankeval/TransportRankEvalAction.java index 019ae274466..e0a0b3ea133 100644 --- a/modules/rank-eval/src/main/java/org/elasticsearch/index/rankeval/TransportRankEvalAction.java +++ b/modules/rank-eval/src/main/java/org/elasticsearch/index/rankeval/TransportRankEvalAction.java @@ -52,6 +52,7 @@ import java.util.Map.Entry; import java.util.concurrent.ConcurrentHashMap; import static 
org.elasticsearch.common.xcontent.XContentHelper.createParser; +import static org.elasticsearch.index.rankeval.RatedRequest.validateEvaluatedQuery; /** * Instances of this class execute a collection of search intents (read: user @@ -99,15 +100,17 @@ public class TransportRankEvalAction extends HandledTransportAction ratedRequestsInSearch = new ArrayList<>(); for (RatedRequest ratedRequest : ratedRequests) { - SearchSourceBuilder ratedSearchSource = ratedRequest.getTestRequest(); - if (ratedSearchSource == null) { + SearchSourceBuilder evaluationRequest = ratedRequest.getEvaluationRequest(); + if (evaluationRequest == null) { Map params = ratedRequest.getParams(); String templateId = ratedRequest.getTemplateId(); TemplateScript.Factory templateScript = scriptsWithoutParams.get(templateId); String resolvedRequest = templateScript.newInstance(params).execute(); try (XContentParser subParser = createParser(namedXContentRegistry, LoggingDeprecationHandler.INSTANCE, new BytesArray(resolvedRequest), XContentType.JSON)) { - ratedSearchSource = SearchSourceBuilder.fromXContent(subParser, false); + evaluationRequest = SearchSourceBuilder.fromXContent(subParser, false); + // check for parts that should not be part of a ranking evaluation request + validateEvaluatedQuery(evaluationRequest); } catch (IOException e) { // if we fail parsing, put the exception into the errors map and continue errors.put(ratedRequest.getId(), e); @@ -116,17 +119,17 @@ public class TransportRankEvalAction extends HandledTransportAction summaryFields = ratedRequest.getSummaryFields(); if (summaryFields.isEmpty()) { - ratedSearchSource.fetchSource(false); + evaluationRequest.fetchSource(false); } else { - ratedSearchSource.fetchSource(summaryFields.toArray(new String[summaryFields.size()]), new String[0]); + evaluationRequest.fetchSource(summaryFields.toArray(new String[summaryFields.size()]), new String[0]); } - SearchRequest searchRequest = new SearchRequest(request.indices(), ratedSearchSource); + SearchRequest searchRequest = new SearchRequest(request.indices(), evaluationRequest); searchRequest.indicesOptions(request.indicesOptions()); msearchRequest.add(searchRequest); } diff --git a/modules/rank-eval/src/test/java/org/elasticsearch/index/rankeval/RatedRequestsTests.java b/modules/rank-eval/src/test/java/org/elasticsearch/index/rankeval/RatedRequestsTests.java index 196b50b7f61..084f29b8c9a 100644 --- a/modules/rank-eval/src/test/java/org/elasticsearch/index/rankeval/RatedRequestsTests.java +++ b/modules/rank-eval/src/test/java/org/elasticsearch/index/rankeval/RatedRequestsTests.java @@ -33,7 +33,11 @@ import org.elasticsearch.common.xcontent.json.JsonXContent; import org.elasticsearch.index.query.MatchAllQueryBuilder; import org.elasticsearch.index.query.QueryBuilder; import org.elasticsearch.search.SearchModule; +import org.elasticsearch.search.aggregations.AggregationBuilders; import org.elasticsearch.search.builder.SearchSourceBuilder; +import org.elasticsearch.search.fetch.subphase.highlight.HighlightBuilder; +import org.elasticsearch.search.suggest.SuggestBuilder; +import org.elasticsearch.search.suggest.SuggestBuilders; import org.elasticsearch.test.ESTestCase; import org.junit.AfterClass; import org.junit.BeforeClass; @@ -165,7 +169,7 @@ public class RatedRequestsTests extends ESTestCase { private static RatedRequest mutateTestItem(RatedRequest original) { String id = original.getId(); - SearchSourceBuilder testRequest = original.getTestRequest(); + SearchSourceBuilder evaluationRequest = 
original.getEvaluationRequest(); List ratedDocs = original.getRatedDocs(); Map params = original.getParams(); List summaryFields = original.getSummaryFields(); @@ -177,11 +181,11 @@ public class RatedRequestsTests extends ESTestCase { id = randomValueOtherThan(id, () -> randomAlphaOfLength(10)); break; case 1: - if (testRequest != null) { - int size = randomValueOtherThan(testRequest.size(), () -> randomInt(Integer.MAX_VALUE)); - testRequest = new SearchSourceBuilder(); - testRequest.size(size); - testRequest.query(new MatchAllQueryBuilder()); + if (evaluationRequest != null) { + int size = randomValueOtherThan(evaluationRequest.size(), () -> randomInt(Integer.MAX_VALUE)); + evaluationRequest = new SearchSourceBuilder(); + evaluationRequest.size(size); + evaluationRequest.query(new MatchAllQueryBuilder()); } else { if (randomBoolean()) { Map mutated = new HashMap<>(); @@ -204,10 +208,10 @@ public class RatedRequestsTests extends ESTestCase { } RatedRequest ratedRequest; - if (testRequest == null) { + if (evaluationRequest == null) { ratedRequest = new RatedRequest(id, ratedDocs, params, templateId); } else { - ratedRequest = new RatedRequest(id, ratedDocs, testRequest); + ratedRequest = new RatedRequest(id, ratedDocs, evaluationRequest); } ratedRequest.addSummaryFields(summaryFields); @@ -258,6 +262,44 @@ public class RatedRequestsTests extends ESTestCase { expectThrows(IllegalArgumentException.class, () -> new RatedRequest("id", ratedDocs, null, "templateId")); } + public void testAggsNotAllowed() { + List ratedDocs = Arrays.asList(new RatedDocument("index1", "id1", 1)); + SearchSourceBuilder query = new SearchSourceBuilder(); + query.aggregation(AggregationBuilders.terms("fieldName")); + IllegalArgumentException e = expectThrows(IllegalArgumentException.class, () -> new RatedRequest("id", ratedDocs, query)); + assertEquals("Query in rated requests should not contain aggregations.", e.getMessage()); + } + + public void testSuggestionsNotAllowed() { + List ratedDocs = Arrays.asList(new RatedDocument("index1", "id1", 1)); + SearchSourceBuilder query = new SearchSourceBuilder(); + query.suggest(new SuggestBuilder().addSuggestion("id", SuggestBuilders.completionSuggestion("fieldname"))); + IllegalArgumentException e = expectThrows(IllegalArgumentException.class, () -> new RatedRequest("id", ratedDocs, query)); + assertEquals("Query in rated requests should not contain a suggest section.", e.getMessage()); + } + + public void testHighlighterNotAllowed() { + List ratedDocs = Arrays.asList(new RatedDocument("index1", "id1", 1)); + SearchSourceBuilder query = new SearchSourceBuilder(); + query.highlighter(new HighlightBuilder().field("field")); + IllegalArgumentException e = expectThrows(IllegalArgumentException.class, () -> new RatedRequest("id", ratedDocs, query)); + assertEquals("Query in rated requests should not contain a highlighter section.", e.getMessage()); + } + + public void testExplainNotAllowed() { + List ratedDocs = Arrays.asList(new RatedDocument("index1", "id1", 1)); + IllegalArgumentException e = expectThrows(IllegalArgumentException.class, + () -> new RatedRequest("id", ratedDocs, new SearchSourceBuilder().explain(true))); + assertEquals("Query in rated requests should not use explain.", e.getMessage()); + } + + public void testProfileNotAllowed() { + List ratedDocs = Arrays.asList(new RatedDocument("index1", "id1", 1)); + IllegalArgumentException e = expectThrows(IllegalArgumentException.class, + () -> new RatedRequest("id", ratedDocs, new SearchSourceBuilder().profile(true))); 
+ assertEquals("Query in rated requests should not use profile.", e.getMessage()); + } + /** * test that modifying the order of index/docId to make sure it doesn't * matter for parsing xContent @@ -287,7 +329,7 @@ public class RatedRequestsTests extends ESTestCase { try (XContentParser parser = createParser(JsonXContent.jsonXContent, querySpecString)) { RatedRequest specification = RatedRequest.fromXContent(parser); assertEquals("my_qa_query", specification.getId()); - assertNotNull(specification.getTestRequest()); + assertNotNull(specification.getEvaluationRequest()); List ratedDocs = specification.getRatedDocs(); assertEquals(3, ratedDocs.size()); for (int i = 0; i < 3; i++) { diff --git a/qa/smoke-test-rank-eval-with-mustache/src/test/java/org/elasticsearch/index/rankeval/SmokeMultipleTemplatesIT.java b/qa/smoke-test-rank-eval-with-mustache/src/test/java/org/elasticsearch/index/rankeval/SmokeMultipleTemplatesIT.java index 50860ddd87b..0ad78ad0c7a 100644 --- a/qa/smoke-test-rank-eval-with-mustache/src/test/java/org/elasticsearch/index/rankeval/SmokeMultipleTemplatesIT.java +++ b/qa/smoke-test-rank-eval-with-mustache/src/test/java/org/elasticsearch/index/rankeval/SmokeMultipleTemplatesIT.java @@ -30,6 +30,7 @@ import java.io.IOException; import java.util.ArrayList; import java.util.Arrays; import java.util.Collection; +import java.util.Collections; import java.util.HashMap; import java.util.HashSet; import java.util.List; @@ -106,6 +107,43 @@ public class SmokeMultipleTemplatesIT extends ESIntegTestCase { assertEquals(0.9, response.getEvaluationResult(), Double.MIN_VALUE); } + public void testTemplateWithAggsFails() { + String template = "{ \"aggs\" : { \"avg_grade\" : { \"avg\" : { \"field\" : \"grade\" }}}}"; + assertTemplatedRequestFailures(template, "Query in rated requests should not contain aggregations."); + } + + public void testTemplateWithSuggestFails() { + String template = "{\"suggest\" : {\"my-suggestion\" : {\"text\" : \"Elastic\",\"term\" : {\"field\" : \"message\"}}}}"; + assertTemplatedRequestFailures(template, "Query in rated requests should not contain a suggest section."); + } + + public void testTemplateWithHighlighterFails() { + String template = "{\"highlight\" : { \"fields\" : {\"content\" : {}}}}"; + assertTemplatedRequestFailures(template, "Query in rated requests should not contain a highlighter section."); + } + + public void testTemplateWithProfileFails() { + String template = "{\"profile\" : \"true\" }"; + assertTemplatedRequestFailures(template, "Query in rated requests should not use profile."); + } + + public void testTemplateWithExplainFails() { + String template = "{\"explain\" : \"true\" }"; + assertTemplatedRequestFailures(template, "Query in rated requests should not use explain."); + } + + private static void assertTemplatedRequestFailures(String template, String expectedMessage) { + List ratedDocs = Arrays.asList(new RatedDocument("index1", "id1", 1)); + RatedRequest ratedRequest = new RatedRequest("id", ratedDocs, Collections.singletonMap("param1", "value1"), "templateId"); + Collection templates = Collections.singletonList(new ScriptWithId("templateId", + new Script(ScriptType.INLINE, Script.DEFAULT_TEMPLATE_LANG, template, Collections.emptyMap()))); + RankEvalSpec rankEvalSpec = new RankEvalSpec(Collections.singletonList(ratedRequest), new PrecisionAtK(), templates); + RankEvalRequest rankEvalRequest = new RankEvalRequest(rankEvalSpec, new String[] { "test" }); + IllegalArgumentException e = expectThrows(IllegalArgumentException.class, + () -> 
client().execute(RankEvalAction.INSTANCE, rankEvalRequest).actionGet()); + assertEquals(expectedMessage, e.getMessage()); + } + private static List createRelevant(String... docs) { List relevant = new ArrayList<>(); for (String doc : docs) { From af10fd65e706578e81d38e6bba3b7ac22321ecb2 Mon Sep 17 00:00:00 2001 From: Ioannis Kakavas Date: Mon, 14 May 2018 18:48:11 +0300 Subject: [PATCH 08/31] Unmute IndexUpgradeIT tests The errors were caused because release tests would use a copy of the public key that was formatted differently. The change to the public key format was introduced in [1]. The release tests Jenkins job has now been updated to use the correct key format depending on the branch they run on [2]. Closes #30430 [1] https://github.com/elastic/elasticsearch/pull/30251 [2] https://github.com/elastic/infra/pull/4944 --- .../java/org/elasticsearch/xpack/upgrade/IndexUpgradeIT.java | 2 -- 1 file changed, 2 deletions(-) diff --git a/x-pack/plugin/upgrade/src/test/java/org/elasticsearch/xpack/upgrade/IndexUpgradeIT.java b/x-pack/plugin/upgrade/src/test/java/org/elasticsearch/xpack/upgrade/IndexUpgradeIT.java index 9f1fb95ed48..ef5c3acc3d2 100644 --- a/x-pack/plugin/upgrade/src/test/java/org/elasticsearch/xpack/upgrade/IndexUpgradeIT.java +++ b/x-pack/plugin/upgrade/src/test/java/org/elasticsearch/xpack/upgrade/IndexUpgradeIT.java @@ -5,7 +5,6 @@ */ package org.elasticsearch.xpack.upgrade; -import org.apache.lucene.util.LuceneTestCase.AwaitsFix; import org.elasticsearch.Build; import org.elasticsearch.ElasticsearchSecurityException; import org.elasticsearch.action.search.SearchResponse; @@ -31,7 +30,6 @@ import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertThro import static org.hamcrest.Matchers.empty; import static org.hamcrest.core.IsEqual.equalTo; -@AwaitsFix(bugUrl = "https://github.com/elastic/elasticsearch/issues/30430") public class IndexUpgradeIT extends IndexUpgradeIntegTestCase { @Before From 4a4e3d70d5ac22bdec44ebd4605b6f958ff1898a Mon Sep 17 00:00:00 2001 From: Jason Tedor Date: Mon, 14 May 2018 12:22:35 -0400 Subject: [PATCH 09/31] Default to one shard (#30539) This commit changes the default out-of-the-box configuration for the number of shards from five to one. We think this will help address a common problem of oversharding. For users with time-based indices that need a different default, this can be managed with index templates. For users with non-time-based indices that find they need to re-shard, the split API is now in place, so they no longer need to resort to reindexing. Since this has the impact of changing the default number of shards used in REST tests, we want to ensure that we still have coverage for issues that could arise from multiple shards. As such, we randomize (rarely) the default number of shards in REST tests to two. This is managed via a global index template. However, some tests check the templates that are in the cluster state during the test. Since this template is randomly there, we need a way for tests to skip adding the template used to set the number of shards to two. For this we add the default_shards feature skip. To avoid having to write our docs in a complicated way because they might sometimes be behind one shard and sometimes behind two, we apply the default_shards feature skip to all docs tests. That is, these tests will always run with the default number of shards (one).
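For users who want to keep the old behavior for new indices, the index-template route mentioned above can be as small as the following sketch, shown here with the low-level REST client; the template name and index pattern are assumptions for illustration, and `restClient` is an existing `RestClient`:

    Request request = new Request("PUT", "/_template/five_shard_logs");
    request.setJsonEntity(
        "{\n" +
        "  \"index_patterns\": [\"logs-*\"],\n" +
        "  \"settings\": { \"index.number_of_shards\": 5 }\n" +
        "}");
    Response response = restClient.performRequest(request);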
--- .../doc/RestTestsFromSnippetsTask.groovy | 1 + .../org/elasticsearch/client/SearchIT.java | 6 ++-- .../documentation/SearchDocumentationIT.java | 2 +- .../test/rest/CreatedLocationHeaderIT.java | 10 +++++- .../bucket/children-aggregation.asciidoc | 4 +-- .../metrics/geocentroid-aggregation.asciidoc | 10 +++--- .../pattern-replace-charfilter.asciidoc | 4 +-- .../tokenizers/edgengram-tokenizer.asciidoc | 4 +-- docs/reference/api-conventions.asciidoc | 6 +--- docs/reference/cat/allocation.asciidoc | 2 +- docs/reference/cat/health.asciidoc | 4 +-- docs/reference/cat/indices.asciidoc | 4 +-- docs/reference/cat/segments.asciidoc | 4 +-- docs/reference/cluster/health.asciidoc | 8 ++--- docs/reference/getting-started.asciidoc | 6 ++-- docs/reference/glossary.asciidoc | 11 +++--- .../how-to/recipes/stemming.asciidoc | 34 +++++++++--------- docs/reference/indices/flush.asciidoc | 10 +----- docs/reference/indices/shrink-index.asciidoc | 4 +-- .../mapping/params/normalizer.asciidoc | 22 ++++++------ .../mapping/types/percolator.asciidoc | 12 +++---- .../query-dsl/percolate-query.asciidoc | 22 ++++++------ .../query-dsl/terms-set-query.asciidoc | 8 ++--- docs/reference/search/count.asciidoc | 4 +-- docs/reference/search/search-shards.asciidoc | 4 +-- .../suggesters/completion-suggest.asciidoc | 8 ++--- docs/reference/search/validate.asciidoc | 30 ++-------------- .../test/reindex/35_search_failures.yml | 9 ++++- .../update_by_query/35_search_failure.yml | 9 ++++- .../test/multi_cluster/10_basic.yml | 2 +- .../test/remote_cluster/10_basic.yml | 4 +++ .../test/cat.templates/10_basic.yml | 6 ++++ .../test/indices.shrink/10_basic.yml | 3 +- .../test/indices.shrink/20_source_mapping.yml | 3 +- .../test/indices.shrink/30_copy_settings.yml | 1 + .../search.aggregation/240_max_buckets.yml | 35 +++++++++++++++++-- .../metadata/MetaDataCreateIndexService.java | 26 ++++++++++---- .../allocation/FilteringAllocationIT.java | 2 +- .../metadata/IndexCreationTaskTests.java | 2 +- .../MetaDataCreateIndexServiceTests.java | 16 +++++++++ .../rest/yaml/ESClientYamlSuiteTestCase.java | 20 +++++++++++ .../test/rest/yaml/Features.java | 1 + .../qa/sql/security/RestSqlSecurityIT.java | 6 +--- 43 files changed, 232 insertions(+), 157 deletions(-) diff --git a/buildSrc/src/main/groovy/org/elasticsearch/gradle/doc/RestTestsFromSnippetsTask.groovy b/buildSrc/src/main/groovy/org/elasticsearch/gradle/doc/RestTestsFromSnippetsTask.groovy index 15a4f21b175..adacc1863c5 100644 --- a/buildSrc/src/main/groovy/org/elasticsearch/gradle/doc/RestTestsFromSnippetsTask.groovy +++ b/buildSrc/src/main/groovy/org/elasticsearch/gradle/doc/RestTestsFromSnippetsTask.groovy @@ -225,6 +225,7 @@ public class RestTestsFromSnippetsTask extends SnippetsTask { * warning every time. 
*/ current.println(" - skip:") current.println(" features: ") + current.println(" - default_shards") current.println(" - stash_in_key") current.println(" - stash_in_path") current.println(" - stash_path_replace") diff --git a/client/rest-high-level/src/test/java/org/elasticsearch/client/SearchIT.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/SearchIT.java index 9828041332b..549b4ce0a85 100644 --- a/client/rest-high-level/src/test/java/org/elasticsearch/client/SearchIT.java +++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/SearchIT.java @@ -312,14 +312,14 @@ public class SearchIT extends ESRestHighLevelClientTestCase { MatrixStats matrixStats = searchResponse.getAggregations().get("agg1"); assertEquals(5, matrixStats.getFieldCount("num")); assertEquals(56d, matrixStats.getMean("num"), 0d); - assertEquals(1830d, matrixStats.getVariance("num"), 0d); - assertEquals(0.09340198804973046, matrixStats.getSkewness("num"), 0d); + assertEquals(1830.0000000000002, matrixStats.getVariance("num"), 0d); + assertEquals(0.09340198804973039, matrixStats.getSkewness("num"), 0d); assertEquals(1.2741646510794589, matrixStats.getKurtosis("num"), 0d); assertEquals(5, matrixStats.getFieldCount("num2")); assertEquals(29d, matrixStats.getMean("num2"), 0d); assertEquals(330d, matrixStats.getVariance("num2"), 0d); assertEquals(-0.13568039346585542, matrixStats.getSkewness("num2"), 1.0e-16); - assertEquals(1.3517561983471074, matrixStats.getKurtosis("num2"), 0d); + assertEquals(1.3517561983471071, matrixStats.getKurtosis("num2"), 0d); assertEquals(-767.5, matrixStats.getCovariance("num", "num2"), 0d); assertEquals(-0.9876336291667923, matrixStats.getCorrelation("num", "num2"), 0d); } diff --git a/client/rest-high-level/src/test/java/org/elasticsearch/client/documentation/SearchDocumentationIT.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/documentation/SearchDocumentationIT.java index 4400d05a9f8..6fdc60fcb33 100644 --- a/client/rest-high-level/src/test/java/org/elasticsearch/client/documentation/SearchDocumentationIT.java +++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/documentation/SearchDocumentationIT.java @@ -800,7 +800,7 @@ public class SearchDocumentationIT extends ESRestHighLevelClientTestCase { double qualityLevel = evalQuality.getQualityLevel(); // <3> assertEquals(1.0 / 3.0, qualityLevel, 0.0); List hitsAndRatings = evalQuality.getHitsAndRatings(); - RatedSearchHit ratedSearchHit = hitsAndRatings.get(0); + RatedSearchHit ratedSearchHit = hitsAndRatings.get(2); assertEquals("3", ratedSearchHit.getSearchHit().getId()); // <4> assertFalse(ratedSearchHit.getRating().isPresent()); // <5> MetricDetail metricDetails = evalQuality.getMetricDetails(); diff --git a/distribution/archives/integ-test-zip/src/test/java/org/elasticsearch/test/rest/CreatedLocationHeaderIT.java b/distribution/archives/integ-test-zip/src/test/java/org/elasticsearch/test/rest/CreatedLocationHeaderIT.java index c61b736bf6d..74cc251f52c 100644 --- a/distribution/archives/integ-test-zip/src/test/java/org/elasticsearch/test/rest/CreatedLocationHeaderIT.java +++ b/distribution/archives/integ-test-zip/src/test/java/org/elasticsearch/test/rest/CreatedLocationHeaderIT.java @@ -21,18 +21,22 @@ package org.elasticsearch.test.rest; import org.apache.http.entity.ContentType; import org.apache.http.entity.StringEntity; +import org.elasticsearch.client.Request; import org.elasticsearch.client.Response; +import org.junit.Before; import java.io.IOException; import static 
java.util.Collections.emptyMap; import static java.util.Collections.singletonMap; +import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.startsWith; /** * Tests for the "Location" header returned when returning {@code 201 CREATED}. */ public class CreatedLocationHeaderIT extends ESRestTestCase { + public void testCreate() throws IOException { locationTestCase("PUT", "test/test/1"); } @@ -54,8 +58,11 @@ public class CreatedLocationHeaderIT extends ESRestTestCase { private void locationTestCase(String method, String url) throws IOException { locationTestCase(client().performRequest(method, url, emptyMap(), new StringEntity("{\"test\": \"test\"}", ContentType.APPLICATION_JSON))); + // we have to delete the index otherwise the second indexing request will route to the single shard and not produce a 201 + final Response response = client().performRequest(new Request("DELETE", "test")); + assertThat(response.getStatusLine().getStatusCode(), equalTo(200)); locationTestCase(client().performRequest(method, url + "?routing=cat", emptyMap(), - new StringEntity("{\"test\": \"test\"}", ContentType.APPLICATION_JSON))); + new StringEntity("{\"test\": \"test\"}", ContentType.APPLICATION_JSON))); } private void locationTestCase(Response response) throws IOException { @@ -65,4 +72,5 @@ public class CreatedLocationHeaderIT extends ESRestTestCase { Response getResponse = client().performRequest("GET", location); assertEquals(singletonMap("test", "test"), entityAsMap(getResponse).get("_source")); } + } diff --git a/docs/reference/aggregations/bucket/children-aggregation.asciidoc b/docs/reference/aggregations/bucket/children-aggregation.asciidoc index e616359e8a8..3805b2e564c 100644 --- a/docs/reference/aggregations/bucket/children-aggregation.asciidoc +++ b/docs/reference/aggregations/bucket/children-aggregation.asciidoc @@ -137,8 +137,8 @@ Possible response: "took": 25, "timed_out": false, "_shards": { - "total": 5, - "successful": 5, + "total": 1, + "successful": 1, "skipped" : 0, "failed": 0 }, diff --git a/docs/reference/aggregations/metrics/geocentroid-aggregation.asciidoc b/docs/reference/aggregations/metrics/geocentroid-aggregation.asciidoc index 59cadf1518e..7dd5dca61b9 100644 --- a/docs/reference/aggregations/metrics/geocentroid-aggregation.asciidoc +++ b/docs/reference/aggregations/metrics/geocentroid-aggregation.asciidoc @@ -60,8 +60,8 @@ The response for the above aggregation: "aggregations": { "centroid": { "location": { - "lat": 51.00982963806018, - "lon": 3.9662131061777472 + "lat": 51.009829603135586, + "lon": 3.9662130642682314 }, "count": 6 } @@ -113,8 +113,8 @@ The response for the above aggregation: "doc_count": 3, "centroid": { "location": { - "lat": 52.371655656024814, - "lon": 4.909563297405839 + "lat": 52.371655642054975, + "lon": 4.9095632415264845 }, "count": 3 } @@ -125,7 +125,7 @@ The response for the above aggregation: "centroid": { "location": { "lat": 48.86055548675358, - "lon": 2.3316944623366 + "lon": 2.331694420427084 }, "count": 2 } diff --git a/docs/reference/analysis/charfilters/pattern-replace-charfilter.asciidoc b/docs/reference/analysis/charfilters/pattern-replace-charfilter.asciidoc index 6e881121a0f..3da1c60db05 100644 --- a/docs/reference/analysis/charfilters/pattern-replace-charfilter.asciidoc +++ b/docs/reference/analysis/charfilters/pattern-replace-charfilter.asciidoc @@ -235,8 +235,8 @@ The output from the above is: "timed_out": false, "took": $body.took, "_shards": { - "total": 5, - "successful": 5, + "total": 1, + "successful": 1, 
"skipped" : 0, "failed": 0 }, diff --git a/docs/reference/analysis/tokenizers/edgengram-tokenizer.asciidoc b/docs/reference/analysis/tokenizers/edgengram-tokenizer.asciidoc index 3cf1f8403e2..9b6861627be 100644 --- a/docs/reference/analysis/tokenizers/edgengram-tokenizer.asciidoc +++ b/docs/reference/analysis/tokenizers/edgengram-tokenizer.asciidoc @@ -294,8 +294,8 @@ GET my_index/_search "took": $body.took, "timed_out": false, "_shards": { - "total": 5, - "successful": 5, + "total": 1, + "successful": 1, "skipped" : 0, "failed": 0 }, diff --git a/docs/reference/api-conventions.asciidoc b/docs/reference/api-conventions.asciidoc index e2824bb5285..42216a9a0fc 100644 --- a/docs/reference/api-conventions.asciidoc +++ b/docs/reference/api-conventions.asciidoc @@ -300,11 +300,7 @@ Responds: "indices": { "twitter": { "shards": { - "0": [{"state": "STARTED"}, {"state": "UNASSIGNED"}], - "1": [{"state": "STARTED"}, {"state": "UNASSIGNED"}], - "2": [{"state": "STARTED"}, {"state": "UNASSIGNED"}], - "3": [{"state": "STARTED"}, {"state": "UNASSIGNED"}], - "4": [{"state": "STARTED"}, {"state": "UNASSIGNED"}] + "0": [{"state": "STARTED"}, {"state": "UNASSIGNED"}] } } } diff --git a/docs/reference/cat/allocation.asciidoc b/docs/reference/cat/allocation.asciidoc index 3719758ff58..a9de182e3c0 100644 --- a/docs/reference/cat/allocation.asciidoc +++ b/docs/reference/cat/allocation.asciidoc @@ -16,7 +16,7 @@ Might respond with: [source,txt] -------------------------------------------------- shards disk.indices disk.used disk.avail disk.total disk.percent host ip node - 5 260b 47.3gb 43.4gb 100.7gb 46 127.0.0.1 127.0.0.1 CSUXak2 + 1 260b 47.3gb 43.4gb 100.7gb 46 127.0.0.1 127.0.0.1 CSUXak2 -------------------------------------------------- // TESTRESPONSE[s/\d+(\.\d+)?[tgmk]?b/\\d+(\\.\\d+)?[tgmk]?b/ s/46/\\d+/] // TESTRESPONSE[s/CSUXak2/.+/ _cat] diff --git a/docs/reference/cat/health.asciidoc b/docs/reference/cat/health.asciidoc index ca2a1838adb..5f053edf308 100644 --- a/docs/reference/cat/health.asciidoc +++ b/docs/reference/cat/health.asciidoc @@ -14,7 +14,7 @@ GET /_cat/health?v [source,txt] -------------------------------------------------- epoch timestamp cluster status node.total node.data shards pri relo init unassign pending_tasks max_task_wait_time active_shards_percent -1475871424 16:17:04 elasticsearch green 1 1 5 5 0 0 0 0 - 100.0% +1475871424 16:17:04 elasticsearch green 1 1 1 1 0 0 0 0 - 100.0% -------------------------------------------------- // TESTRESPONSE[s/1475871424 16:17:04/\\d+ \\d+:\\d+:\\d+/] // TESTRESPONSE[s/elasticsearch/[^ ]+/ s/0 -/\\d+ (-|\\d+(\\.\\d+)?[ms]+)/ _cat] @@ -33,7 +33,7 @@ which looks like: [source,txt] -------------------------------------------------- cluster status node.total node.data shards pri relo init unassign pending_tasks max_task_wait_time active_shards_percent -elasticsearch green 1 1 5 5 0 0 0 0 - 100.0% +elasticsearch green 1 1 1 1 0 0 0 0 - 100.0% -------------------------------------------------- // TESTRESPONSE[s/elasticsearch/[^ ]+/ s/0 -/\\d+ (-|\\d+(\\.\\d+)?[ms]+)/ _cat] diff --git a/docs/reference/cat/indices.asciidoc b/docs/reference/cat/indices.asciidoc index 3a50a836d0f..2a5b865fefa 100644 --- a/docs/reference/cat/indices.asciidoc +++ b/docs/reference/cat/indices.asciidoc @@ -18,7 +18,7 @@ Might respond with: -------------------------------------------------- health status index uuid pri rep docs.count docs.deleted store.size pri.store.size yellow open twitter u8FNjxh8Rfy_awN11oDKYQ 1 1 1200 0 88.1kb 88.1kb -green open twitter2 
nYFWZEO7TUiOjLQXBaYJpA 5 0 0 0 260b 260b +green open twitter2 nYFWZEO7TUiOjLQXBaYJpA 1 0 0 0 260b 260b -------------------------------------------------- // TESTRESPONSE[s/\d+(\.\d+)?[tgmk]?b/\\d+(\\.\\d+)?[tgmk]?b/] // TESTRESPONSE[s/u8FNjxh8Rfy_awN11oDKYQ|nYFWZEO7TUiOjLQXBaYJpA/.+/ _cat] @@ -81,7 +81,7 @@ Which looks like: -------------------------------------------------- health status index uuid pri rep docs.count docs.deleted store.size pri.store.size yellow open twitter u8FNjxh8Rfy_awN11oDKYQ 1 1 1200 0 88.1kb 88.1kb -green open twitter2 nYFWZEO7TUiOjLQXBaYJpA 5 0 0 0 260b 260b +green open twitter2 nYFWZEO7TUiOjLQXBaYJpA 1 0 0 0 260b 260b -------------------------------------------------- // TESTRESPONSE[s/\d+(\.\d+)?[tgmk]?b/\\d+(\\.\\d+)?[tgmk]?b/] // TESTRESPONSE[s/u8FNjxh8Rfy_awN11oDKYQ|nYFWZEO7TUiOjLQXBaYJpA/.+/ _cat] diff --git a/docs/reference/cat/segments.asciidoc b/docs/reference/cat/segments.asciidoc index 88fb18b3637..a4c2c54d8ee 100644 --- a/docs/reference/cat/segments.asciidoc +++ b/docs/reference/cat/segments.asciidoc @@ -17,8 +17,8 @@ might look like: ["source","txt",subs="attributes,callouts"] -------------------------------------------------- index shard prirep ip segment generation docs.count docs.deleted size size.memory committed searchable version compound -test 4 p 127.0.0.1 _0 0 1 0 3kb 2042 false true {lucene_version} true -test1 4 p 127.0.0.1 _0 0 1 0 3kb 2042 false true {lucene_version} true +test 0 p 127.0.0.1 _0 0 1 0 3kb 2042 false true {lucene_version} true +test1 0 p 127.0.0.1 _0 0 1 0 3kb 2042 false true {lucene_version} true -------------------------------------------------- // TESTRESPONSE[s/3kb/\\d+(\\.\\d+)?[mk]?b/ s/2042/\\d+/ _cat] diff --git a/docs/reference/cluster/health.asciidoc b/docs/reference/cluster/health.asciidoc index 6cc99a25476..87c4e17f452 100644 --- a/docs/reference/cluster/health.asciidoc +++ b/docs/reference/cluster/health.asciidoc @@ -3,7 +3,7 @@ The cluster health API allows to get a very simple status on the health of the cluster. For example, on a quiet single node cluster with a single index -with 5 shards and one replica, this: +with one shard and one replica, this: [source,js] -------------------------------------------------- @@ -22,11 +22,11 @@ Returns this: "timed_out" : false, "number_of_nodes" : 1, "number_of_data_nodes" : 1, - "active_primary_shards" : 5, - "active_shards" : 5, + "active_primary_shards" : 1, + "active_shards" : 1, "relocating_shards" : 0, "initializing_shards" : 0, - "unassigned_shards" : 5, + "unassigned_shards" : 1, "delayed_unassigned_shards": 0, "number_of_pending_tasks" : 0, "number_of_in_flight_fetch": 0, diff --git a/docs/reference/getting-started.asciidoc b/docs/reference/getting-started.asciidoc index 937917823f5..d684be80c00 100755 --- a/docs/reference/getting-started.asciidoc +++ b/docs/reference/getting-started.asciidoc @@ -95,7 +95,7 @@ Replication is important for two primary reasons: To summarize, each index can be split into multiple shards. An index can also be replicated zero (meaning no replicas) or more times. Once replicated, each index will have primary shards (the original shards that were replicated from) and replica shards (the copies of the primary shards). The number of shards and replicas can be defined per index at the time the index is created. After the index is created, you may change the number of replicas dynamically anytime but you cannot change the number of shards after-the-fact. 
-By default, each index in Elasticsearch is allocated 5 primary shards and 1 replica which means that if you have at least two nodes in your cluster, your index will have 5 primary shards and another 5 replica shards (1 complete replica) for a total of 10 shards per index. +By default, each index in Elasticsearch is allocated one primary shard and one replica which means that if you have at least two nodes in your cluster, your index will have one primary shard and another replica shard (one complete replica) for a total of two shards per index. NOTE: Each Elasticsearch shard is a Lucene index. There is a maximum number of documents you can have in a single Lucene index. As of https://issues.apache.org/jira/browse/LUCENE-5843[`LUCENE-5843`], the limit is `2,147,483,519` (= Integer.MAX_VALUE - 128) documents. You can monitor shard sizes using the {ref}/cat-shards.html[`_cat/shards`] API. @@ -366,11 +366,11 @@ And the response: [source,txt] -------------------------------------------------- health status index uuid pri rep docs.count docs.deleted store.size pri.store.size -yellow open customer 95SQ4TSUT7mWBT7VNHH67A 5 1 0 0 260b 260b +yellow open customer 95SQ4TSUT7mWBT7VNHH67A 1 1 0 0 260b 260b -------------------------------------------------- // TESTRESPONSE[s/95SQ4TSUT7mWBT7VNHH67A/.+/ s/260b/\\d+\\.?\\d?k?b/ _cat] -The results of the second command tells us that we now have 1 index named customer and it has 5 primary shards and 1 replica (the defaults) and it contains 0 documents in it. +The results of the second command tell us that we now have one index named customer and it has one primary shard and one replica (the defaults) and it contains zero documents in it. You might also notice that the customer index has a yellow health tagged to it. Recall from our previous discussion that yellow means that some replicas are not (yet) allocated. The reason this happens for this index is because Elasticsearch by default created one replica for this index. Since we only have one node running at the moment, that one replica cannot yet be allocated (for high availability) until a later point in time when another node joins the cluster. Once that replica gets allocated onto a second node, the health status for this index will turn to green. diff --git a/docs/reference/glossary.asciidoc b/docs/reference/glossary.asciidoc index 53164d366cd..c6b9309fa32 100644 --- a/docs/reference/glossary.asciidoc +++ b/docs/reference/glossary.asciidoc @@ -105,12 +105,13 @@ you index a document, it is indexed first on the primary shard, then on all <> of the primary shard. + - By default, an <> has 5 primary shards. You can - specify fewer or more primary shards to scale the number of - <> that your index can handle. + By default, an <> has one primary shard. You can specify + more primary shards to scale the number of <> + that your index can handle. + - You cannot change the number of primary shards in an index, once the - index is created. + You cannot change the number of primary shards in an index, once the index is + created. However, an index can be split into a new index using the + <>.
+ See also <> diff --git a/docs/reference/how-to/recipes/stemming.asciidoc b/docs/reference/how-to/recipes/stemming.asciidoc index 4e12dfd7eca..37901cb3abe 100644 --- a/docs/reference/how-to/recipes/stemming.asciidoc +++ b/docs/reference/how-to/recipes/stemming.asciidoc @@ -78,31 +78,31 @@ GET index/_search "took": 2, "timed_out": false, "_shards": { - "total": 5, - "successful": 5, + "total": 1, + "successful": 1, "skipped" : 0, "failed": 0 }, "hits": { "total": 2, - "max_score": 0.2876821, + "max_score": 0.18232156, "hits": [ { "_index": "index", "_type": "_doc", - "_id": "2", - "_score": 0.2876821, + "_id": "1", + "_score": 0.18232156, "_source": { - "body": "A pair of skis" + "body": "Ski resort" } }, { "_index": "index", "_type": "_doc", - "_id": "1", - "_score": 0.2876821, + "_id": "2", + "_score": 0.18232156, "_source": { - "body": "Ski resort" + "body": "A pair of skis" } } ] @@ -136,20 +136,20 @@ GET index/_search "took": 1, "timed_out": false, "_shards": { - "total": 5, - "successful": 5, + "total": 1, + "successful": 1, "skipped" : 0, "failed": 0 }, "hits": { "total": 1, - "max_score": 0.2876821, + "max_score": 0.80259144, "hits": [ { "_index": "index", "_type": "_doc", "_id": "1", - "_score": 0.2876821, + "_score": 0.80259144, "_source": { "body": "Ski resort" } @@ -193,20 +193,20 @@ GET index/_search "took": 2, "timed_out": false, "_shards": { - "total": 5, - "successful": 5, + "total": 1, + "successful": 1, "skipped" : 0, "failed": 0 }, "hits": { "total": 1, - "max_score": 0.2876821, + "max_score": 0.80259144, "hits": [ { "_index": "index", "_type": "_doc", "_id": "1", - "_score": 0.2876821, + "_score": 0.80259144, "_source": { "body": "Ski resort" } diff --git a/docs/reference/indices/flush.asciidoc b/docs/reference/indices/flush.asciidoc index db1f7c2fe00..8583afc96ab 100644 --- a/docs/reference/indices/flush.asciidoc +++ b/docs/reference/indices/flush.asciidoc @@ -106,11 +106,7 @@ which returns something similar to: "num_docs" : 0 } } - ], - "1": ..., - "2": ..., - "3": ..., - "4": ... + ] } } } @@ -120,10 +116,6 @@ which returns something similar to: // TESTRESPONSE[s/"translog_uuid" : "hnOG3xFcTDeoI_kvvvOdNA"/"translog_uuid": $body.indices.twitter.shards.0.0.commit.user_data.translog_uuid/] // TESTRESPONSE[s/"history_uuid" : "XP7KDJGiS1a2fHYiFL5TXQ"/"history_uuid": $body.indices.twitter.shards.0.0.commit.user_data.history_uuid/] // TESTRESPONSE[s/"sync_id" : "AVvFY-071siAOuFGEO9P"/"sync_id": $body.indices.twitter.shards.0.0.commit.user_data.sync_id/] -// TESTRESPONSE[s/"1": \.\.\./"1": $body.indices.twitter.shards.1/] -// TESTRESPONSE[s/"2": \.\.\./"2": $body.indices.twitter.shards.2/] -// TESTRESPONSE[s/"3": \.\.\./"3": $body.indices.twitter.shards.3/] -// TESTRESPONSE[s/"4": \.\.\./"4": $body.indices.twitter.shards.4/] <1> the `sync id` marker [float] diff --git a/docs/reference/indices/shrink-index.asciidoc b/docs/reference/indices/shrink-index.asciidoc index 496ae7253ce..34e90e6799d 100644 --- a/docs/reference/indices/shrink-index.asciidoc +++ b/docs/reference/indices/shrink-index.asciidoc @@ -42,7 +42,7 @@ PUT /my_source_index/_settings } -------------------------------------------------- // CONSOLE -// TEST[s/^/PUT my_source_index\n/] +// TEST[s/^/PUT my_source_index\n{"settings":{"index.number_of_shards":2}}\n/] <1> Forces the relocation of a copy of each shard to the node with name `shrink_node_name`. See <> for more options. 
@@ -119,7 +119,7 @@ POST my_source_index/_shrink/my_target_index?copy_settings=true } -------------------------------------------------- // CONSOLE -// TEST[s/^/PUT my_source_index\n{"settings": {"index.blocks.write": true}}\n/] +// TEST[s/^/PUT my_source_index\n{"settings": {"index.number_of_shards":5,"index.blocks.write": true}}\n/] <1> The number of shards in the target index. This must be a factor of the number of shards in the source index. diff --git a/docs/reference/mapping/params/normalizer.asciidoc b/docs/reference/mapping/params/normalizer.asciidoc index 723f79c5dc4..3688a0e9454 100644 --- a/docs/reference/mapping/params/normalizer.asciidoc +++ b/docs/reference/mapping/params/normalizer.asciidoc @@ -83,31 +83,31 @@ both index and query time. "took": $body.took, "timed_out": false, "_shards": { - "total": 5, - "successful": 5, + "total": 1, + "successful": 1, "skipped" : 0, "failed": 0 }, "hits": { "total": 2, - "max_score": 0.2876821, + "max_score": 0.47000363, "hits": [ { "_index": "index", "_type": "_doc", - "_id": "2", - "_score": 0.2876821, + "_id": "1", + "_score": 0.47000363, "_source": { - "foo": "bar" + "foo": "BÀR" } }, { "_index": "index", "_type": "_doc", - "_id": "1", - "_score": 0.2876821, + "_id": "2", + "_score": 0.47000363, "_source": { - "foo": "BÀR" + "foo": "bar" } } ] @@ -144,8 +144,8 @@ returns "took": 43, "timed_out": false, "_shards": { - "total": 5, - "successful": 5, + "total": 1, + "successful": 1, "skipped" : 0, "failed": 0 }, diff --git a/docs/reference/mapping/types/percolator.asciidoc b/docs/reference/mapping/types/percolator.asciidoc index b5226b53ba0..066d3ce1ac5 100644 --- a/docs/reference/mapping/types/percolator.asciidoc +++ b/docs/reference/mapping/types/percolator.asciidoc @@ -194,8 +194,8 @@ now returns matches from the new index: "took": 3, "timed_out": false, "_shards": { - "total": 5, - "successful": 5, + "total": 1, + "successful": 1, "skipped" : 0, "failed": 0 }, @@ -389,8 +389,8 @@ This results in a response like this: "took": 6, "timed_out": false, "_shards": { - "total": 5, - "successful": 5, + "total": 1, + "successful": 1, "skipped" : 0, "failed": 0 }, @@ -549,8 +549,8 @@ GET /my_queries1/_search "took": 6, "timed_out": false, "_shards": { - "total": 5, - "successful": 5, + "total": 1, + "successful": 1, "skipped": 0, "failed": 0 }, diff --git a/docs/reference/query-dsl/percolate-query.asciidoc b/docs/reference/query-dsl/percolate-query.asciidoc index b6e465e34df..0d2661c37b8 100644 --- a/docs/reference/query-dsl/percolate-query.asciidoc +++ b/docs/reference/query-dsl/percolate-query.asciidoc @@ -83,8 +83,8 @@ The above request will yield the following response: "took": 13, "timed_out": false, "_shards": { - "total": 5, - "successful": 5, + "total": 1, + "successful": 1, "skipped" : 0, "failed": 0 }, @@ -227,8 +227,8 @@ GET /my-index/_search "took": 13, "timed_out": false, "_shards": { - "total": 5, - "successful": 5, + "total": 1, + "successful": 1, "skipped" : 0, "failed": 0 }, @@ -299,7 +299,7 @@ Index response: "failed": 0 }, "result": "created", - "_seq_no" : 0, + "_seq_no" : 1, "_primary_term" : 1 } -------------------------------------------------- @@ -407,8 +407,8 @@ This will yield the following response. 
"took": 7, "timed_out": false, "_shards": { - "total": 5, - "successful": 5, + "total": 1, + "successful": 1, "skipped" : 0, "failed": 0 }, @@ -512,8 +512,8 @@ The slightly different response: "took": 13, "timed_out": false, "_shards": { - "total": 5, - "successful": 5, + "total": 1, + "successful": 1, "skipped" : 0, "failed": 0 }, @@ -608,8 +608,8 @@ The above search request returns a response similar to this: "took": 13, "timed_out": false, "_shards": { - "total": 5, - "successful": 5, + "total": 1, + "successful": 1, "skipped" : 0, "failed": 0 }, diff --git a/docs/reference/query-dsl/terms-set-query.asciidoc b/docs/reference/query-dsl/terms-set-query.asciidoc index 35bc17e1f0f..29b349c3b7a 100644 --- a/docs/reference/query-dsl/terms-set-query.asciidoc +++ b/docs/reference/query-dsl/terms-set-query.asciidoc @@ -68,20 +68,20 @@ Response: "took": 13, "timed_out": false, "_shards": { - "total": 5, - "successful": 5, + "total": 1, + "successful": 1, "skipped" : 0, "failed": 0 }, "hits": { "total": 1, - "max_score": 0.5753642, + "max_score": 0.87546873, "hits": [ { "_index": "my-index", "_type": "_doc", "_id": "2", - "_score": 0.5753642, + "_score": 0.87546873, "_source": { "codes": ["def", "ghi"], "required_matches": 2 diff --git a/docs/reference/search/count.asciidoc b/docs/reference/search/count.asciidoc index f1c6cf7c573..5c01fa53d45 100644 --- a/docs/reference/search/count.asciidoc +++ b/docs/reference/search/count.asciidoc @@ -37,8 +37,8 @@ tweets from the `twitter` index for a certain user. The result is: { "count" : 1, "_shards" : { - "total" : 5, - "successful" : 5, + "total" : 1, + "successful" : 1, "skipped" : 0, "failed" : 0 } diff --git a/docs/reference/search/search-shards.asciidoc b/docs/reference/search/search-shards.asciidoc index 1a7c4554576..90ee35afa61 100644 --- a/docs/reference/search/search-shards.asciidoc +++ b/docs/reference/search/search-shards.asciidoc @@ -18,7 +18,7 @@ Full example: GET /twitter/_search_shards -------------------------------------------------- // CONSOLE -// TEST[s/^/PUT twitter\n/] +// TEST[s/^/PUT twitter\n{"settings":{"index.number_of_shards":5}}\n/] This will yield the following result: @@ -103,7 +103,7 @@ And specifying the same request, this time with a routing value: GET /twitter/_search_shards?routing=foo,bar -------------------------------------------------- // CONSOLE -// TEST[s/^/PUT twitter\n/] +// TEST[s/^/PUT twitter\n{"settings":{"index.number_of_shards":5}}\n/] This will yield the following result: diff --git a/docs/reference/search/suggesters/completion-suggest.asciidoc b/docs/reference/search/suggesters/completion-suggest.asciidoc index e3101a5dfb4..9f9833bde9d 100644 --- a/docs/reference/search/suggesters/completion-suggest.asciidoc +++ b/docs/reference/search/suggesters/completion-suggest.asciidoc @@ -177,8 +177,8 @@ returns this response: -------------------------------------------------- { "_shards" : { - "total" : 5, - "successful" : 5, + "total" : 1, + "successful" : 1, "skipped" : 0, "failed" : 0 }, @@ -251,8 +251,8 @@ Which should look like: "took": 6, "timed_out": false, "_shards" : { - "total" : 5, - "successful" : 5, + "total" : 1, + "successful" : 1, "skipped" : 0, "failed" : 0 }, diff --git a/docs/reference/search/validate.asciidoc b/docs/reference/search/validate.asciidoc index 2c0c8821355..20894e5773a 100644 --- a/docs/reference/search/validate.asciidoc +++ b/docs/reference/search/validate.asciidoc @@ -218,8 +218,8 @@ Response: { "valid": true, "_shards": { - "total": 5, - "successful": 5, + "total": 1, + 
"successful": 1, "failed": 0 }, "explanations": [ @@ -227,31 +227,7 @@ Response: "index": "twitter", "shard": 0, "valid": true, - "explanation": "user:kimchy~2" - }, - { - "index": "twitter", - "shard": 1, - "valid": true, - "explanation": "user:kimchy~2" - }, - { - "index": "twitter", - "shard": 2, - "valid": true, - "explanation": "user:kimchy~2" - }, - { - "index": "twitter", - "shard": 3, - "valid": true, - "explanation": "(user:kimchi)^0.8333333" - }, - { - "index": "twitter", - "shard": 4, - "valid": true, - "explanation": "user:kimchy" + "explanation": "(user:kimchi)^0.8333333 user:kimchy" } ] } diff --git a/modules/reindex/src/test/resources/rest-api-spec/test/reindex/35_search_failures.yml b/modules/reindex/src/test/resources/rest-api-spec/test/reindex/35_search_failures.yml index 605891d2b32..70e78f7e36b 100644 --- a/modules/reindex/src/test/resources/rest-api-spec/test/reindex/35_search_failures.yml +++ b/modules/reindex/src/test/resources/rest-api-spec/test/reindex/35_search_failures.yml @@ -1,5 +1,12 @@ --- "Response format for search failures": + - do: + indices.create: + index: source + body: + settings: + index.number_of_shards: 2 + - do: index: index: source @@ -26,7 +33,7 @@ - match: {updated: 0} - match: {version_conflicts: 0} - match: {batches: 0} - - is_true: failures.0.shard + - match: {failures.0.shard: 0} - match: {failures.0.index: source} - is_true: failures.0.node - match: {failures.0.reason.type: script_exception} diff --git a/modules/reindex/src/test/resources/rest-api-spec/test/update_by_query/35_search_failure.yml b/modules/reindex/src/test/resources/rest-api-spec/test/update_by_query/35_search_failure.yml index 8ace77eee59..17f422453ce 100644 --- a/modules/reindex/src/test/resources/rest-api-spec/test/update_by_query/35_search_failure.yml +++ b/modules/reindex/src/test/resources/rest-api-spec/test/update_by_query/35_search_failure.yml @@ -1,5 +1,12 @@ --- "Response format for search failures": + - do: + indices.create: + index: source + body: + settings: + index.number_of_shards: 2 + - do: index: index: source @@ -22,7 +29,7 @@ - match: {updated: 0} - match: {version_conflicts: 0} - match: {batches: 0} - - is_true: failures.0.shard + - match: {failures.0.shard: 0} - match: {failures.0.index: source} - is_true: failures.0.node - match: {failures.0.reason.type: script_exception} diff --git a/qa/multi-cluster-search/src/test/resources/rest-api-spec/test/multi_cluster/10_basic.yml b/qa/multi-cluster-search/src/test/resources/rest-api-spec/test/multi_cluster/10_basic.yml index 7726a1df0b1..8617ecc1fe2 100644 --- a/qa/multi-cluster-search/src/test/resources/rest-api-spec/test/multi_cluster/10_basic.yml +++ b/qa/multi-cluster-search/src/test/resources/rest-api-spec/test/multi_cluster/10_basic.yml @@ -161,7 +161,7 @@ search: index: my_remote_cluster:aliased_test_index,my_remote_cluster:field_caps_index_1 - - match: { _shards.total: 8 } + - match: { _shards.total: 4 } - match: { hits.total: 2 } - match: { hits.hits.0._source.filter_field: 1 } - match: { hits.hits.0._index: "my_remote_cluster:test_index" } diff --git a/qa/multi-cluster-search/src/test/resources/rest-api-spec/test/remote_cluster/10_basic.yml b/qa/multi-cluster-search/src/test/resources/rest-api-spec/test/remote_cluster/10_basic.yml index d37bb5a1825..c2840c1ce98 100644 --- a/qa/multi-cluster-search/src/test/resources/rest-api-spec/test/remote_cluster/10_basic.yml +++ b/qa/multi-cluster-search/src/test/resources/rest-api-spec/test/remote_cluster/10_basic.yml @@ -27,6 +27,8 @@ indices.create: index: 
field_caps_index_1 body: + settings: + index.number_of_shards: 1 mappings: t: properties: @@ -51,6 +53,8 @@ indices.create: index: field_caps_index_3 body: + settings: + index.number_of_shards: 1 mappings: t: properties: diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/cat.templates/10_basic.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/cat.templates/10_basic.yml index 403b0b740c5..78b7a427757 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/cat.templates/10_basic.yml +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/cat.templates/10_basic.yml @@ -14,6 +14,8 @@ --- "No templates": + - skip: + features: default_shards - do: cat.templates: {} @@ -174,6 +176,8 @@ --- "Sort templates": + - skip: + features: default_shards - do: indices.put_template: name: test @@ -222,6 +226,8 @@ --- "Multiple template": + - skip: + features: default_shards - do: indices.put_template: name: test_1 diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/indices.shrink/10_basic.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/indices.shrink/10_basic.yml index 4d98eade8f7..a88b37ead31 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/indices.shrink/10_basic.yml +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/indices.shrink/10_basic.yml @@ -24,7 +24,8 @@ settings: # ensure everything is allocated on a single node index.routing.allocation.include._id: $master - number_of_replicas: 0 + index.number_of_shards: 2 + index.number_of_replicas: 0 - do: index: index: source diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/indices.shrink/20_source_mapping.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/indices.shrink/20_source_mapping.yml index 07b3515b50c..ee7b2215d21 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/indices.shrink/20_source_mapping.yml +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/indices.shrink/20_source_mapping.yml @@ -20,7 +20,8 @@ settings: # ensure everything is allocated on a single node index.routing.allocation.include._id: $master - number_of_replicas: 0 + index.number_of_shards: 2 + index.number_of_replicas: 0 mappings: test: properties: diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/indices.shrink/30_copy_settings.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/indices.shrink/30_copy_settings.yml index 6e595921d7f..50438384b3a 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/indices.shrink/30_copy_settings.yml +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/indices.shrink/30_copy_settings.yml @@ -19,6 +19,7 @@ settings: # ensure everything is allocated on the master node index.routing.allocation.include._id: $master + index.number_of_shards: 2 index.number_of_replicas: 0 index.merge.scheduler.max_merge_count: 4 diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/search.aggregation/240_max_buckets.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/search.aggregation/240_max_buckets.yml index 86c6c632d5e..f8d960e0c25 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/search.aggregation/240_max_buckets.yml +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/search.aggregation/240_max_buckets.yml @@ -17,14 +17,14 @@ setup: index: test type: doc id: 1 - body: { "date": "2014-03-03T00:00:00", "keyword": "foo" } + body: { "date": "2014-03-03T00:00:00", "keyword": "dgx" } - do: index: index: test type: doc id: 2 - body: { "date": "2015-03-03T00:00:00", "keyword": 
"bar" } + body: { "date": "2015-03-03T00:00:00", "keyword": "dfs" } - do: index: @@ -38,7 +38,36 @@ setup: index: test type: doc id: 4 - body: { "date": "2017-03-03T00:00:00" } + body: { "date": "2017-03-03T00:00:00", "keyword": "foo" } + + - do: + index: + index: test + type: doc + id: 5 + body: { "date": "2018-03-03T00:00:00", "keyword": "bar" } + + - do: + index: + index: test + type: doc + id: 6 + body: { "date": "2019-03-03T00:00:00", "keyword": "baz" } + + - do: + index: + index: test + type: doc + id: 7 + body: { "date": "2020-03-03T00:00:00", "keyword": "qux" } + + - do: + index: + index: test + type: doc + id: 8 + body: { "date": "2021-03-03T00:00:00", "keyword": "quux" } + - do: indices.refresh: diff --git a/server/src/main/java/org/elasticsearch/cluster/metadata/MetaDataCreateIndexService.java b/server/src/main/java/org/elasticsearch/cluster/metadata/MetaDataCreateIndexService.java index 166aad3ecaa..0d8a374e66d 100644 --- a/server/src/main/java/org/elasticsearch/cluster/metadata/MetaDataCreateIndexService.java +++ b/server/src/main/java/org/elasticsearch/cluster/metadata/MetaDataCreateIndexService.java @@ -366,8 +366,14 @@ public class MetaDataCreateIndexService extends AbstractComponent { } // now, put the request settings, so they override templates indexSettingsBuilder.put(request.settings()); + if (indexSettingsBuilder.get(SETTING_VERSION_CREATED) == null) { + DiscoveryNodes nodes = currentState.nodes(); + final Version createdVersion = Version.min(Version.CURRENT, nodes.getSmallestNonClientNodeVersion()); + indexSettingsBuilder.put(SETTING_VERSION_CREATED, createdVersion); + } if (indexSettingsBuilder.get(SETTING_NUMBER_OF_SHARDS) == null) { - indexSettingsBuilder.put(SETTING_NUMBER_OF_SHARDS, settings.getAsInt(SETTING_NUMBER_OF_SHARDS, 5)); + final int numberOfShards = getNumberOfShards(indexSettingsBuilder); + indexSettingsBuilder.put(SETTING_NUMBER_OF_SHARDS, settings.getAsInt(SETTING_NUMBER_OF_SHARDS, numberOfShards)); } if (indexSettingsBuilder.get(SETTING_NUMBER_OF_REPLICAS) == null) { indexSettingsBuilder.put(SETTING_NUMBER_OF_REPLICAS, settings.getAsInt(SETTING_NUMBER_OF_REPLICAS, 1)); @@ -376,12 +382,6 @@ public class MetaDataCreateIndexService extends AbstractComponent { indexSettingsBuilder.put(SETTING_AUTO_EXPAND_REPLICAS, settings.get(SETTING_AUTO_EXPAND_REPLICAS)); } - if (indexSettingsBuilder.get(SETTING_VERSION_CREATED) == null) { - DiscoveryNodes nodes = currentState.nodes(); - final Version createdVersion = Version.min(Version.CURRENT, nodes.getSmallestNonClientNodeVersion()); - indexSettingsBuilder.put(SETTING_VERSION_CREATED, createdVersion); - } - if (indexSettingsBuilder.get(SETTING_CREATION_DATE) == null) { indexSettingsBuilder.put(SETTING_CREATION_DATE, new DateTime(DateTimeZone.UTC).getMillis()); } @@ -573,6 +573,18 @@ public class MetaDataCreateIndexService extends AbstractComponent { } } + static int getNumberOfShards(final Settings.Builder indexSettingsBuilder) { + // TODO: this logic can be removed when the current major version is 8 + assert Version.CURRENT.major == 7; + final int numberOfShards; + if (Version.fromId(Integer.parseInt(indexSettingsBuilder.get(SETTING_VERSION_CREATED))).before(Version.V_7_0_0_alpha1)) { + numberOfShards = 5; + } else { + numberOfShards = 1; + } + return numberOfShards; + } + @Override public void onFailure(String source, Exception e) { if (e instanceof ResourceAlreadyExistsException) { diff --git a/server/src/test/java/org/elasticsearch/cluster/allocation/FilteringAllocationIT.java 
b/server/src/test/java/org/elasticsearch/cluster/allocation/FilteringAllocationIT.java index d887387d43f..ccdc1d6ab33 100644 --- a/server/src/test/java/org/elasticsearch/cluster/allocation/FilteringAllocationIT.java +++ b/server/src/test/java/org/elasticsearch/cluster/allocation/FilteringAllocationIT.java @@ -96,7 +96,7 @@ public class FilteringAllocationIT extends ESIntegTestCase { logger.info("--> creating an index with no replicas"); client().admin().indices().prepareCreate("test") - .setSettings(Settings.builder().put("index.number_of_replicas", 0)) + .setSettings(Settings.builder().put("index.number_of_shards", 2).put("index.number_of_replicas", 0)) .execute().actionGet(); ensureGreen(); diff --git a/server/src/test/java/org/elasticsearch/cluster/metadata/IndexCreationTaskTests.java b/server/src/test/java/org/elasticsearch/cluster/metadata/IndexCreationTaskTests.java index ad36457bde5..de8251ece25 100644 --- a/server/src/test/java/org/elasticsearch/cluster/metadata/IndexCreationTaskTests.java +++ b/server/src/test/java/org/elasticsearch/cluster/metadata/IndexCreationTaskTests.java @@ -185,7 +185,7 @@ public class IndexCreationTaskTests extends ESTestCase { public void testDefaultSettings() throws Exception { final ClusterState result = executeTask(); - assertThat(result.getMetaData().index("test").getSettings().get(SETTING_NUMBER_OF_SHARDS), equalTo("5")); + assertThat(result.getMetaData().index("test").getSettings().get(SETTING_NUMBER_OF_SHARDS), equalTo("1")); } public void testSettingsFromClusterState() throws Exception { diff --git a/server/src/test/java/org/elasticsearch/cluster/metadata/MetaDataCreateIndexServiceTests.java b/server/src/test/java/org/elasticsearch/cluster/metadata/MetaDataCreateIndexServiceTests.java index d5f3d71d7ee..24f5a696561 100644 --- a/server/src/test/java/org/elasticsearch/cluster/metadata/MetaDataCreateIndexServiceTests.java +++ b/server/src/test/java/org/elasticsearch/cluster/metadata/MetaDataCreateIndexServiceTests.java @@ -56,6 +56,7 @@ import java.util.stream.Collectors; import java.util.stream.Stream; import static java.util.Collections.emptyMap; +import static org.elasticsearch.cluster.metadata.IndexMetaData.SETTING_VERSION_CREATED; import static org.hamcrest.Matchers.endsWith; import static org.hamcrest.Matchers.equalTo; @@ -92,6 +93,21 @@ public class MetaDataCreateIndexServiceTests extends ESTestCase { return source * x == target; } + public void testNumberOfShards() { + { + final Version versionCreated = VersionUtils.randomVersionBetween( + random(), + Version.V_6_0_0_alpha1, VersionUtils.getPreviousVersion(Version.V_7_0_0_alpha1)); + final Settings.Builder indexSettingsBuilder = Settings.builder().put(SETTING_VERSION_CREATED, versionCreated); + assertThat(MetaDataCreateIndexService.IndexCreationTask.getNumberOfShards(indexSettingsBuilder), equalTo(5)); + } + { + final Version versionCreated = VersionUtils.randomVersionBetween(random(), Version.V_7_0_0_alpha1, Version.CURRENT); + final Settings.Builder indexSettingsBuilder = Settings.builder().put(SETTING_VERSION_CREATED, versionCreated); + assertThat(MetaDataCreateIndexService.IndexCreationTask.getNumberOfShards(indexSettingsBuilder), equalTo(1)); + } + } + public void testValidateShrinkIndex() { int numShards = randomIntBetween(2, 42); ClusterState state = createClusterState("source", numShards, randomIntBetween(0, 10), diff --git a/test/framework/src/main/java/org/elasticsearch/test/rest/yaml/ESClientYamlSuiteTestCase.java 
b/test/framework/src/main/java/org/elasticsearch/test/rest/yaml/ESClientYamlSuiteTestCase.java index 950bb14eed9..ab99bc0d97b 100644 --- a/test/framework/src/main/java/org/elasticsearch/test/rest/yaml/ESClientYamlSuiteTestCase.java +++ b/test/framework/src/main/java/org/elasticsearch/test/rest/yaml/ESClientYamlSuiteTestCase.java @@ -21,7 +21,10 @@ package org.elasticsearch.test.rest.yaml; import com.carrotsearch.randomizedtesting.RandomizedTest; import org.apache.http.HttpHost; +import org.apache.http.entity.StringEntity; +import org.apache.http.message.BasicHeader; import org.elasticsearch.Version; +import org.elasticsearch.client.Request; import org.elasticsearch.client.Response; import org.elasticsearch.client.ResponseException; import org.elasticsearch.client.RestClient; @@ -29,6 +32,7 @@ import org.elasticsearch.common.Strings; import org.elasticsearch.common.collect.Tuple; import org.elasticsearch.common.io.PathUtils; import org.elasticsearch.common.xcontent.NamedXContentRegistry; +import org.elasticsearch.common.xcontent.XContentType; import org.elasticsearch.test.rest.ESRestTestCase; import org.elasticsearch.test.rest.yaml.restspec.ClientYamlSuiteRestApi; import org.elasticsearch.test.rest.yaml.restspec.ClientYamlSuiteRestSpec; @@ -38,6 +42,7 @@ import org.elasticsearch.test.rest.yaml.section.DoSection; import org.elasticsearch.test.rest.yaml.section.ExecutableSection; import org.junit.AfterClass; import org.junit.Before; +import org.junit.BeforeClass; import java.io.IOException; import java.nio.file.Files; @@ -94,6 +99,13 @@ public abstract class ESClientYamlSuiteTestCase extends ESRestTestCase { this.testCandidate = testCandidate; } + private static boolean useDefaultNumberOfShards; + + @BeforeClass + public static void initializeUseDefaultNumberOfShards() { + useDefaultNumberOfShards = usually(); + } + @Before public void initAndResetContext() throws Exception { if (restTestExecutionContext == null) { @@ -318,6 +330,14 @@ public abstract class ESClientYamlSuiteTestCase extends ESRestTestCase { throw new IllegalArgumentException("No executable sections loaded for [" + testCandidate.getTestPath() + "]"); } + if (useDefaultNumberOfShards == false + && testCandidate.getTestSection().getSkipSection().getFeatures().contains("default_shards") == false) { + final Request request = new Request("PUT", "/_template/global"); + request.setHeaders(new BasicHeader("Content-Type", XContentType.JSON.mediaTypeWithoutParameters())); + request.setEntity(new StringEntity("{\"index_patterns\":[\"*\"],\"settings\":{\"index.number_of_shards\":2}}")); + adminClient().performRequest(request); + } + if (!testCandidate.getSetupSection().isEmpty()) { logger.debug("start setup test [{}]", testCandidate.getTestPath()); for (DoSection doSection : testCandidate.getSetupSection().getDoSections()) { diff --git a/test/framework/src/main/java/org/elasticsearch/test/rest/yaml/Features.java b/test/framework/src/main/java/org/elasticsearch/test/rest/yaml/Features.java index ab9be65514a..d074dd82af7 100644 --- a/test/framework/src/main/java/org/elasticsearch/test/rest/yaml/Features.java +++ b/test/framework/src/main/java/org/elasticsearch/test/rest/yaml/Features.java @@ -37,6 +37,7 @@ import static java.util.Collections.unmodifiableList; public final class Features { private static final List SUPPORTED = unmodifiableList(Arrays.asList( "catch_unauthorized", + "default_shards", "embedded_stash_key", "headers", "stash_in_key", diff --git 
a/x-pack/qa/sql/security/src/test/java/org/elasticsearch/xpack/qa/sql/security/RestSqlSecurityIT.java b/x-pack/qa/sql/security/src/test/java/org/elasticsearch/xpack/qa/sql/security/RestSqlSecurityIT.java index 5833ef6dae5..f7abb6f64f6 100644 --- a/x-pack/qa/sql/security/src/test/java/org/elasticsearch/xpack/qa/sql/security/RestSqlSecurityIT.java +++ b/x-pack/qa/sql/security/src/test/java/org/elasticsearch/xpack/qa/sql/security/RestSqlSecurityIT.java @@ -236,11 +236,7 @@ public class RestSqlSecurityIT extends SqlSecurityTestCase { createAuditLogAsserter() .expectSqlCompositeAction("test_admin", "test") .expect(true, SQL_ACTION_NAME, "full_access", empty()) - // One scroll access denied per shard - .expect("access_denied", SQL_ACTION_NAME, "full_access", "default_native", empty(), "InternalScrollSearchRequest") - .expect("access_denied", SQL_ACTION_NAME, "full_access", "default_native", empty(), "InternalScrollSearchRequest") - .expect("access_denied", SQL_ACTION_NAME, "full_access", "default_native", empty(), "InternalScrollSearchRequest") - .expect("access_denied", SQL_ACTION_NAME, "full_access", "default_native", empty(), "InternalScrollSearchRequest") + // one scroll access denied per shard .expect("access_denied", SQL_ACTION_NAME, "full_access", "default_native", empty(), "InternalScrollSearchRequest") .assertLogs(); } From 2f4212b80a86724fbe73e83c8063f8deccd87f48 Mon Sep 17 00:00:00 2001 From: Luca Cavanna Date: Mon, 14 May 2018 19:56:50 +0200 Subject: [PATCH 10/31] Fold RestGetAllSettingsAction in RestGetSettingsAction (#30561) We currently have a separate endpoint for retrieving settings from all indices. We introduced that endpoint when removing comma-separated feature parsing for GetIndicesAction. The RestGetAllSettingsAction duplicates the code to print out the response that we already have in GetSettingsResponse (since it became a ToXContentObject), and uses the get index API internally instead of the get settings API, but the response is the same. We can therefore fold get-all-settings and get-settings into a single API, which is what this commit does.
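For illustration, after this fold both of the request shapes below are served by the same RestGetSettingsAction handler and produce the same response format (a hypothetical low-level client snippet; client is assumed to be a RestClient and twitter an existing index):

    Response allSettings = client.performRequest(new Request("GET", "/_settings"));
    Response oneIndexSettings = client.performRequest(new Request("GET", "/twitter/_settings"));

The only functional difference between the two is the index pattern in the URL.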
--- .../elasticsearch/action/ActionModule.java | 2 - .../indices/RestGetAllSettingsAction.java | 121 ------------------ .../admin/indices/RestGetSettingsAction.java | 5 +- 3 files changed, 1 insertion(+), 127 deletions(-) delete mode 100644 server/src/main/java/org/elasticsearch/rest/action/admin/indices/RestGetAllSettingsAction.java diff --git a/server/src/main/java/org/elasticsearch/action/ActionModule.java b/server/src/main/java/org/elasticsearch/action/ActionModule.java index 42ff4322403..fa4d751a54a 100644 --- a/server/src/main/java/org/elasticsearch/action/ActionModule.java +++ b/server/src/main/java/org/elasticsearch/action/ActionModule.java @@ -253,7 +253,6 @@ import org.elasticsearch.rest.action.admin.indices.RestForceMergeAction; import org.elasticsearch.rest.action.admin.indices.RestGetAliasesAction; import org.elasticsearch.rest.action.admin.indices.RestGetAllAliasesAction; import org.elasticsearch.rest.action.admin.indices.RestGetAllMappingsAction; -import org.elasticsearch.rest.action.admin.indices.RestGetAllSettingsAction; import org.elasticsearch.rest.action.admin.indices.RestGetFieldMappingAction; import org.elasticsearch.rest.action.admin.indices.RestGetIndexTemplateAction; import org.elasticsearch.rest.action.admin.indices.RestGetIndicesAction; @@ -558,7 +557,6 @@ public class ActionModule extends AbstractModule { registerHandler.accept(new RestGetAllAliasesAction(settings, restController)); registerHandler.accept(new RestGetAllMappingsAction(settings, restController)); - registerHandler.accept(new RestGetAllSettingsAction(settings, restController, indexScopedSettings, settingsFilter)); registerHandler.accept(new RestGetIndicesAction(settings, restController, indexScopedSettings, settingsFilter)); registerHandler.accept(new RestIndicesStatsAction(settings, restController)); registerHandler.accept(new RestIndicesSegmentsAction(settings, restController)); diff --git a/server/src/main/java/org/elasticsearch/rest/action/admin/indices/RestGetAllSettingsAction.java b/server/src/main/java/org/elasticsearch/rest/action/admin/indices/RestGetAllSettingsAction.java deleted file mode 100644 index f51cee37ad3..00000000000 --- a/server/src/main/java/org/elasticsearch/rest/action/admin/indices/RestGetAllSettingsAction.java +++ /dev/null @@ -1,121 +0,0 @@ -/* - * Licensed to Elasticsearch under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. 
- */ - -package org.elasticsearch.rest.action.admin.indices; - -import org.elasticsearch.action.admin.indices.get.GetIndexRequest; -import org.elasticsearch.action.admin.indices.get.GetIndexRequest.Feature; -import org.elasticsearch.action.admin.indices.get.GetIndexResponse; -import org.elasticsearch.action.support.IndicesOptions; -import org.elasticsearch.client.node.NodeClient; -import org.elasticsearch.cluster.metadata.AliasMetaData; -import org.elasticsearch.common.Strings; -import org.elasticsearch.common.settings.IndexScopedSettings; -import org.elasticsearch.common.settings.Settings; -import org.elasticsearch.common.settings.SettingsFilter; -import org.elasticsearch.common.xcontent.ToXContent.Params; -import org.elasticsearch.common.xcontent.XContentBuilder; -import org.elasticsearch.rest.BaseRestHandler; -import org.elasticsearch.rest.BytesRestResponse; -import org.elasticsearch.rest.RestController; -import org.elasticsearch.rest.RestRequest; -import org.elasticsearch.rest.RestResponse; -import org.elasticsearch.rest.action.RestBuilderListener; - -import java.io.IOException; -import java.util.List; -import java.util.Set; - -import static org.elasticsearch.rest.RestRequest.Method.GET; -import static org.elasticsearch.rest.RestRequest.Method.HEAD; -import static org.elasticsearch.rest.RestStatus.OK; - -/** - * The REST handler for retrieving all settings - */ -public class RestGetAllSettingsAction extends BaseRestHandler { - - private final IndexScopedSettings indexScopedSettings; - private final SettingsFilter settingsFilter; - - public RestGetAllSettingsAction(final Settings settings, final RestController controller, - final IndexScopedSettings indexScopedSettings, final SettingsFilter settingsFilter) { - super(settings); - this.indexScopedSettings = indexScopedSettings; - controller.registerHandler(GET, "/_settings", this); - this.settingsFilter = settingsFilter; - } - - @Override - public String getName() { - return "get_all_settings_action"; - } - - @Override - public RestChannelConsumer prepareRequest(final RestRequest request, final NodeClient client) throws IOException { - final GetIndexRequest getIndexRequest = new GetIndexRequest(); - getIndexRequest.indices(Strings.EMPTY_ARRAY); - getIndexRequest.features(Feature.SETTINGS); - getIndexRequest.indicesOptions(IndicesOptions.fromRequest(request, getIndexRequest.indicesOptions())); - getIndexRequest.local(request.paramAsBoolean("local", getIndexRequest.local())); - getIndexRequest.humanReadable(request.paramAsBoolean("human", false)); - // This is required so the "flat_settings" parameter counts as consumed - request.paramAsBoolean("flat_settings", false); - final boolean defaults = request.paramAsBoolean("include_defaults", false); - return channel -> client.admin().indices().getIndex(getIndexRequest, new RestBuilderListener(channel) { - - @Override - public RestResponse buildResponse(final GetIndexResponse response, final XContentBuilder builder) throws Exception { - builder.startObject(); - { - for (final String index : response.indices()) { - builder.startObject(index); - { - writeSettings(response.settings().get(index), builder, request, defaults); - } - builder.endObject(); - } - } - builder.endObject(); - - return new BytesRestResponse(OK, builder); - } - - - private void writeSettings(final Settings settings, final XContentBuilder builder, - final Params params, final boolean defaults) throws IOException { - builder.startObject("settings"); - { - settings.toXContent(builder, params); - } - builder.endObject(); - if 
(defaults) { - builder.startObject("defaults"); - { - settingsFilter - .filter(indexScopedSettings.diff(settings, RestGetAllSettingsAction.this.settings)) - .toXContent(builder, request); - } - builder.endObject(); - } - } - }); - } - -} diff --git a/server/src/main/java/org/elasticsearch/rest/action/admin/indices/RestGetSettingsAction.java b/server/src/main/java/org/elasticsearch/rest/action/admin/indices/RestGetSettingsAction.java index 9791994c773..d9fa50cf941 100644 --- a/server/src/main/java/org/elasticsearch/rest/action/admin/indices/RestGetSettingsAction.java +++ b/server/src/main/java/org/elasticsearch/rest/action/admin/indices/RestGetSettingsAction.java @@ -19,16 +19,12 @@ package org.elasticsearch.rest.action.admin.indices; -import com.carrotsearch.hppc.cursors.ObjectObjectCursor; - import org.elasticsearch.action.admin.indices.settings.get.GetSettingsRequest; import org.elasticsearch.action.admin.indices.settings.get.GetSettingsResponse; import org.elasticsearch.action.support.IndicesOptions; import org.elasticsearch.client.node.NodeClient; import org.elasticsearch.common.Strings; -import org.elasticsearch.common.settings.IndexScopedSettings; import org.elasticsearch.common.settings.Settings; -import org.elasticsearch.common.settings.SettingsFilter; import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.rest.BaseRestHandler; import org.elasticsearch.rest.BytesRestResponse; @@ -46,6 +42,7 @@ public class RestGetSettingsAction extends BaseRestHandler { public RestGetSettingsAction(Settings settings, RestController controller) { super(settings); + controller.registerHandler(GET, "/_settings", this); controller.registerHandler(GET, "/_settings/{name}", this); controller.registerHandler(GET, "/{index}/_settings", this); controller.registerHandler(GET, "/{index}/_settings/{name}", this); From d5f028e0853609a61ee59199784d6abad547191c Mon Sep 17 00:00:00 2001 From: Yannick Welsch Date: Mon, 14 May 2018 20:12:52 +0200 Subject: [PATCH 11/31] Auto-expand replicas only after failing nodes (#30553) #30423 combined auto-expansion in the same cluster state update where nodes are removed. As the auto-expansion step would run before deassociating the dead nodes from the routing table, the auto-expansion would possibly remove replicas from live nodes instead of dead ones. This commit reverses the order to ensure that when nodes leave the cluster, the auto-expand-replica functionality only triggers after failing the shards on the removed nodes. This ensures that active shards on other live nodes are not failed if the primary resided on a now dead node. Instead, one of the replicas on the live nodes first gets promoted to primary, and the auto-expansion (removing replicas) only triggers in a follow-up step (but still in the same cluster state update).
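To make the ordering concrete, a deliberately simplified toy model (plain Java, not the actual AllocationService code; all names here are invented for illustration):

    import java.util.ArrayList;
    import java.util.Arrays;
    import java.util.List;

    public class AutoExpandOrderSketch {
        public static void main(String[] args) {
            // One primary on a node that just died, one replica on a live node.
            List<String> copies = new ArrayList<>(Arrays.asList("primary@deadNode", "replica@liveNode"));
            // 1. Deassociate the dead node first, i.e. fail the shard copies it held.
            copies.removeIf(copy -> copy.endsWith("@deadNode"));
            // 2. Promote a surviving replica to primary.
            copies.set(0, copies.get(0).replace("replica", "primary"));
            // 3. Only now may auto-expand trim surplus replicas; the surviving copy
            //    is the new primary, so no live shard data is discarded.
            System.out.println(copies); // prints [primary@liveNode]
        }
    }

Had step 3 run before step 1, auto-expand could have removed the replica on the live node while the primary still pointed at the dead node.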
Relates to #30456 and follow-up of #30423 --- .../routing/allocation/AllocationService.java | 42 +++--- .../discovery/zen/NodeJoinController.java | 4 +- .../metadata/AutoExpandReplicasTests.java | 128 ++++++++++++++++++ .../indices/cluster/ClusterStateChanges.java | 10 ++ 4 files changed, 164 insertions(+), 20 deletions(-) diff --git a/server/src/main/java/org/elasticsearch/cluster/routing/allocation/AllocationService.java b/server/src/main/java/org/elasticsearch/cluster/routing/allocation/AllocationService.java index deb10b83b5a..569ddd6cee7 100644 --- a/server/src/main/java/org/elasticsearch/cluster/routing/allocation/AllocationService.java +++ b/server/src/main/java/org/elasticsearch/cluster/routing/allocation/AllocationService.java @@ -114,11 +114,24 @@ public class AllocationService extends AbstractComponent { } protected ClusterState buildResultAndLogHealthChange(ClusterState oldState, RoutingAllocation allocation, String reason) { - RoutingTable oldRoutingTable = oldState.routingTable(); - RoutingNodes newRoutingNodes = allocation.routingNodes(); + ClusterState newState = buildResult(oldState, allocation); + + logClusterHealthStateChange( + new ClusterStateHealth(oldState), + new ClusterStateHealth(newState), + reason + ); + + return newState; + } + + private ClusterState buildResult(ClusterState oldState, RoutingAllocation allocation) { + final RoutingTable oldRoutingTable = oldState.routingTable(); + final RoutingNodes newRoutingNodes = allocation.routingNodes(); final RoutingTable newRoutingTable = new RoutingTable.Builder().updateNodes(oldRoutingTable.version(), newRoutingNodes).build(); - MetaData newMetaData = allocation.updateMetaDataWithRoutingChanges(newRoutingTable); + final MetaData newMetaData = allocation.updateMetaDataWithRoutingChanges(newRoutingTable); assert newRoutingTable.validate(newMetaData); // validates the routing table is coherent with the cluster state metadata + final ClusterState.Builder newStateBuilder = ClusterState.builder(oldState) .routingTable(newRoutingTable) .metaData(newMetaData); @@ -131,13 +144,7 @@ public class AllocationService extends AbstractComponent { newStateBuilder.customs(customsBuilder.build()); } } - final ClusterState newState = newStateBuilder.build(); - logClusterHealthStateChange( - new ClusterStateHealth(oldState), - new ClusterStateHealth(newState), - reason - ); - return newState; + return newStateBuilder.build(); } // Used for testing @@ -209,24 +216,23 @@ public class AllocationService extends AbstractComponent { * if needed. 
*/ public ClusterState deassociateDeadNodes(ClusterState clusterState, boolean reroute, String reason) { - ClusterState fixedClusterState = adaptAutoExpandReplicas(clusterState); - RoutingNodes routingNodes = getMutableRoutingNodes(fixedClusterState); + RoutingNodes routingNodes = getMutableRoutingNodes(clusterState); // shuffle the unassigned nodes, just so we won't have things like poison failed shards routingNodes.unassigned().shuffle(); - RoutingAllocation allocation = new RoutingAllocation(allocationDeciders, routingNodes, fixedClusterState, + RoutingAllocation allocation = new RoutingAllocation(allocationDeciders, routingNodes, clusterState, clusterInfoService.getClusterInfo(), currentNanoTime()); // first, clear from the shards any node id they used to belong to that is now dead deassociateDeadNodes(allocation); - if (reroute) { - reroute(allocation); + if (allocation.routingNodesChanged()) { + clusterState = buildResult(clusterState, allocation); } - - if (fixedClusterState == clusterState && allocation.routingNodesChanged() == false) { + if (reroute) { + return reroute(clusterState, reason); + } else { return clusterState; } - return buildResultAndLogHealthChange(clusterState, allocation, reason); } /** diff --git a/server/src/main/java/org/elasticsearch/discovery/zen/NodeJoinController.java b/server/src/main/java/org/elasticsearch/discovery/zen/NodeJoinController.java index e59fc8ad513..5cceba237e5 100644 --- a/server/src/main/java/org/elasticsearch/discovery/zen/NodeJoinController.java +++ b/server/src/main/java/org/elasticsearch/discovery/zen/NodeJoinController.java @@ -380,7 +380,7 @@ public class NodeJoinController extends AbstractComponent { /** * a task indicated that the current node should become master, if no current master is known */ - private static final DiscoveryNode BECOME_MASTER_TASK = new DiscoveryNode("_BECOME_MASTER_TASK_", + public static final DiscoveryNode BECOME_MASTER_TASK = new DiscoveryNode("_BECOME_MASTER_TASK_", new TransportAddress(TransportAddress.META_ADDRESS, 0), Collections.emptyMap(), Collections.emptySet(), Version.CURRENT) { @Override @@ -393,7 +393,7 @@ public class NodeJoinController extends AbstractComponent { * a task that is used to signal the election is stopped and we should process pending joins. 
* it may be use in combination with {@link #BECOME_MASTER_TASK} */ - private static final DiscoveryNode FINISH_ELECTION_TASK = new DiscoveryNode("_FINISH_ELECTION_", + public static final DiscoveryNode FINISH_ELECTION_TASK = new DiscoveryNode("_FINISH_ELECTION_", new TransportAddress(TransportAddress.META_ADDRESS, 0), Collections.emptyMap(), Collections.emptySet(), Version.CURRENT) { @Override public String toString() { diff --git a/server/src/test/java/org/elasticsearch/cluster/metadata/AutoExpandReplicasTests.java b/server/src/test/java/org/elasticsearch/cluster/metadata/AutoExpandReplicasTests.java index 32312f34e21..f24dbfbd002 100644 --- a/server/src/test/java/org/elasticsearch/cluster/metadata/AutoExpandReplicasTests.java +++ b/server/src/test/java/org/elasticsearch/cluster/metadata/AutoExpandReplicasTests.java @@ -18,8 +18,36 @@ */ package org.elasticsearch.cluster.metadata; +import org.elasticsearch.Version; +import org.elasticsearch.action.admin.cluster.reroute.ClusterRerouteRequest; +import org.elasticsearch.action.admin.indices.create.CreateIndexRequest; +import org.elasticsearch.action.support.ActiveShardCount; +import org.elasticsearch.action.support.replication.ClusterStateCreationUtils; +import org.elasticsearch.cluster.ClusterState; +import org.elasticsearch.cluster.node.DiscoveryNode; +import org.elasticsearch.cluster.node.DiscoveryNodes; +import org.elasticsearch.cluster.routing.IndexShardRoutingTable; +import org.elasticsearch.cluster.routing.ShardRoutingState; import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.util.set.Sets; +import org.elasticsearch.indices.cluster.ClusterStateChanges; import org.elasticsearch.test.ESTestCase; +import org.elasticsearch.threadpool.TestThreadPool; +import org.elasticsearch.threadpool.ThreadPool; + +import java.util.ArrayList; +import java.util.Collections; +import java.util.HashSet; +import java.util.List; +import java.util.Locale; +import java.util.Set; +import java.util.concurrent.atomic.AtomicInteger; +import java.util.stream.Collectors; + +import static org.elasticsearch.cluster.metadata.IndexMetaData.SETTING_AUTO_EXPAND_REPLICAS; +import static org.elasticsearch.cluster.metadata.IndexMetaData.SETTING_NUMBER_OF_SHARDS; +import static org.hamcrest.Matchers.everyItem; +import static org.hamcrest.Matchers.isIn; public class AutoExpandReplicasTests extends ESTestCase { @@ -72,4 +100,104 @@ public class AutoExpandReplicasTests extends ESTestCase { } } + + private static final AtomicInteger nodeIdGenerator = new AtomicInteger(); + + protected DiscoveryNode createNode(DiscoveryNode.Role... mustHaveRoles) { + Set roles = new HashSet<>(randomSubsetOf(Sets.newHashSet(DiscoveryNode.Role.values()))); + for (DiscoveryNode.Role mustHaveRole : mustHaveRoles) { + roles.add(mustHaveRole); + } + final String id = String.format(Locale.ROOT, "node_%03d", nodeIdGenerator.incrementAndGet()); + return new DiscoveryNode(id, id, buildNewFakeTransportAddress(), Collections.emptyMap(), roles, + Version.CURRENT); + } + + /** + * Checks that when nodes leave the cluster that the auto-expand-replica functionality only triggers after failing the shards on + * the removed nodes. This ensures that active shards on other live nodes are not failed if the primary resided on a now dead node. + * Instead, one of the replicas on the live nodes first gets promoted to primary, and the auto-expansion (removing replicas) only + * triggers in a follow-up step. 
+ */ + public void testAutoExpandWhenNodeLeavesAndPossiblyRejoins() throws InterruptedException { + final ThreadPool threadPool = new TestThreadPool(getClass().getName()); + final ClusterStateChanges cluster = new ClusterStateChanges(xContentRegistry(), threadPool); + + try { + List allNodes = new ArrayList<>(); + DiscoveryNode localNode = createNode(DiscoveryNode.Role.MASTER); // local node is the master + allNodes.add(localNode); + int numDataNodes = randomIntBetween(3, 5); + List dataNodes = new ArrayList<>(numDataNodes); + for (int i = 0; i < numDataNodes; i++) { + dataNodes.add(createNode(DiscoveryNode.Role.DATA)); + } + allNodes.addAll(dataNodes); + ClusterState state = ClusterStateCreationUtils.state(localNode, localNode, allNodes.toArray(new DiscoveryNode[allNodes.size()])); + + CreateIndexRequest request = new CreateIndexRequest("index", + Settings.builder() + .put(SETTING_NUMBER_OF_SHARDS, 1) + .put(SETTING_AUTO_EXPAND_REPLICAS, "0-all").build()) + .waitForActiveShards(ActiveShardCount.NONE); + state = cluster.createIndex(state, request); + assertTrue(state.metaData().hasIndex("index")); + while (state.routingTable().index("index").shard(0).allShardsStarted() == false) { + logger.info(state); + state = cluster.applyStartedShards(state, + state.routingTable().index("index").shard(0).shardsWithState(ShardRoutingState.INITIALIZING)); + state = cluster.reroute(state, new ClusterRerouteRequest()); + } + + IndexShardRoutingTable preTable = state.routingTable().index("index").shard(0); + final Set unchangedNodeIds; + final IndexShardRoutingTable postTable; + + if (randomBoolean()) { + // simulate node removal + List nodesToRemove = randomSubsetOf(2, dataNodes); + unchangedNodeIds = dataNodes.stream().filter(n -> nodesToRemove.contains(n) == false) + .map(DiscoveryNode::getId).collect(Collectors.toSet()); + + state = cluster.removeNodes(state, nodesToRemove); + postTable = state.routingTable().index("index").shard(0); + + assertTrue("not all shards started in " + state.toString(), postTable.allShardsStarted()); + assertThat(postTable.toString(), postTable.getAllAllocationIds(), everyItem(isIn(preTable.getAllAllocationIds()))); + } else { + // fake an election where conflicting nodes are removed and readded + state = ClusterState.builder(state).nodes(DiscoveryNodes.builder(state.nodes()).masterNodeId(null).build()).build(); + + List conflictingNodes = randomSubsetOf(2, dataNodes); + unchangedNodeIds = dataNodes.stream().filter(n -> conflictingNodes.contains(n) == false) + .map(DiscoveryNode::getId).collect(Collectors.toSet()); + + List nodesToAdd = conflictingNodes.stream() + .map(n -> new DiscoveryNode(n.getName(), n.getId(), buildNewFakeTransportAddress(), n.getAttributes(), n.getRoles(), n.getVersion())) + .collect(Collectors.toList()); + + if (randomBoolean()) { + nodesToAdd.add(createNode(DiscoveryNode.Role.DATA)); + } + + state = cluster.joinNodesAndBecomeMaster(state, nodesToAdd); + postTable = state.routingTable().index("index").shard(0); + } + + Set unchangedAllocationIds = preTable.getShards().stream().filter(shr -> unchangedNodeIds.contains(shr.currentNodeId())) + .map(shr -> shr.allocationId().getId()).collect(Collectors.toSet()); + + assertThat(postTable.toString(), unchangedAllocationIds, everyItem(isIn(postTable.getAllAllocationIds()))); + + postTable.getShards().forEach( + shardRouting -> { + if (shardRouting.assignedToNode() && unchangedAllocationIds.contains(shardRouting.allocationId().getId())) { + assertTrue("Shard should be active: " + shardRouting, 
shardRouting.active()); + } + } + ); + } finally { + terminate(threadPool); + } + } } diff --git a/server/src/test/java/org/elasticsearch/indices/cluster/ClusterStateChanges.java b/server/src/test/java/org/elasticsearch/indices/cluster/ClusterStateChanges.java index 9e8638af249..8bfd08244e4 100644 --- a/server/src/test/java/org/elasticsearch/indices/cluster/ClusterStateChanges.java +++ b/server/src/test/java/org/elasticsearch/indices/cluster/ClusterStateChanges.java @@ -87,6 +87,7 @@ import org.elasticsearch.transport.Transport; import org.elasticsearch.transport.TransportService; import java.io.IOException; +import java.util.ArrayList; import java.util.Arrays; import java.util.Collections; import java.util.HashSet; @@ -232,6 +233,15 @@ public class ClusterStateChanges extends AbstractComponent { return runTasks(joinTaskExecutor, clusterState, nodes); } + public ClusterState joinNodesAndBecomeMaster(ClusterState clusterState, List nodes) { + List joinNodes = new ArrayList<>(); + joinNodes.add(NodeJoinController.BECOME_MASTER_TASK); + joinNodes.add(NodeJoinController.FINISH_ELECTION_TASK); + joinNodes.addAll(nodes); + + return runTasks(joinTaskExecutor, clusterState, joinNodes); + } + public ClusterState removeNodes(ClusterState clusterState, List nodes) { return runTasks(nodeRemovalExecutor, clusterState, nodes.stream() .map(n -> new ZenDiscovery.NodeRemovalClusterStateTaskExecutor.Task(n, "dummy reason")).collect(Collectors.toList())); From df852fbdd9a28b3e58ff670f20a079acc52194ea Mon Sep 17 00:00:00 2001 From: javanna Date: Mon, 14 May 2018 20:23:43 +0200 Subject: [PATCH 12/31] Fix non existing javadocs link in RestClientTests --- .../src/test/java/org/elasticsearch/client/RestClientTests.java | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/client/rest/src/test/java/org/elasticsearch/client/RestClientTests.java b/client/rest/src/test/java/org/elasticsearch/client/RestClientTests.java index 872b327954b..ea124828e45 100644 --- a/client/rest/src/test/java/org/elasticsearch/client/RestClientTests.java +++ b/client/rest/src/test/java/org/elasticsearch/client/RestClientTests.java @@ -96,7 +96,7 @@ public class RestClientTests extends RestClientTestCase { } /** - * @deprecated will remove method in 7.0 but needs tests until then. Replaced by {@link RequestTests#testSetParameters()}. + * @deprecated will remove method in 7.0 but needs tests until then. Replaced by {@link RequestTests#testAddParameters()}. */ @Deprecated public void testPerformOldStyleAsyncWithNullParams() throws Exception { From b30f2913cf68d1f2b7bd9b7e5d626c073f545578 Mon Sep 17 00:00:00 2001 From: Igor Motov Date: Mon, 14 May 2018 15:54:42 -0400 Subject: [PATCH 13/31] Docs: document precision limitations of geo_bounding_box (#30540) The geo_bounding_box query might produce false positives alongside the right and upper edges and false negatives alongside left and bottom edges. This commit documents the behavior and defines the maximum error. Closes #29196 --- .../query-dsl/geo-bounding-box-query.asciidoc | 13 +++++++++++++ 1 file changed, 13 insertions(+) diff --git a/docs/reference/query-dsl/geo-bounding-box-query.asciidoc b/docs/reference/query-dsl/geo-bounding-box-query.asciidoc index a1b427acf27..21a689703e0 100644 --- a/docs/reference/query-dsl/geo-bounding-box-query.asciidoc +++ b/docs/reference/query-dsl/geo-bounding-box-query.asciidoc @@ -329,3 +329,16 @@ and will not match any documents for this query. This can be useful when querying multiple indexes which might have different mappings. 
When set to `false` (the default value) the query will throw an exception if the field is not mapped. + +[float] +==== Notes on Precision + +Geopoints have limited precision and are always rounded down during index time. +During the query time, upper boundaries of the bounding boxes are rounded down, +while lower boundaries are rounded up. As a result, the points along on the +lower bounds (bottom and left edges of the bounding box) might not make it into +the bounding box due to the rounding error. At the same time points alongside +the upper bounds (top and right edges) might be selected by the query even if +they are located slightly outside the edge. The rounding error should be less +than 4.20e-8 degrees on the latitude and less than 8.39e-8 degrees on the +longitude, which translates to less than 1cm error even at the equator. From fa45c6c9a63d25bc4abb89f49014a7774acffbf0 Mon Sep 17 00:00:00 2001 From: Lisa Cawley Date: Mon, 14 May 2018 13:07:27 -0700 Subject: [PATCH 14/31] [DOCS] Fix path info for various security files (#30502) --- x-pack/docs/en/commands/syskeygen.asciidoc | 2 +- x-pack/docs/en/security/auditing.asciidoc | 2 +- .../configuring-ldap-realm.asciidoc | 2 +- .../docs/en/security/authorization.asciidoc | 2 +- .../authorization/mapping-roles.asciidoc | 2 +- .../docs/en/security/reference/files.asciidoc | 6 +++--- .../configuring-tls-docker.asciidoc | 20 +++++++++---------- .../securing-communications/tls-http.asciidoc | 6 +++--- .../securing-communications/tls-ldap.asciidoc | 2 +- .../tls-transport.asciidoc | 6 +++--- .../en/setup/bootstrap-checks-xes.asciidoc | 2 +- .../en/watcher/trigger/schedule/cron.asciidoc | 2 +- 12 files changed, 27 insertions(+), 27 deletions(-) diff --git a/x-pack/docs/en/commands/syskeygen.asciidoc b/x-pack/docs/en/commands/syskeygen.asciidoc index 8683d801d58..f4a198ff4bf 100644 --- a/x-pack/docs/en/commands/syskeygen.asciidoc +++ b/x-pack/docs/en/commands/syskeygen.asciidoc @@ -43,7 +43,7 @@ environment variable. === Examples The following command generates a `system_key` file in the -default `$ES_HOME/config/x-pack` directory: +default `$ES_HOME/config` directory: [source, sh] -------------------------------------------------- diff --git a/x-pack/docs/en/security/auditing.asciidoc b/x-pack/docs/en/security/auditing.asciidoc index 8bff8727f83..6cd31d076f9 100644 --- a/x-pack/docs/en/security/auditing.asciidoc +++ b/x-pack/docs/en/security/auditing.asciidoc @@ -330,7 +330,7 @@ audited in plain text when including the request body in audit events. [[logging-file]] You can also configure how the logfile is written in the `log4j2.properties` -file located in `CONFIG_DIR/x-pack`. By default, audit information is appended to the +file located in `CONFIG_DIR`. By default, audit information is appended to the `_access.log` file located in the standard Elasticsearch `logs` directory (typically located at `$ES_HOME/logs`). The file rolls over on a daily basis. 
diff --git a/x-pack/docs/en/security/authentication/configuring-ldap-realm.asciidoc b/x-pack/docs/en/security/authentication/configuring-ldap-realm.asciidoc index b43a0911e04..6ea9b243aad 100644 --- a/x-pack/docs/en/security/authentication/configuring-ldap-realm.asciidoc +++ b/x-pack/docs/en/security/authentication/configuring-ldap-realm.asciidoc @@ -56,7 +56,7 @@ xpack: group_search: base_dn: "dc=example,dc=com" files: - role_mapping: "CONFIG_DIR/x-pack/role_mapping.yml" + role_mapping: "CONFIG_DIR/role_mapping.yml" unmapped_groups_as_roles: false ------------------------------------------------------------ diff --git a/x-pack/docs/en/security/authorization.asciidoc b/x-pack/docs/en/security/authorization.asciidoc index 4a3ffe399de..ed171415056 100644 --- a/x-pack/docs/en/security/authorization.asciidoc +++ b/x-pack/docs/en/security/authorization.asciidoc @@ -295,7 +295,7 @@ see {ref}/security-api-roles.html[Role Management APIs]. === File-based Role Management Apart from the _Role Management APIs_, roles can also be defined in local -`roles.yml` file located in `CONFIG_DIR/x-pack`. This is a YAML file where each +`roles.yml` file located in `CONFIG_DIR`. This is a YAML file where each role definition is keyed by its name. [IMPORTANT] diff --git a/x-pack/docs/en/security/authorization/mapping-roles.asciidoc b/x-pack/docs/en/security/authorization/mapping-roles.asciidoc index 590546e217c..2c1f1998c68 100644 --- a/x-pack/docs/en/security/authorization/mapping-roles.asciidoc +++ b/x-pack/docs/en/security/authorization/mapping-roles.asciidoc @@ -36,7 +36,7 @@ To use file based role-mappings, you must configure the mappings in a YAML file and copy it to each node in the cluster. Tools like Puppet or Chef can help with this. -By default, role mappings are stored in `ES_PATH_CONF/x-pack/role_mapping.yml`, +By default, role mappings are stored in `ES_PATH_CONF/role_mapping.yml`, where `ES_PATH_CONF` is `ES_HOME/config` (zip/tar installations) or `/etc/elasticsearch` (package installations). To specify a different location, you configure the `files.role_mapping` realm settings in `elasticsearch.yml`. diff --git a/x-pack/docs/en/security/reference/files.asciidoc b/x-pack/docs/en/security/reference/files.asciidoc index cec8f9d1a3b..dcf673d9a9f 100644 --- a/x-pack/docs/en/security/reference/files.asciidoc +++ b/x-pack/docs/en/security/reference/files.asciidoc @@ -3,7 +3,7 @@ The {security} uses the following files: -* `CONFIG_DIR/x-pack/roles.yml` defines the roles in use on the cluster +* `CONFIG_DIR/roles.yml` defines the roles in use on the cluster (read more <>). * `CONFIG_DIR/elasticsearch-users` defines the users and their hashed passwords for @@ -12,12 +12,12 @@ The {security} uses the following files: * `CONFIG_DIR/elasticsearch-users_roles` defines the user roles assignment for the the <>. -* `CONFIG_DIR/x-pack/role_mapping.yml` defines the role assignments for a +* `CONFIG_DIR/role_mapping.yml` defines the role assignments for a Distinguished Name (DN) to a role. This allows for LDAP and Active Directory groups and users and PKI users to be mapped to roles (read more <>). -* `CONFIG_DIR/x-pack/log4j2.properties` contains audit information (read more +* `CONFIG_DIR/log4j2.properties` contains audit information (read more <>). 
[[security-files-location]] diff --git a/x-pack/docs/en/security/securing-communications/configuring-tls-docker.asciidoc b/x-pack/docs/en/security/securing-communications/configuring-tls-docker.asciidoc index affac534b6f..d93d4e523d9 100644 --- a/x-pack/docs/en/security/securing-communications/configuring-tls-docker.asciidoc +++ b/x-pack/docs/en/security/securing-communications/configuring-tls-docker.asciidoc @@ -41,7 +41,7 @@ instances: `.env`: [source,yaml] ---- -CERTS_DIR=/usr/share/elasticsearch/config/x-pack/certificates <1> +CERTS_DIR=/usr/share/elasticsearch/config/certificates <1> ELASTIC_PASSWORD=PleaseChangeMe <2> ---- <1> The path, inside the Docker image, where certificates are expected to be found. @@ -66,18 +66,18 @@ services: image: docker.elastic.co/elasticsearch/elasticsearch-platinum:{version} command: > bash -c ' - if [[ ! -d config/x-pack/certificates/certs ]]; then - mkdir config/x-pack/certificates/certs; + if [[ ! -d config/certificates/certs ]]; then + mkdir config/certificates/certs; fi; if [[ ! -f /local/certs/bundle.zip ]]; then - bin/elasticsearch-certgen --silent --in config/x-pack/certificates/instances.yml --out config/x-pack/certificates/certs/bundle.zip; - unzip config/x-pack/certificates/certs/bundle.zip -d config/x-pack/certificates/certs; <1> + bin/elasticsearch-certgen --silent --in config/certificates/instances.yml --out config/certificates/certs/bundle.zip; + unzip config/certificates/certs/bundle.zip -d config/certificates/certs; <1> fi; - chgrp -R 0 config/x-pack/certificates/certs + chgrp -R 0 config/certificates/certs ' user: $\{UID:-1000\} working_dir: /usr/share/elasticsearch - volumes: ['.:/usr/share/elasticsearch/config/x-pack/certificates'] + volumes: ['.:/usr/share/elasticsearch/config/certificates'] ---- <1> The new node certificates and CA certificate+key are placed under the local directory `certs`. @@ -184,9 +184,9 @@ WARNING: Windows users not running PowerShell will need to remove `\` and join l ---- docker exec es01 /bin/bash -c "bin/elasticsearch-setup-passwords \ auto --batch \ --Expack.ssl.certificate=x-pack/certificates/es01/es01.crt \ --Expack.ssl.certificate_authorities=x-pack/certificates/ca/ca.crt \ --Expack.ssl.key=x-pack/certificates/es01/es01.key \ +-Expack.ssl.certificate=certificates/es01/es01.crt \ +-Expack.ssl.certificate_authorities=certificates/ca/ca.crt \ +-Expack.ssl.key=certificates/es01/es01.key \ --url https://localhost:9200" ---- -- diff --git a/x-pack/docs/en/security/securing-communications/tls-http.asciidoc b/x-pack/docs/en/security/securing-communications/tls-http.asciidoc index dae088667c6..eb8e985a65b 100644 --- a/x-pack/docs/en/security/securing-communications/tls-http.asciidoc +++ b/x-pack/docs/en/security/securing-communications/tls-http.asciidoc @@ -40,9 +40,9 @@ This name should match the `keystore.path` value. [source, yaml] -------------------------------------------------- xpack.security.http.ssl.enabled: true -xpack.security.http.ssl.key: /home/es/config/x-pack/node01.key <1> -xpack.security.http.ssl.certificate: /home/es/config/x-pack/node01.crt <2> -xpack.security.http.ssl.certificate_authorities: [ "/home/es/config/x-pack/ca.crt" ] <3> +xpack.security.http.ssl.key: /home/es/config/node01.key <1> +xpack.security.http.ssl.certificate: /home/es/config/node01.crt <2> +xpack.security.http.ssl.certificate_authorities: [ "/home/es/config/ca.crt" ] <3> -------------------------------------------------- <1> The full path to the node key file. 
This must be a location within the {es} configuration directory. diff --git a/x-pack/docs/en/security/securing-communications/tls-ldap.asciidoc b/x-pack/docs/en/security/securing-communications/tls-ldap.asciidoc index f10ced77f71..b7f0b7d3005 100644 --- a/x-pack/docs/en/security/securing-communications/tls-ldap.asciidoc +++ b/x-pack/docs/en/security/securing-communications/tls-ldap.asciidoc @@ -29,7 +29,7 @@ xpack: order: 0 url: "ldaps://ldap.example.com:636" ssl: - certificate_authorities: [ "CONFIG_DIR/x-pack/cacert.pem" ] + certificate_authorities: [ "CONFIG_DIR/cacert.pem" ] -------------------------------------------------- The CA certificate must be a PEM encoded. diff --git a/x-pack/docs/en/security/securing-communications/tls-transport.asciidoc b/x-pack/docs/en/security/securing-communications/tls-transport.asciidoc index 9bce211a1e2..2e20a20f907 100644 --- a/x-pack/docs/en/security/securing-communications/tls-transport.asciidoc +++ b/x-pack/docs/en/security/securing-communications/tls-transport.asciidoc @@ -43,9 +43,9 @@ This name should match the `keystore.path` value. -------------------------------------------------- xpack.security.transport.ssl.enabled: true xpack.security.transport.ssl.verification_mode: certificate <1> -xpack.security.transport.ssl.key: /home/es/config/x-pack/node01.key <2> -xpack.security.transport.ssl.certificate: /home/es/config/x-pack/node01.crt <3> -xpack.security.transport.ssl.certificate_authorities: [ "/home/es/config/x-pack/ca.crt" ] <4> +xpack.security.transport.ssl.key: /home/es/config/node01.key <2> +xpack.security.transport.ssl.certificate: /home/es/config/node01.crt <3> +xpack.security.transport.ssl.certificate_authorities: [ "/home/es/config/ca.crt" ] <4> -------------------------------------------------- <1> If you used the `--dns` or `--ip` options with the `elasticsearch-certutil cert` command and you want to enable strict hostname checking, set the verification mode to diff --git a/x-pack/docs/en/setup/bootstrap-checks-xes.asciidoc b/x-pack/docs/en/setup/bootstrap-checks-xes.asciidoc index a9150ec056c..6ee9c29b44f 100644 --- a/x-pack/docs/en/setup/bootstrap-checks-xes.asciidoc +++ b/x-pack/docs/en/setup/bootstrap-checks-xes.asciidoc @@ -43,7 +43,7 @@ to each user. If you use files to manage the role mappings, you must configure a YAML file and copy it to each node in the cluster. By default, role mappings are stored in -`ES_PATH_CONF/x-pack/role_mapping.yml`. Alternatively, you can specify a +`ES_PATH_CONF/role_mapping.yml`. Alternatively, you can specify a different role mapping file for each type of realm and specify its location in the `elasticsearch.yml` file. For more information, see {xpack-ref}/mapping-roles.html#mapping-roles-file[Using Role Mapping Files]. diff --git a/x-pack/docs/en/watcher/trigger/schedule/cron.asciidoc b/x-pack/docs/en/watcher/trigger/schedule/cron.asciidoc index c24668a688d..57d33051097 100644 --- a/x-pack/docs/en/watcher/trigger/schedule/cron.asciidoc +++ b/x-pack/docs/en/watcher/trigger/schedule/cron.asciidoc @@ -213,7 +213,7 @@ minute during the weekend: {xpack} ships with a `elasticsearch-croneval` command line tool that you can use to verify that your cron expressions are valid and produce the expected results. This tool is -provided in the `$ES_HOME/bin/x-pack` directory. +provided in the `$ES_HOME/bin` directory. 
To verify a cron expression, simply pass it in as a parameter to `elasticsearch-croneval`: From 792827061065e402c8f80ce849e6f613d7536d39 Mon Sep 17 00:00:00 2001 From: Lisa Cawley Date: Mon, 14 May 2018 13:13:26 -0700 Subject: [PATCH 15/31] [DOCS] Fix realm setting names (#30499) --- .../security/authorization/mapping-roles.asciidoc | 13 +++++-------- 1 file changed, 5 insertions(+), 8 deletions(-) diff --git a/x-pack/docs/en/security/authorization/mapping-roles.asciidoc b/x-pack/docs/en/security/authorization/mapping-roles.asciidoc index 2c1f1998c68..fba87db9786 100644 --- a/x-pack/docs/en/security/authorization/mapping-roles.asciidoc +++ b/x-pack/docs/en/security/authorization/mapping-roles.asciidoc @@ -39,14 +39,11 @@ this. By default, role mappings are stored in `ES_PATH_CONF/role_mapping.yml`, where `ES_PATH_CONF` is `ES_HOME/config` (zip/tar installations) or `/etc/elasticsearch` (package installations). To specify a different location, -you configure the `files.role_mapping` realm settings in `elasticsearch.yml`. -This setting enables you to use a different set of mappings for each realm type: - -|===== -| `xpack.security.authc.ldap.files.role_mapping` | | | The location of the role mappings for LDAP realms. -| `xpack.security.authc.active_directory.files.role_mapping` | | | The location of the role mappings for Active Directory realms. -| `xpack.security.authc.pki.files.role_mapping` | | | The location of the role mappings for PKI realms. -|===== +you configure the `files.role_mapping` setting in the +{ref}/security-settings.html#ref-ad-settings[Active Directory], +{ref}/security-settings.html#ref-ldap-settings[LDAP], and +{ref}/security-settings.html#ref-pki-settings[PKI] realm settings in +`elasticsearch.yml`. Within the role mapping file, the security roles are keys and groups and users are values. The mappings can have a many-to-many relationship. When you map roles From 1b0e6ee89f852e4e99c6e81f55878f4d8bd4bbef Mon Sep 17 00:00:00 2001 From: Jack Conradson Date: Mon, 14 May 2018 13:32:09 -0700 Subject: [PATCH 16/31] Deprecate Empty Templates (#30194) Deprecate the use of empty templates. Bug fix allows empty templates/scripts to be loaded on start up for upgrades/restarts, but empty templates can no longer be created. 
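As a minimal illustration of the new rule (a self-contained sketch only,
with EmptySourceCheck as a hypothetical stand-in; the real validation lives
in StoredScriptSource#build and reports the warning through
DeprecationLogger):

    final class EmptySourceCheck {
        // ignoreEmpty is true only when loading pre-existing cluster state.
        static void check(String lang, String source, boolean ignoreEmpty) {
            if (source == null || source.isEmpty()) {
                if (ignoreEmpty || "mustache".equals(lang)) {
                    System.err.println("deprecation: empty "
                            + ("mustache".equals(lang) ? "templates" : "scripts")
                            + " should no longer be used");
                } else {
                    throw new IllegalArgumentException("source cannot be empty");
                }
            }
        }

        public static void main(String[] args) {
            check("mustache", "", false); // empty template: deprecation warning only
            try {
                check("painless", "", false); // empty script: rejected at creation
            } catch (IllegalArgumentException e) {
                System.err.println(e.getMessage()); // "source cannot be empty"
            }
        }
    }
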
--- .../elasticsearch/script/ScriptMetaData.java | 21 ++++++- .../script/StoredScriptSource.java | 62 ++++++++++++++++--- .../script/ScriptMetaDataTests.java | 41 ++++++++++++ .../script/StoredScriptSourceTests.java | 2 +- .../script/StoredScriptTests.java | 36 ++++++++++- 5 files changed, 148 insertions(+), 14 deletions(-) diff --git a/server/src/main/java/org/elasticsearch/script/ScriptMetaData.java b/server/src/main/java/org/elasticsearch/script/ScriptMetaData.java index dca17ce4866..9505875ae1e 100644 --- a/server/src/main/java/org/elasticsearch/script/ScriptMetaData.java +++ b/server/src/main/java/org/elasticsearch/script/ScriptMetaData.java @@ -29,6 +29,8 @@ import org.elasticsearch.common.ParsingException; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.io.stream.Writeable; +import org.elasticsearch.common.logging.DeprecationLogger; +import org.elasticsearch.common.logging.Loggers; import org.elasticsearch.common.xcontent.ToXContentFragment; import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.common.xcontent.XContentParser; @@ -46,6 +48,11 @@ import java.util.Map; */ public final class ScriptMetaData implements MetaData.Custom, Writeable, ToXContentFragment { + /** + * Standard deprecation logger for used to deprecate allowance of empty templates. + */ + private static final DeprecationLogger DEPRECATION_LOGGER = new DeprecationLogger(Loggers.getLogger(ScriptMetaData.class)); + /** * A builder used to modify the currently stored scripts data held within * the {@link ClusterState}. Scripts can be added or deleted, then built @@ -161,8 +168,8 @@ public final class ScriptMetaData implements MetaData.Custom, Writeable, ToXCont * * {@code * { - * "" : "<{@link StoredScriptSource#fromXContent(XContentParser)}>", - * "" : "<{@link StoredScriptSource#fromXContent(XContentParser)}>", + * "" : "<{@link StoredScriptSource#fromXContent(XContentParser, boolean)}>", + * "" : "<{@link StoredScriptSource#fromXContent(XContentParser, boolean)}>", * ... 
* } * } @@ -209,6 +216,14 @@ public final class ScriptMetaData implements MetaData.Custom, Writeable, ToXCont lang = id.substring(0, split); id = id.substring(split + 1); source = new StoredScriptSource(lang, parser.text(), Collections.emptyMap()); + + if (source.getSource().isEmpty()) { + if (source.getLang().equals(Script.DEFAULT_TEMPLATE_LANG)) { + DEPRECATION_LOGGER.deprecated("empty templates should no longer be used"); + } else { + DEPRECATION_LOGGER.deprecated("empty scripts should no longer be used"); + } + } } exists = scripts.get(id); @@ -231,7 +246,7 @@ public final class ScriptMetaData implements MetaData.Custom, Writeable, ToXCont } exists = scripts.get(id); - source = StoredScriptSource.fromXContent(parser); + source = StoredScriptSource.fromXContent(parser, true); if (exists == null) { scripts.put(id, source); diff --git a/server/src/main/java/org/elasticsearch/script/StoredScriptSource.java b/server/src/main/java/org/elasticsearch/script/StoredScriptSource.java index 9c52ff943d2..da6dad1dff3 100644 --- a/server/src/main/java/org/elasticsearch/script/StoredScriptSource.java +++ b/server/src/main/java/org/elasticsearch/script/StoredScriptSource.java @@ -32,6 +32,8 @@ import org.elasticsearch.common.bytes.BytesReference; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.io.stream.Writeable; +import org.elasticsearch.common.logging.DeprecationLogger; +import org.elasticsearch.common.logging.Loggers; import org.elasticsearch.common.xcontent.LoggingDeprecationHandler; import org.elasticsearch.common.xcontent.NamedXContentRegistry; import org.elasticsearch.common.xcontent.ObjectParser; @@ -57,6 +59,11 @@ import java.util.Objects; */ public class StoredScriptSource extends AbstractDiffable implements Writeable, ToXContentObject { + /** + * Standard deprecation logger for used to deprecate allowance of empty templates. + */ + private static final DeprecationLogger DEPRECATION_LOGGER = new DeprecationLogger(Loggers.getLogger(StoredScriptSource.class)); + /** * Standard {@link ParseField} for outer level of stored script source. */ @@ -109,7 +116,7 @@ public class StoredScriptSource extends AbstractDiffable imp private void setSource(XContentParser parser) { try { if (parser.currentToken() == Token.START_OBJECT) { - //this is really for search templates, that need to be converted to json format + // this is really for search templates, that need to be converted to json format XContentBuilder builder = XContentFactory.jsonBuilder(); source = Strings.toString(builder.copyCurrentStructure(parser)); options.put(Script.CONTENT_TYPE_OPTION, XContentType.JSON.mediaType()); @@ -131,8 +138,12 @@ public class StoredScriptSource extends AbstractDiffable imp /** * Validates the parameters and creates an {@link StoredScriptSource}. + * + * @param ignoreEmpty Specify as {@code true} to ignoreEmpty the empty source check. + * This allow empty templates to be loaded for backwards compatibility. + * This allow empty templates to be loaded for backwards compatibility. 
*/ - private StoredScriptSource build() { + private StoredScriptSource build(boolean ignoreEmpty) { if (lang == null) { throw new IllegalArgumentException("must specify lang for stored script"); } else if (lang.isEmpty()) { @@ -140,9 +151,25 @@ public class StoredScriptSource extends AbstractDiffable imp } if (source == null) { - throw new IllegalArgumentException("must specify source for stored script"); + if (ignoreEmpty || Script.DEFAULT_TEMPLATE_LANG.equals(lang)) { + if (Script.DEFAULT_TEMPLATE_LANG.equals(lang)) { + DEPRECATION_LOGGER.deprecated("empty templates should no longer be used"); + } else { + DEPRECATION_LOGGER.deprecated("empty scripts should no longer be used"); + } + } else { + throw new IllegalArgumentException("must specify source for stored script"); + } } else if (source.isEmpty()) { - throw new IllegalArgumentException("source cannot be empty"); + if (ignoreEmpty || Script.DEFAULT_TEMPLATE_LANG.equals(lang)) { + if (Script.DEFAULT_TEMPLATE_LANG.equals(lang)) { + DEPRECATION_LOGGER.deprecated("empty templates should no longer be used"); + } else { + DEPRECATION_LOGGER.deprecated("empty scripts should no longer be used"); + } + } else { + throw new IllegalArgumentException("source cannot be empty"); + } } if (options.size() > 1 || options.size() == 1 && options.get(Script.CONTENT_TYPE_OPTION) == null) { @@ -257,6 +284,8 @@ public class StoredScriptSource extends AbstractDiffable imp token = parser.nextToken(); if (token == Token.END_OBJECT) { + DEPRECATION_LOGGER.deprecated("empty templates should no longer be used"); + return new StoredScriptSource(Script.DEFAULT_TEMPLATE_LANG, "", Collections.emptyMap()); } @@ -271,7 +300,7 @@ public class StoredScriptSource extends AbstractDiffable imp token = parser.nextToken(); if (token == Token.START_OBJECT) { - return PARSER.apply(parser, null).build(); + return PARSER.apply(parser, null).build(false); } else { throw new ParsingException(parser.getTokenLocation(), "unexpected token [" + token + "], expected [{, ]"); } @@ -280,7 +309,13 @@ public class StoredScriptSource extends AbstractDiffable imp token = parser.nextToken(); if (token == Token.VALUE_STRING) { - return new StoredScriptSource(Script.DEFAULT_TEMPLATE_LANG, parser.text(), Collections.emptyMap()); + String source = parser.text(); + + if (source == null || source.isEmpty()) { + DEPRECATION_LOGGER.deprecated("empty templates should no longer be used"); + } + + return new StoredScriptSource(Script.DEFAULT_TEMPLATE_LANG, source, Collections.emptyMap()); } } @@ -293,7 +328,13 @@ public class StoredScriptSource extends AbstractDiffable imp builder.copyCurrentStructure(parser); } - return new StoredScriptSource(Script.DEFAULT_TEMPLATE_LANG, Strings.toString(builder), Collections.emptyMap()); + String source = Strings.toString(builder); + + if (source == null || source.isEmpty()) { + DEPRECATION_LOGGER.deprecated("empty templates should no longer be used"); + } + + return new StoredScriptSource(Script.DEFAULT_TEMPLATE_LANG, source, Collections.emptyMap()); } } } catch (IOException ioe) { @@ -320,9 +361,12 @@ public class StoredScriptSource extends AbstractDiffable imp * * Note that the "source" parameter can also handle template parsing including from * a complex JSON object. + * + * @param ignoreEmpty Specify as {@code true} to ignoreEmpty the empty source check. + * This allows empty templates to be loaded for backwards compatibility. 
*/ - public static StoredScriptSource fromXContent(XContentParser parser) { - return PARSER.apply(parser, null).build(); + public static StoredScriptSource fromXContent(XContentParser parser, boolean ignoreEmpty) { + return PARSER.apply(parser, null).build(ignoreEmpty); } /** diff --git a/server/src/test/java/org/elasticsearch/script/ScriptMetaDataTests.java b/server/src/test/java/org/elasticsearch/script/ScriptMetaDataTests.java index d5769cd192b..32d4d48a448 100644 --- a/server/src/test/java/org/elasticsearch/script/ScriptMetaDataTests.java +++ b/server/src/test/java/org/elasticsearch/script/ScriptMetaDataTests.java @@ -22,6 +22,8 @@ import org.elasticsearch.cluster.DiffableUtils; import org.elasticsearch.common.bytes.BytesArray; import org.elasticsearch.common.bytes.BytesReference; import org.elasticsearch.common.io.stream.Writeable; +import org.elasticsearch.common.logging.DeprecationLogger; +import org.elasticsearch.common.logging.Loggers; import org.elasticsearch.common.xcontent.DeprecationHandler; import org.elasticsearch.common.xcontent.NamedXContentRegistry; import org.elasticsearch.common.xcontent.XContentBuilder; @@ -130,6 +132,45 @@ public class ScriptMetaDataTests extends AbstractSerializingTestCase Date: Mon, 14 May 2018 16:41:25 -0400 Subject: [PATCH 17/31] Adjust versions for resize copy settings (#30578) Now that the change to deprecate copy settings and disallow it being explicitly set to false is backported, this commit adjusts the BWC versions in master. --- .../rest-api-spec/test/indices.shrink/10_basic.yml | 4 ++-- .../test/indices.shrink/20_source_mapping.yml | 4 ++-- .../test/indices.shrink/30_copy_settings.yml | 4 ++-- .../rest-api-spec/test/indices.split/10_basic.yml | 12 ++++++------ .../test/indices.split/20_source_mapping.yml | 4 ++-- .../test/indices.split/30_copy_settings.yml | 4 ++-- .../action/admin/indices/shrink/ResizeRequest.java | 5 +---- 7 files changed, 17 insertions(+), 20 deletions(-) diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/indices.shrink/10_basic.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/indices.shrink/10_basic.yml index a88b37ead31..f94cf286fd8 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/indices.shrink/10_basic.yml +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/indices.shrink/10_basic.yml @@ -1,8 +1,8 @@ --- "Shrink index via API": - skip: - version: " - 6.99.99" - reason: expects warnings that pre-7.0.0 will not send + version: " - 6.3.99" + reason: expects warnings that pre-6.4.0 will not send features: "warnings" # creates an index with one document solely allocated on the master node # and shrinks it into a new index with a single shard diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/indices.shrink/20_source_mapping.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/indices.shrink/20_source_mapping.yml index ee7b2215d21..6f532ff81c6 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/indices.shrink/20_source_mapping.yml +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/indices.shrink/20_source_mapping.yml @@ -1,8 +1,8 @@ --- "Shrink index ignores target template mapping": - skip: - version: " - 6.99.99" - reason: expects warnings that pre-7.0.0 will not send + version: " - 6.3.99" + reason: expects warnings that pre-6.4.0 will not send features: "warnings" - do: diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/indices.shrink/30_copy_settings.yml 
b/rest-api-spec/src/main/resources/rest-api-spec/test/indices.shrink/30_copy_settings.yml index 50438384b3a..53a12aad787 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/indices.shrink/30_copy_settings.yml +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/indices.shrink/30_copy_settings.yml @@ -1,8 +1,8 @@ --- "Copy settings during shrink index": - skip: - version: " - 6.99.99" - reason: expects warnings that pre-7.0.0 will not send + version: " - 6.3.99" + reason: expects warnings that pre-6.4.0 will not send features: "warnings" - do: diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/indices.split/10_basic.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/indices.split/10_basic.yml index 635673c182f..4f645d3eb3e 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/indices.split/10_basic.yml +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/indices.split/10_basic.yml @@ -33,8 +33,8 @@ setup: --- "Split index via API": - skip: - version: " - 6.99.99" - reason: expects warnings that pre-7.0.0 will not send + version: " - 6.3.99" + reason: expects warnings that pre-6.4.0 will not send features: "warnings" # make it read-only @@ -110,8 +110,8 @@ setup: # when re-enabling uncomment the below skips version: "all" reason: "AwaitsFix'ing, see https://github.com/elastic/elasticsearch/issues/30503" - # version: " - 6.99.99" - # reason: expects warnings that pre-7.0.0 will not send + # version: " - 6.3.99" + # reason: expects warnings that pre-6.4.0 will not send features: "warnings" - do: indices.create: @@ -213,8 +213,8 @@ setup: --- "Create illegal split indices": - skip: - version: " - 6.99.99" - reason: expects warnings that pre-7.0.0 will not send + version: " - 6.3.99" + reason: expects warnings that pre-6.4.0 will not send features: "warnings" # try to do an illegal split with number_of_routing_shards set diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/indices.split/20_source_mapping.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/indices.split/20_source_mapping.yml index 433ac040dd1..4bac4bf5b08 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/indices.split/20_source_mapping.yml +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/indices.split/20_source_mapping.yml @@ -4,8 +4,8 @@ # when re-enabling uncomment the below skips version: "all" reason: "AwaitsFix'ing, see https://github.com/elastic/elasticsearch/issues/30503" - # version: " - 6.99.99" - # reason: expects warnings that pre-7.0.0 will not send + # version: " - 6.3.99" + # reason: expects warnings that pre-6.4.0 will not send features: "warnings" # create index diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/indices.split/30_copy_settings.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/indices.split/30_copy_settings.yml index e0ace991f4f..9e64b2b8130 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/indices.split/30_copy_settings.yml +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/indices.split/30_copy_settings.yml @@ -1,8 +1,8 @@ --- "Copy settings during split index": - skip: - version: " - 6.99.99" - reason: expects warnings that pre-7.0.0 will not send + version: " - 6.3.99" + reason: expects warnings that pre-6.4.0 will not send features: "warnings" - do: diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/shrink/ResizeRequest.java b/server/src/main/java/org/elasticsearch/action/admin/indices/shrink/ResizeRequest.java index 
e510c0719df..ca046c48acc 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/indices/shrink/ResizeRequest.java +++ b/server/src/main/java/org/elasticsearch/action/admin/indices/shrink/ResizeRequest.java @@ -101,8 +101,6 @@ public class ResizeRequest extends AcknowledgedRequest implements } if (in.getVersion().before(Version.V_6_4_0)) { copySettings = null; - } else if (in.getVersion().onOrAfter(Version.V_6_4_0) && in.getVersion().before(Version.V_7_0_0_alpha1)){ - copySettings = in.readBoolean(); } else { copySettings = in.readOptionalBoolean(); } @@ -116,10 +114,9 @@ public class ResizeRequest extends AcknowledgedRequest implements if (out.getVersion().onOrAfter(ResizeAction.COMPATIBILITY_VERSION)) { out.writeEnum(type); } + // noinspection StatementWithEmptyBody if (out.getVersion().before(Version.V_6_4_0)) { - } else if (out.getVersion().onOrAfter(Version.V_6_4_0) && out.getVersion().before(Version.V_7_0_0_alpha1)) { - out.writeBoolean(copySettings == null ? false : copySettings); } else { out.writeOptionalBoolean(copySettings); } From 56d32bc8b2c73417df0d3e41206ef128f09558e5 Mon Sep 17 00:00:00 2001 From: Igor Motov Date: Mon, 14 May 2018 16:43:29 -0400 Subject: [PATCH 18/31] SQL: Extract SQL request and response classes (#30457) Extracts SQL request and response classes. This is the first step towards creation of a small minimal dependencies jdbc driver. Relates #29856 --- .../xpack/sql/jdbc/jdbc/JdbcConnection.java | 6 +- .../xpack/sql/jdbc/jdbc/JdbcStatement.java | 6 +- .../xpack/sql/jdbc/jdbc/PreparedQuery.java | 6 +- .../sql/jdbc/net/client/JdbcHttpClient.java | 33 +-- .../sql/jdbc/jdbc/TypeConverterTests.java | 4 +- .../xpack/sql/cli/command/CliSession.java | 5 +- .../sql/cli/command/ServerInfoCliCommand.java | 4 +- .../cli/command/ServerQueryCliCommand.java | 8 +- .../xpack/sql/cli/CliSessionTests.java | 8 +- .../command/ServerInfoCliCommandTests.java | 6 +- .../command/ServerQueryCliCommandTests.java | 6 +- .../sql/plugin/AbstractSqlQueryRequest.java | 76 +++---- .../xpack/sql/plugin/AbstractSqlRequest.java | 20 +- .../xpack/sql/plugin/CliFormatter.java | 29 +-- .../xpack/sql/plugin/MetaColumnInfo.java | 191 ------------------ .../sql/plugin/SqlClearCursorAction.java | 1 - .../sql/plugin/SqlClearCursorRequest.java | 20 +- .../sql/plugin/SqlClearCursorResponse.java | 17 +- .../xpack/sql/plugin/SqlQueryAction.java | 1 - .../xpack/sql/plugin/SqlQueryRequest.java | 26 +-- .../sql/plugin/SqlQueryRequestBuilder.java | 13 +- .../xpack/sql/plugin/SqlQueryResponse.java | 86 +++----- .../xpack/sql/plugin/SqlTranslateRequest.java | 13 ++ .../plugin/SqlTranslateRequestBuilder.java | 14 +- .../xpack/sql/proto/AbstractSqlRequest.java | 42 ++++ .../sql/{plugin => proto}/ColumnInfo.java | 56 ++--- .../xpack/sql/proto/MainResponse.java | 107 ++++++++++ .../elasticsearch/xpack/sql/proto/Mode.java | 30 +++ .../xpack/sql/proto/Protocol.java | 31 +++ .../sql/proto/SqlClearCursorRequest.java | 48 +++++ .../sql/proto/SqlClearCursorResponse.java | 61 ++++++ .../xpack/sql/proto/SqlQueryRequest.java | 172 ++++++++++++++++ .../xpack/sql/proto/SqlQueryResponse.java | 122 +++++++++++ .../{plugin => proto}/SqlTypedParamValue.java | 26 +-- .../plugin/SqlClearCursorRequestTests.java | 7 +- .../plugin/SqlClearCursorResponseTests.java | 4 +- .../sql/plugin/SqlQueryRequestTests.java | 18 +- .../sql/plugin/SqlQueryResponseTests.java | 5 +- .../sql/plugin/SqlTranslateRequestTests.java | 7 +- .../xpack/sql/client/HttpClient.java | 37 ++-- .../xpack/sql/execution/PlanExecutor.java | 2 +- 
.../xpack/sql/parser/AstBuilder.java | 2 +- .../xpack/sql/parser/CommandBuilder.java | 4 +- .../xpack/sql/parser/ExpressionBuilder.java | 4 +- .../xpack/sql/parser/LogicalPlanBuilder.java | 2 +- .../xpack/sql/parser/SqlParser.java | 2 +- .../sql/plugin/RestSqlClearCursorAction.java | 10 +- .../xpack/sql/plugin/RestSqlQueryAction.java | 8 +- .../sql/plugin/RestSqlTranslateAction.java | 3 +- .../xpack/sql/plugin/SqlLicenseChecker.java | 8 +- .../xpack/sql/plugin/TextFormat.java | 9 +- .../sql/plugin/TransportSqlQueryAction.java | 3 +- .../xpack/sql/session/Configuration.java | 8 +- .../xpack/sql/session/SqlSession.java | 4 +- .../xpack/sql/action/SqlActionIT.java | 4 +- .../sql/execution/search/CursorTests.java | 5 +- .../xpack/sql/expression/ParameterTests.java | 18 +- .../sql/parser/LikeEscapingParsingTests.java | 8 +- .../logical/command/sys/SysTablesTests.java | 4 +- .../xpack/sql/plugin/CliFormatterTests.java | 17 +- .../xpack/sql/plugin/TextFormatTests.java | 1 + 61 files changed, 909 insertions(+), 589 deletions(-) delete mode 100644 x-pack/plugin/sql/sql-proto/src/main/java/org/elasticsearch/xpack/sql/plugin/MetaColumnInfo.java create mode 100644 x-pack/plugin/sql/sql-proto/src/main/java/org/elasticsearch/xpack/sql/proto/AbstractSqlRequest.java rename x-pack/plugin/sql/sql-proto/src/main/java/org/elasticsearch/xpack/sql/{plugin => proto}/ColumnInfo.java (70%) create mode 100644 x-pack/plugin/sql/sql-proto/src/main/java/org/elasticsearch/xpack/sql/proto/MainResponse.java create mode 100644 x-pack/plugin/sql/sql-proto/src/main/java/org/elasticsearch/xpack/sql/proto/Mode.java create mode 100644 x-pack/plugin/sql/sql-proto/src/main/java/org/elasticsearch/xpack/sql/proto/Protocol.java create mode 100644 x-pack/plugin/sql/sql-proto/src/main/java/org/elasticsearch/xpack/sql/proto/SqlClearCursorRequest.java create mode 100644 x-pack/plugin/sql/sql-proto/src/main/java/org/elasticsearch/xpack/sql/proto/SqlClearCursorResponse.java create mode 100644 x-pack/plugin/sql/sql-proto/src/main/java/org/elasticsearch/xpack/sql/proto/SqlQueryRequest.java create mode 100644 x-pack/plugin/sql/sql-proto/src/main/java/org/elasticsearch/xpack/sql/proto/SqlQueryResponse.java rename x-pack/plugin/sql/sql-proto/src/main/java/org/elasticsearch/xpack/sql/{plugin => proto}/SqlTypedParamValue.java (76%) diff --git a/x-pack/plugin/sql/jdbc/src/main/java/org/elasticsearch/xpack/sql/jdbc/jdbc/JdbcConnection.java b/x-pack/plugin/sql/jdbc/src/main/java/org/elasticsearch/xpack/sql/jdbc/jdbc/JdbcConnection.java index 17f8973cea3..0eb1888487c 100644 --- a/x-pack/plugin/sql/jdbc/src/main/java/org/elasticsearch/xpack/sql/jdbc/jdbc/JdbcConnection.java +++ b/x-pack/plugin/sql/jdbc/src/main/java/org/elasticsearch/xpack/sql/jdbc/jdbc/JdbcConnection.java @@ -43,6 +43,10 @@ public class JdbcConnection implements Connection, JdbcWrapper { private String catalog; private String schema; + /** + * The SQLException is the only type of Exception the JDBC API can throw (and that the user expects). 
+ * If we remove it, we need to make sure no other types of Exceptions (runtime or otherwise) are thrown + */ public JdbcConnection(JdbcConfiguration connectionInfo) throws SQLException { cfg = connectionInfo; client = new JdbcHttpClient(connectionInfo); @@ -428,4 +432,4 @@ public class JdbcConnection implements Connection, JdbcWrapper { int esInfoMinorVersion() throws SQLException { return client.serverInfo().minorVersion; } -} \ No newline at end of file +} diff --git a/x-pack/plugin/sql/jdbc/src/main/java/org/elasticsearch/xpack/sql/jdbc/jdbc/JdbcStatement.java b/x-pack/plugin/sql/jdbc/src/main/java/org/elasticsearch/xpack/sql/jdbc/jdbc/JdbcStatement.java index fab21c54179..c773dd5d17d 100644 --- a/x-pack/plugin/sql/jdbc/src/main/java/org/elasticsearch/xpack/sql/jdbc/jdbc/JdbcStatement.java +++ b/x-pack/plugin/sql/jdbc/src/main/java/org/elasticsearch/xpack/sql/jdbc/jdbc/JdbcStatement.java @@ -7,7 +7,7 @@ package org.elasticsearch.xpack.sql.jdbc.jdbc; import org.elasticsearch.xpack.sql.jdbc.net.client.Cursor; import org.elasticsearch.xpack.sql.jdbc.net.client.RequestMeta; -import org.elasticsearch.xpack.sql.plugin.SqlTypedParamValue; +import org.elasticsearch.xpack.sql.proto.SqlTypedParamValue; import java.sql.Connection; import java.sql.ResultSet; @@ -220,7 +220,7 @@ class JdbcStatement implements Statement, JdbcWrapper { // unset (in this case -1 which the user cannot set) - in this case, the default fetch size is returned // 0 meaning the hint is disabled (the user has called setFetch) // >0 means actual hint - + // tl;dr - unless the user set it, returning the default is fine return requestMeta.fetchSize(); } @@ -402,4 +402,4 @@ class JdbcStatement implements Statement, JdbcWrapper { close(); } } -} \ No newline at end of file +} diff --git a/x-pack/plugin/sql/jdbc/src/main/java/org/elasticsearch/xpack/sql/jdbc/jdbc/PreparedQuery.java b/x-pack/plugin/sql/jdbc/src/main/java/org/elasticsearch/xpack/sql/jdbc/jdbc/PreparedQuery.java index 4aaf337f2b7..06825ee6e3f 100644 --- a/x-pack/plugin/sql/jdbc/src/main/java/org/elasticsearch/xpack/sql/jdbc/jdbc/PreparedQuery.java +++ b/x-pack/plugin/sql/jdbc/src/main/java/org/elasticsearch/xpack/sql/jdbc/jdbc/PreparedQuery.java @@ -6,7 +6,7 @@ package org.elasticsearch.xpack.sql.jdbc.jdbc; import org.elasticsearch.xpack.sql.jdbc.JdbcSQLException; -import org.elasticsearch.xpack.sql.plugin.SqlTypedParamValue; +import org.elasticsearch.xpack.sql.proto.SqlTypedParamValue; import org.elasticsearch.xpack.sql.type.DataType; import java.sql.JDBCType; @@ -73,7 +73,7 @@ class PreparedQuery { */ List params() { return Arrays.stream(this.params).map( - paramInfo -> new SqlTypedParamValue(paramInfo.value, DataType.fromJdbcType(paramInfo.type)) + paramInfo -> new SqlTypedParamValue(DataType.fromJdbcType(paramInfo.type), paramInfo.value) ).collect(Collectors.toList()); } @@ -86,4 +86,4 @@ class PreparedQuery { static PreparedQuery prepare(String sql) throws SQLException { return new PreparedQuery(sql, SqlQueryParameterAnalyzer.parametersCount(sql)); } -} \ No newline at end of file +} diff --git a/x-pack/plugin/sql/jdbc/src/main/java/org/elasticsearch/xpack/sql/jdbc/net/client/JdbcHttpClient.java b/x-pack/plugin/sql/jdbc/src/main/java/org/elasticsearch/xpack/sql/jdbc/net/client/JdbcHttpClient.java index ab4cdff9858..89ee78e0bae 100644 --- a/x-pack/plugin/sql/jdbc/src/main/java/org/elasticsearch/xpack/sql/jdbc/net/client/JdbcHttpClient.java +++ b/x-pack/plugin/sql/jdbc/src/main/java/org/elasticsearch/xpack/sql/jdbc/net/client/JdbcHttpClient.java @@ -5,22 +5,21 @@ 
*/ package org.elasticsearch.xpack.sql.jdbc.net.client; -import org.elasticsearch.action.main.MainResponse; import org.elasticsearch.common.collect.Tuple; import org.elasticsearch.common.unit.TimeValue; import org.elasticsearch.xpack.sql.client.HttpClient; import org.elasticsearch.xpack.sql.jdbc.jdbc.JdbcConfiguration; import org.elasticsearch.xpack.sql.jdbc.net.protocol.ColumnInfo; import org.elasticsearch.xpack.sql.jdbc.net.protocol.InfoResponse; -import org.elasticsearch.xpack.sql.plugin.AbstractSqlQueryRequest; -import org.elasticsearch.xpack.sql.plugin.AbstractSqlRequest; -import org.elasticsearch.xpack.sql.plugin.SqlQueryRequest; -import org.elasticsearch.xpack.sql.plugin.SqlQueryResponse; -import org.elasticsearch.xpack.sql.plugin.SqlTypedParamValue; +import org.elasticsearch.xpack.sql.proto.Mode; +import org.elasticsearch.xpack.sql.proto.Protocol; +import org.elasticsearch.xpack.sql.proto.SqlQueryRequest; +import org.elasticsearch.xpack.sql.proto.MainResponse; +import org.elasticsearch.xpack.sql.proto.SqlQueryResponse; +import org.elasticsearch.xpack.sql.proto.SqlTypedParamValue; import java.sql.SQLException; import java.util.List; -import java.util.TimeZone; import java.util.stream.Collectors; import static org.elasticsearch.xpack.sql.client.shared.StringUtils.EMPTY; @@ -34,6 +33,10 @@ public class JdbcHttpClient { private final JdbcConfiguration conCfg; private InfoResponse serverInfo; + /** + * The SQLException is the only type of Exception the JDBC API can throw (and that the user expects). + * If we remove it, we need to make sure no other types of Exceptions (runtime or otherwise) are thrown + */ public JdbcHttpClient(JdbcConfiguration conCfg) throws SQLException { httpClient = new HttpClient(conCfg); this.conCfg = conCfg; @@ -45,9 +48,9 @@ public class JdbcHttpClient { public Cursor query(String sql, List params, RequestMeta meta) throws SQLException { int fetch = meta.fetchSize() > 0 ? meta.fetchSize() : conCfg.pageSize(); - SqlQueryRequest sqlRequest = new SqlQueryRequest(AbstractSqlRequest.Mode.JDBC, sql, params, null, - AbstractSqlQueryRequest.DEFAULT_TIME_ZONE, - fetch, TimeValue.timeValueMillis(meta.timeoutInMs()), TimeValue.timeValueMillis(meta.queryTimeoutInMs()), ""); + SqlQueryRequest sqlRequest = new SqlQueryRequest(Mode.JDBC, sql, params, null, + Protocol.TIME_ZONE, + fetch, TimeValue.timeValueMillis(meta.timeoutInMs()), TimeValue.timeValueMillis(meta.queryTimeoutInMs())); SqlQueryResponse response = httpClient.query(sqlRequest); return new DefaultCursor(this, response.cursor(), toJdbcColumnInfo(response.columns()), response.rows(), meta); } @@ -57,10 +60,8 @@ public class JdbcHttpClient { * the scroll id to use to fetch the next page. 
*/ public Tuple>> nextPage(String cursor, RequestMeta meta) throws SQLException { - SqlQueryRequest sqlRequest = new SqlQueryRequest().cursor(cursor); - sqlRequest.mode(AbstractSqlRequest.Mode.JDBC); - sqlRequest.requestTimeout(TimeValue.timeValueMillis(meta.timeoutInMs())); - sqlRequest.pageTimeout(TimeValue.timeValueMillis(meta.queryTimeoutInMs())); + SqlQueryRequest sqlRequest = new SqlQueryRequest(Mode.JDBC, cursor, TimeValue.timeValueMillis(meta.timeoutInMs()), + TimeValue.timeValueMillis(meta.queryTimeoutInMs())); SqlQueryResponse response = httpClient.query(sqlRequest); return new Tuple<>(response.cursor(), response.rows()); } @@ -78,13 +79,13 @@ public class JdbcHttpClient { private InfoResponse fetchServerInfo() throws SQLException { MainResponse mainResponse = httpClient.serverInfo(); - return new InfoResponse(mainResponse.getClusterName().value(), mainResponse.getVersion().major, mainResponse.getVersion().minor); + return new InfoResponse(mainResponse.getClusterName(), mainResponse.getVersion().major, mainResponse.getVersion().minor); } /** * Converts REST column metadata into JDBC column metadata */ - private List toJdbcColumnInfo(List columns) { + private List toJdbcColumnInfo(List columns) { return columns.stream().map(columnInfo -> new ColumnInfo(columnInfo.name(), columnInfo.jdbcType(), EMPTY, EMPTY, EMPTY, EMPTY, columnInfo.displaySize()) ).collect(Collectors.toList()); diff --git a/x-pack/plugin/sql/jdbc/src/test/java/org/elasticsearch/xpack/sql/jdbc/jdbc/TypeConverterTests.java b/x-pack/plugin/sql/jdbc/src/test/java/org/elasticsearch/xpack/sql/jdbc/jdbc/TypeConverterTests.java index 612c46fbe56..dc4ba9fa244 100644 --- a/x-pack/plugin/sql/jdbc/src/test/java/org/elasticsearch/xpack/sql/jdbc/jdbc/TypeConverterTests.java +++ b/x-pack/plugin/sql/jdbc/src/test/java/org/elasticsearch/xpack/sql/jdbc/jdbc/TypeConverterTests.java @@ -10,8 +10,8 @@ import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.common.xcontent.XContentHelper; import org.elasticsearch.common.xcontent.json.JsonXContent; import org.elasticsearch.test.ESTestCase; -import org.elasticsearch.xpack.sql.plugin.AbstractSqlRequest; import org.elasticsearch.xpack.sql.plugin.SqlQueryResponse; +import org.elasticsearch.xpack.sql.proto.Mode; import org.joda.time.DateTime; import java.sql.JDBCType; @@ -51,7 +51,7 @@ public class TypeConverterTests extends ESTestCase { XContentBuilder builder = JsonXContent.contentBuilder(); builder.startObject(); builder.field("value"); - SqlQueryResponse.value(builder, AbstractSqlRequest.Mode.JDBC, value); + SqlQueryResponse.value(builder, Mode.JDBC, value); builder.endObject(); builder.close(); Object copy = XContentHelper.convertToMap(BytesReference.bytes(builder), false, builder.contentType()).v2().get("value"); diff --git a/x-pack/plugin/sql/sql-cli/src/main/java/org/elasticsearch/xpack/sql/cli/command/CliSession.java b/x-pack/plugin/sql/sql-cli/src/main/java/org/elasticsearch/xpack/sql/cli/command/CliSession.java index 64f38c2254c..8e030f36dd0 100644 --- a/x-pack/plugin/sql/sql-cli/src/main/java/org/elasticsearch/xpack/sql/cli/command/CliSession.java +++ b/x-pack/plugin/sql/sql-cli/src/main/java/org/elasticsearch/xpack/sql/cli/command/CliSession.java @@ -5,11 +5,12 @@ */ package org.elasticsearch.xpack.sql.cli.command; -import org.elasticsearch.action.main.MainResponse; import org.elasticsearch.xpack.sql.client.HttpClient; import org.elasticsearch.xpack.sql.client.shared.ClientException; import org.elasticsearch.xpack.sql.client.shared.Version; import 
org.elasticsearch.xpack.sql.plugin.AbstractSqlQueryRequest; +import org.elasticsearch.xpack.sql.proto.MainResponse; +import org.elasticsearch.xpack.sql.proto.Protocol; import java.sql.SQLException; @@ -18,7 +19,7 @@ import java.sql.SQLException; */ public class CliSession { private final HttpClient httpClient; - private int fetchSize = AbstractSqlQueryRequest.DEFAULT_FETCH_SIZE; + private int fetchSize = Protocol.FETCH_SIZE; private String fetchSeparator = ""; private boolean debug; diff --git a/x-pack/plugin/sql/sql-cli/src/main/java/org/elasticsearch/xpack/sql/cli/command/ServerInfoCliCommand.java b/x-pack/plugin/sql/sql-cli/src/main/java/org/elasticsearch/xpack/sql/cli/command/ServerInfoCliCommand.java index 635c041da7a..e637386f979 100644 --- a/x-pack/plugin/sql/sql-cli/src/main/java/org/elasticsearch/xpack/sql/cli/command/ServerInfoCliCommand.java +++ b/x-pack/plugin/sql/sql-cli/src/main/java/org/elasticsearch/xpack/sql/cli/command/ServerInfoCliCommand.java @@ -5,8 +5,8 @@ */ package org.elasticsearch.xpack.sql.cli.command; -import org.elasticsearch.action.main.MainResponse; import org.elasticsearch.xpack.sql.cli.CliTerminal; +import org.elasticsearch.xpack.sql.proto.MainResponse; import java.sql.SQLException; import java.util.Locale; @@ -30,7 +30,7 @@ public class ServerInfoCliCommand extends AbstractServerCliCommand { } terminal.line() .text("Node:").em(info.getNodeName()) - .text(" Cluster:").em(info.getClusterName().value()) + .text(" Cluster:").em(info.getClusterName()) .text(" Version:").em(info.getVersion().toString()) .ln(); return true; diff --git a/x-pack/plugin/sql/sql-cli/src/main/java/org/elasticsearch/xpack/sql/cli/command/ServerQueryCliCommand.java b/x-pack/plugin/sql/sql-cli/src/main/java/org/elasticsearch/xpack/sql/cli/command/ServerQueryCliCommand.java index c1fc609c50b..aa8bc499cd2 100644 --- a/x-pack/plugin/sql/sql-cli/src/main/java/org/elasticsearch/xpack/sql/cli/command/ServerQueryCliCommand.java +++ b/x-pack/plugin/sql/sql-cli/src/main/java/org/elasticsearch/xpack/sql/cli/command/ServerQueryCliCommand.java @@ -9,7 +9,7 @@ import org.elasticsearch.xpack.sql.cli.CliTerminal; import org.elasticsearch.xpack.sql.client.HttpClient; import org.elasticsearch.xpack.sql.client.shared.JreHttpUrlConnection; import org.elasticsearch.xpack.sql.plugin.CliFormatter; -import org.elasticsearch.xpack.sql.plugin.SqlQueryResponse; +import org.elasticsearch.xpack.sql.proto.SqlQueryResponse; import java.sql.SQLException; @@ -23,8 +23,8 @@ public class ServerQueryCliCommand extends AbstractServerCliCommand { String data; try { response = cliClient.queryInit(line, cliSession.getFetchSize()); - cliFormatter = new CliFormatter(response); - data = cliFormatter.formatWithHeader(response); + cliFormatter = new CliFormatter(response.columns(), response.rows()); + data = cliFormatter.formatWithHeader(response.columns(), response.rows()); while (true) { handleText(terminal, data); if (response.cursor().isEmpty()) { @@ -36,7 +36,7 @@ public class ServerQueryCliCommand extends AbstractServerCliCommand { terminal.println(cliSession.getFetchSeparator()); } response = cliSession.getClient().nextPage(response.cursor()); - data = cliFormatter.formatWithoutHeader(response); + data = cliFormatter.formatWithoutHeader(response.rows()); } } catch (SQLException e) { if (JreHttpUrlConnection.SQL_STATE_BAD_SERVER.equals(e.getSQLState())) { diff --git a/x-pack/plugin/sql/sql-cli/src/test/java/org/elasticsearch/xpack/sql/cli/CliSessionTests.java 
b/x-pack/plugin/sql/sql-cli/src/test/java/org/elasticsearch/xpack/sql/cli/CliSessionTests.java index befcddf9e7d..e5643ad443a 100644 --- a/x-pack/plugin/sql/sql-cli/src/test/java/org/elasticsearch/xpack/sql/cli/CliSessionTests.java +++ b/x-pack/plugin/sql/sql-cli/src/test/java/org/elasticsearch/xpack/sql/cli/CliSessionTests.java @@ -6,7 +6,6 @@ package org.elasticsearch.xpack.sql.cli; import org.elasticsearch.Build; -import org.elasticsearch.action.main.MainResponse; import org.elasticsearch.cluster.ClusterName; import org.elasticsearch.common.UUIDs; import org.elasticsearch.test.ESTestCase; @@ -14,6 +13,7 @@ import org.elasticsearch.xpack.sql.cli.command.CliSession; import org.elasticsearch.xpack.sql.client.HttpClient; import org.elasticsearch.xpack.sql.client.shared.ClientException; import org.elasticsearch.xpack.sql.client.shared.Version; +import org.elasticsearch.xpack.sql.proto.MainResponse; import java.sql.SQLException; @@ -28,7 +28,7 @@ public class CliSessionTests extends ESTestCase { public void testProperConnection() throws Exception { HttpClient httpClient = mock(HttpClient.class); when(httpClient.serverInfo()).thenReturn(new MainResponse(randomAlphaOfLength(5), org.elasticsearch.Version.CURRENT, - ClusterName.DEFAULT, UUIDs.randomBase64UUID(), Build.CURRENT)); + ClusterName.DEFAULT.value(), UUIDs.randomBase64UUID(), Build.CURRENT)); CliSession cliSession = new CliSession(httpClient); cliSession.checkConnection(); verify(httpClient, times(1)).serverInfo(); @@ -58,10 +58,10 @@ public class CliSessionTests extends ESTestCase { } when(httpClient.serverInfo()).thenReturn(new MainResponse(randomAlphaOfLength(5), org.elasticsearch.Version.fromString(major + "." + minor + ".23"), - ClusterName.DEFAULT, UUIDs.randomBase64UUID(), Build.CURRENT)); + ClusterName.DEFAULT.value(), UUIDs.randomBase64UUID(), Build.CURRENT)); CliSession cliSession = new CliSession(httpClient); expectThrows(ClientException.class, cliSession::checkConnection); verify(httpClient, times(1)).serverInfo(); verifyNoMoreInteractions(httpClient); } -} \ No newline at end of file +} diff --git a/x-pack/plugin/sql/sql-cli/src/test/java/org/elasticsearch/xpack/sql/cli/command/ServerInfoCliCommandTests.java b/x-pack/plugin/sql/sql-cli/src/test/java/org/elasticsearch/xpack/sql/cli/command/ServerInfoCliCommandTests.java index 567cd10531d..e99cb2fb7f7 100644 --- a/x-pack/plugin/sql/sql-cli/src/test/java/org/elasticsearch/xpack/sql/cli/command/ServerInfoCliCommandTests.java +++ b/x-pack/plugin/sql/sql-cli/src/test/java/org/elasticsearch/xpack/sql/cli/command/ServerInfoCliCommandTests.java @@ -6,12 +6,12 @@ package org.elasticsearch.xpack.sql.cli.command; import org.elasticsearch.Build; -import org.elasticsearch.action.main.MainResponse; import org.elasticsearch.cluster.ClusterName; import org.elasticsearch.common.UUIDs; import org.elasticsearch.test.ESTestCase; import org.elasticsearch.xpack.sql.cli.TestTerminal; import org.elasticsearch.xpack.sql.client.HttpClient; +import org.elasticsearch.xpack.sql.proto.MainResponse; import static org.mockito.Mockito.mock; import static org.mockito.Mockito.times; @@ -36,7 +36,7 @@ public class ServerInfoCliCommandTests extends ESTestCase { HttpClient client = mock(HttpClient.class); CliSession cliSession = new CliSession(client); when(client.serverInfo()).thenReturn(new MainResponse("my_node", org.elasticsearch.Version.fromString("1.2.3"), - new ClusterName("my_cluster"), UUIDs.randomBase64UUID(), Build.CURRENT)); + new ClusterName("my_cluster").value(), UUIDs.randomBase64UUID(), 
Build.CURRENT)); ServerInfoCliCommand cliCommand = new ServerInfoCliCommand(); assertTrue(cliCommand.handle(testTerminal, cliSession, "info")); assertEquals(testTerminal.toString(), "Node:my_node Cluster:my_cluster Version:1.2.3\n"); @@ -44,4 +44,4 @@ public class ServerInfoCliCommandTests extends ESTestCase { verifyNoMoreInteractions(client); } -} \ No newline at end of file +} diff --git a/x-pack/plugin/sql/sql-cli/src/test/java/org/elasticsearch/xpack/sql/cli/command/ServerQueryCliCommandTests.java b/x-pack/plugin/sql/sql-cli/src/test/java/org/elasticsearch/xpack/sql/cli/command/ServerQueryCliCommandTests.java index 4385731313a..86ebfa52fe4 100644 --- a/x-pack/plugin/sql/sql-cli/src/test/java/org/elasticsearch/xpack/sql/cli/command/ServerQueryCliCommandTests.java +++ b/x-pack/plugin/sql/sql-cli/src/test/java/org/elasticsearch/xpack/sql/cli/command/ServerQueryCliCommandTests.java @@ -8,8 +8,8 @@ package org.elasticsearch.xpack.sql.cli.command; import org.elasticsearch.test.ESTestCase; import org.elasticsearch.xpack.sql.cli.TestTerminal; import org.elasticsearch.xpack.sql.client.HttpClient; -import org.elasticsearch.xpack.sql.plugin.ColumnInfo; -import org.elasticsearch.xpack.sql.plugin.SqlQueryResponse; +import org.elasticsearch.xpack.sql.proto.ColumnInfo; +import org.elasticsearch.xpack.sql.proto.SqlQueryResponse; import java.sql.JDBCType; import java.sql.SQLException; @@ -119,4 +119,4 @@ public class ServerQueryCliCommandTests extends ESTestCase { } return new SqlQueryResponse(cursor, columns, rows); } -} \ No newline at end of file +} diff --git a/x-pack/plugin/sql/sql-proto/src/main/java/org/elasticsearch/xpack/sql/plugin/AbstractSqlQueryRequest.java b/x-pack/plugin/sql/sql-proto/src/main/java/org/elasticsearch/xpack/sql/plugin/AbstractSqlQueryRequest.java index 8969b881619..8d34d59c1e0 100644 --- a/x-pack/plugin/sql/sql-proto/src/main/java/org/elasticsearch/xpack/sql/plugin/AbstractSqlQueryRequest.java +++ b/x-pack/plugin/sql/sql-proto/src/main/java/org/elasticsearch/xpack/sql/plugin/AbstractSqlQueryRequest.java @@ -10,12 +10,17 @@ import org.elasticsearch.common.Nullable; import org.elasticsearch.common.ParseField; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; +import org.elasticsearch.common.io.stream.Writeable; import org.elasticsearch.common.unit.TimeValue; import org.elasticsearch.common.xcontent.ObjectParser; import org.elasticsearch.common.xcontent.ToXContentFragment; import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.index.query.AbstractQueryBuilder; import org.elasticsearch.index.query.QueryBuilder; +import org.elasticsearch.xpack.sql.proto.Mode; +import org.elasticsearch.xpack.sql.proto.Protocol; +import org.elasticsearch.xpack.sql.proto.SqlTypedParamValue; +import org.elasticsearch.xpack.sql.type.DataType; import java.io.IOException; import java.util.Collections; @@ -28,20 +33,12 @@ import java.util.function.Supplier; * Base class for requests that contain sql queries (Query and Translate) */ public abstract class AbstractSqlQueryRequest extends AbstractSqlRequest implements CompositeIndicesRequest, ToXContentFragment { - public static final TimeZone DEFAULT_TIME_ZONE = TimeZone.getTimeZone("UTC"); - - /** - * Global choice for the default fetch size. 
- */ - public static final int DEFAULT_FETCH_SIZE = 1000; - public static final TimeValue DEFAULT_REQUEST_TIMEOUT = TimeValue.timeValueSeconds(90); - public static final TimeValue DEFAULT_PAGE_TIMEOUT = TimeValue.timeValueSeconds(45); private String query = ""; - private TimeZone timeZone = DEFAULT_TIME_ZONE; - private int fetchSize = DEFAULT_FETCH_SIZE; - private TimeValue requestTimeout = DEFAULT_REQUEST_TIMEOUT; - private TimeValue pageTimeout = DEFAULT_PAGE_TIMEOUT; + private TimeZone timeZone = Protocol.TIME_ZONE; + private int fetchSize = Protocol.FETCH_SIZE; + private TimeValue requestTimeout = Protocol.REQUEST_TIMEOUT; + private TimeValue pageTimeout = Protocol.PAGE_TIMEOUT; @Nullable private QueryBuilder filter = null; private List params = Collections.emptyList(); @@ -69,11 +66,10 @@ public abstract class AbstractSqlQueryRequest extends AbstractSqlRequest impleme parser.declareObjectArray(AbstractSqlQueryRequest::params, (p, c) -> SqlTypedParamValue.fromXContent(p), new ParseField("params")); parser.declareString((request, zoneId) -> request.timeZone(TimeZone.getTimeZone(zoneId)), new ParseField("time_zone")); parser.declareInt(AbstractSqlQueryRequest::fetchSize, new ParseField("fetch_size")); + parser.declareString((request, timeout) -> request.requestTimeout(TimeValue.parseTimeValue(timeout, Protocol.REQUEST_TIMEOUT, + "request_timeout")), new ParseField("request_timeout")); parser.declareString( - (request, timeout) -> request.requestTimeout(TimeValue.parseTimeValue(timeout, DEFAULT_REQUEST_TIMEOUT, "request_timeout")), - new ParseField("request_timeout")); - parser.declareString( - (request, timeout) -> request.pageTimeout(TimeValue.parseTimeValue(timeout, DEFAULT_PAGE_TIMEOUT, "page_timeout")), + (request, timeout) -> request.pageTimeout(TimeValue.parseTimeValue(timeout, Protocol.PAGE_TIMEOUT, "page_timeout")), new ParseField("page_timeout")); parser.declareObject(AbstractSqlQueryRequest::filter, (p, c) -> AbstractQueryBuilder.parseInnerQueryBuilder(p), new ParseField("filter")); @@ -185,7 +181,7 @@ public abstract class AbstractSqlQueryRequest extends AbstractSqlRequest impleme public AbstractSqlQueryRequest(StreamInput in) throws IOException { super(in); query = in.readString(); - params = in.readList(SqlTypedParamValue::new); + params = in.readList(AbstractSqlQueryRequest::readSqlTypedParamValue); timeZone = TimeZone.getTimeZone(in.readString()); fetchSize = in.readVInt(); requestTimeout = in.readTimeValue(); @@ -193,11 +189,23 @@ public abstract class AbstractSqlQueryRequest extends AbstractSqlRequest impleme filter = in.readOptionalNamedWriteable(QueryBuilder.class); } + public static void writeSqlTypedParamValue(StreamOutput out, SqlTypedParamValue value) throws IOException { + out.writeEnum(value.dataType); + out.writeGenericValue(value.value); + } + + public static SqlTypedParamValue readSqlTypedParamValue(StreamInput in) throws IOException { + return new SqlTypedParamValue(in.readEnum(DataType.class), in.readGenericValue()); + } + @Override public void writeTo(StreamOutput out) throws IOException { super.writeTo(out); out.writeString(query); - out.writeList(params); + out.writeVInt(params.size()); + for (SqlTypedParamValue param: params) { + writeSqlTypedParamValue(out, param); + } out.writeString(timeZone.getID()); out.writeVInt(fetchSize); out.writeTimeValue(requestTimeout); @@ -224,36 +232,4 @@ public abstract class AbstractSqlQueryRequest extends AbstractSqlRequest impleme public int hashCode() { return Objects.hash(super.hashCode(), query, timeZone, fetchSize, 
requestTimeout, pageTimeout, filter); } - - @Override - public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { - if (query != null) { - builder.field("query", query); - } - if (this.params.isEmpty() == false) { - builder.startArray("params"); - for (SqlTypedParamValue val : this.params) { - val.toXContent(builder, params); - } - builder.endArray(); - } - if (timeZone != null) { - builder.field("time_zone", timeZone.getID()); - } - if (fetchSize != DEFAULT_FETCH_SIZE) { - builder.field("fetch_size", fetchSize); - } - if (requestTimeout != DEFAULT_REQUEST_TIMEOUT) { - builder.field("request_timeout", requestTimeout.getStringRep()); - } - if (pageTimeout != DEFAULT_PAGE_TIMEOUT) { - builder.field("page_timeout", pageTimeout.getStringRep()); - } - if (filter != null) { - builder.field("filter"); - filter.toXContent(builder, params); - } - return builder; - } - } diff --git a/x-pack/plugin/sql/sql-proto/src/main/java/org/elasticsearch/xpack/sql/plugin/AbstractSqlRequest.java b/x-pack/plugin/sql/sql-proto/src/main/java/org/elasticsearch/xpack/sql/plugin/AbstractSqlRequest.java index bc4b1e81e44..2cb23f796d6 100644 --- a/x-pack/plugin/sql/sql-proto/src/main/java/org/elasticsearch/xpack/sql/plugin/AbstractSqlRequest.java +++ b/x-pack/plugin/sql/sql-proto/src/main/java/org/elasticsearch/xpack/sql/plugin/AbstractSqlRequest.java @@ -10,9 +10,9 @@ import org.elasticsearch.action.ActionRequestValidationException; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.xcontent.ToXContent; +import org.elasticsearch.xpack.sql.proto.Mode; import java.io.IOException; -import java.util.Locale; import java.util.Objects; import static org.elasticsearch.action.ValidateActions.addValidationError; @@ -24,24 +24,6 @@ import static org.elasticsearch.action.ValidateActions.addValidationError; */ public abstract class AbstractSqlRequest extends ActionRequest implements ToXContent { - public enum Mode { - PLAIN, - JDBC; - - public static Mode fromString(String mode) { - if (mode == null) { - return PLAIN; - } - return Mode.valueOf(mode.toUpperCase(Locale.ROOT)); - } - - - @Override - public String toString() { - return this.name().toLowerCase(Locale.ROOT); - } - } - private Mode mode = Mode.PLAIN; protected AbstractSqlRequest() { diff --git a/x-pack/plugin/sql/sql-proto/src/main/java/org/elasticsearch/xpack/sql/plugin/CliFormatter.java b/x-pack/plugin/sql/sql-proto/src/main/java/org/elasticsearch/xpack/sql/plugin/CliFormatter.java index 9d9a9ea04a4..359652fa4f2 100644 --- a/x-pack/plugin/sql/sql-proto/src/main/java/org/elasticsearch/xpack/sql/plugin/CliFormatter.java +++ b/x-pack/plugin/sql/sql-proto/src/main/java/org/elasticsearch/xpack/sql/plugin/CliFormatter.java @@ -8,6 +8,7 @@ package org.elasticsearch.xpack.sql.plugin; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.io.stream.Writeable; +import org.elasticsearch.xpack.sql.proto.ColumnInfo; import java.io.IOException; import java.util.Arrays; @@ -28,19 +29,19 @@ public class CliFormatter implements Writeable { /** * Create a new {@linkplain CliFormatter} for formatting responses similar - * to the provided {@link SqlQueryResponse}. + * to the provided columns and rows. */ - public CliFormatter(SqlQueryResponse response) { + public CliFormatter(List columns, List> rows) { // Figure out the column widths: // 1. 
Start with the widths of the column names - width = new int[response.columns().size()]; + width = new int[columns.size()]; for (int i = 0; i < width.length; i++) { // TODO read the width from the data type? - width[i] = Math.max(MIN_COLUMN_WIDTH, response.columns().get(i).name().length()); + width[i] = Math.max(MIN_COLUMN_WIDTH, columns.get(i).name().length()); } // 2. Expand columns to fit the largest value - for (List row : response.rows()) { + for (List row : rows) { for (int i = 0; i < width.length; i++) { // TODO are we sure toString is correct here? What about dates that come back as longs. // Tracked by https://github.com/elastic/x-pack-elasticsearch/issues/3081 @@ -62,15 +63,15 @@ public class CliFormatter implements Writeable { * Format the provided {@linkplain SqlQueryResponse} for the CLI * including the header lines. */ - public String formatWithHeader(SqlQueryResponse response) { + public String formatWithHeader(List columns, List> rows) { // The header lines - StringBuilder sb = new StringBuilder(estimateSize(response.rows().size() + 2)); + StringBuilder sb = new StringBuilder(estimateSize(rows.size() + 2)); for (int i = 0; i < width.length; i++) { if (i > 0) { sb.append('|'); } - String name = response.columns().get(i).name(); + String name = columns.get(i).name(); // left padding int leftPadding = (width[i] - name.length()) / 2; for (int j = 0; j < leftPadding; j++) { @@ -98,19 +99,19 @@ public class CliFormatter implements Writeable { /* Now format the results. Sadly, this means that column * widths are entirely determined by the first batch of * results. */ - return formatWithoutHeader(sb, response); + return formatWithoutHeader(sb, rows); } /** * Format the provided {@linkplain SqlQueryResponse} for the CLI * without the header lines. */ - public String formatWithoutHeader(SqlQueryResponse response) { - return formatWithoutHeader(new StringBuilder(estimateSize(response.rows().size())), response); + public String formatWithoutHeader(List> rows) { + return formatWithoutHeader(new StringBuilder(estimateSize(rows.size())), rows); } - private String formatWithoutHeader(StringBuilder sb, SqlQueryResponse response) { - for (List row : response.rows()) { + private String formatWithoutHeader(StringBuilder sb, List> rows) { + for (List row : rows) { for (int i = 0; i < width.length; i++) { if (i > 0) { sb.append('|'); @@ -138,7 +139,7 @@ public class CliFormatter implements Writeable { } /** - * Pick a good estimate of the buffer size needed to contain the rows. + * Pick a good estimate of the buffer size needed to contain the rows. */ int estimateSize(int rows) { /* Each column has either a '|' or a '\n' after it diff --git a/x-pack/plugin/sql/sql-proto/src/main/java/org/elasticsearch/xpack/sql/plugin/MetaColumnInfo.java b/x-pack/plugin/sql/sql-proto/src/main/java/org/elasticsearch/xpack/sql/plugin/MetaColumnInfo.java deleted file mode 100644 index 72d5932f511..00000000000 --- a/x-pack/plugin/sql/sql-proto/src/main/java/org/elasticsearch/xpack/sql/plugin/MetaColumnInfo.java +++ /dev/null @@ -1,191 +0,0 @@ -/* - * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one - * or more contributor license agreements. Licensed under the Elastic License; - * you may not use this file except in compliance with the Elastic License. 
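For context, the reworked CliFormatter above is now driven with raw columns and rows instead of a whole SqlQueryResponse. A minimal sketch of the intended call pattern, assuming hypothetical firstPage and nextPage response objects (it mirrors the ServerQueryCliCommand change earlier in this patch):

    // column widths are computed once, from the first batch of columns and rows
    CliFormatter formatter = new CliFormatter(firstPage.columns(), firstPage.rows());
    String head = formatter.formatWithHeader(firstPage.columns(), firstPage.rows());
    // later pages reuse the same widths and skip the header
    String more = formatter.formatWithoutHeader(nextPage.rows());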
- */ -package org.elasticsearch.xpack.sql.plugin; - -import org.elasticsearch.common.Nullable; -import org.elasticsearch.common.ParseField; -import org.elasticsearch.common.Strings; -import org.elasticsearch.common.io.stream.StreamInput; -import org.elasticsearch.common.io.stream.StreamOutput; -import org.elasticsearch.common.io.stream.Writeable; -import org.elasticsearch.common.xcontent.ConstructingObjectParser; -import org.elasticsearch.common.xcontent.ToXContentObject; -import org.elasticsearch.common.xcontent.XContentBuilder; -import org.elasticsearch.common.xcontent.XContentParser; - -import java.io.IOException; -import java.sql.JDBCType; -import java.util.Objects; - -import static org.elasticsearch.common.xcontent.ConstructingObjectParser.constructorArg; -import static org.elasticsearch.common.xcontent.ConstructingObjectParser.optionalConstructorArg; - -/** - * Information about a column returned by the listColumns response - */ -public class MetaColumnInfo implements Writeable, ToXContentObject { - - private static final ConstructingObjectParser PARSER = - new ConstructingObjectParser<>("column_info", true, objects -> - new MetaColumnInfo( - (String) objects[0], - (String) objects[1], - (String) objects[2], - objects[3] == null ? null : JDBCType.valueOf((int) objects[3]), - objects[4] == null ? 0 : (int) objects[4], - (int) objects[5])); - - private static final ParseField TABLE = new ParseField("table"); - private static final ParseField NAME = new ParseField("name"); - private static final ParseField ES_TYPE = new ParseField("type"); - private static final ParseField JDBC_TYPE = new ParseField("jdbc_type"); - private static final ParseField SIZE = new ParseField("size"); - private static final ParseField POSITION = new ParseField("position"); - - static { - PARSER.declareString(constructorArg(), TABLE); - PARSER.declareString(constructorArg(), NAME); - PARSER.declareString(constructorArg(), ES_TYPE); - PARSER.declareInt(optionalConstructorArg(), JDBC_TYPE); - PARSER.declareInt(optionalConstructorArg(), SIZE); - PARSER.declareInt(constructorArg(), POSITION); - } - - private final String table; - private final String name; - private final String esType; - @Nullable - private final JDBCType jdbcType; - private final int size; - private final int position; - - public MetaColumnInfo(String table, String name, String esType, JDBCType jdbcType, int size, int position) { - this.table = table; - this.name = name; - this.esType = esType; - this.jdbcType = jdbcType; - this.size = size; - this.position = position; - } - - public MetaColumnInfo(String table, String name, String esType, int position) { - this(table, name, esType, null, 0, position); - } - - MetaColumnInfo(StreamInput in) throws IOException { - table = in.readString(); - name = in.readString(); - esType = in.readString(); - if (in.readBoolean()) { - jdbcType = JDBCType.valueOf(in.readVInt()); - size = in.readVInt(); - } else { - jdbcType = null; - size = 0; - } - position = in.readVInt(); - } - - @Override - public void writeTo(StreamOutput out) throws IOException { - out.writeString(table); - out.writeString(name); - out.writeString(esType); - if (jdbcType != null) { - out.writeBoolean(true); - out.writeVInt(jdbcType.getVendorTypeNumber()); - out.writeVInt(size); - } else { - out.writeBoolean(false); - } - out.writeVInt(position); - } - - @Override - public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { - builder.startObject(); - builder.field("table", table); - builder.field("name", 
name); - builder.field("type", esType); - if (jdbcType != null) { - builder.field("jdbc_type", jdbcType.getVendorTypeNumber()); - builder.field("size", size); - } - builder.field("position", position); - return builder.endObject(); - } - - - public static MetaColumnInfo fromXContent(XContentParser parser) { - return PARSER.apply(parser, null); - } - - /** - * Name of the table. - */ - public String table() { - return table; - } - - /** - * Name of the column. - */ - public String name() { - return name; - } - - /** - * The type of the column in Elasticsearch. - */ - public String esType() { - return esType; - } - - /** - * The type of the column as it would be returned by a JDBC driver. - */ - public JDBCType jdbcType() { - return jdbcType; - } - - /** - * Precision - */ - public int size() { - return size; - } - - /** - * Column position with in the tables - */ - public int position() { - return position; - } - - @Override - public String toString() { - return Strings.toString(this); - } - - @Override - public boolean equals(Object o) { - if (this == o) return true; - if (o == null || getClass() != o.getClass()) return false; - MetaColumnInfo that = (MetaColumnInfo) o; - return size == that.size && - position == that.position && - Objects.equals(table, that.table) && - Objects.equals(name, that.name) && - Objects.equals(esType, that.esType) && - jdbcType == that.jdbcType; - } - - @Override - public int hashCode() { - return Objects.hash(table, name, esType, jdbcType, size, position); - } - -} diff --git a/x-pack/plugin/sql/sql-proto/src/main/java/org/elasticsearch/xpack/sql/plugin/SqlClearCursorAction.java b/x-pack/plugin/sql/sql-proto/src/main/java/org/elasticsearch/xpack/sql/plugin/SqlClearCursorAction.java index ed64fa2a41e..f0b91640f98 100644 --- a/x-pack/plugin/sql/sql-proto/src/main/java/org/elasticsearch/xpack/sql/plugin/SqlClearCursorAction.java +++ b/x-pack/plugin/sql/sql-proto/src/main/java/org/elasticsearch/xpack/sql/plugin/SqlClearCursorAction.java @@ -13,7 +13,6 @@ public class SqlClearCursorAction public static final SqlClearCursorAction INSTANCE = new SqlClearCursorAction(); public static final String NAME = "indices:data/read/sql/close_cursor"; - public static final String REST_ENDPOINT = "/_xpack/sql/close"; private SqlClearCursorAction() { super(NAME); diff --git a/x-pack/plugin/sql/sql-proto/src/main/java/org/elasticsearch/xpack/sql/plugin/SqlClearCursorRequest.java b/x-pack/plugin/sql/sql-proto/src/main/java/org/elasticsearch/xpack/sql/plugin/SqlClearCursorRequest.java index 0dfb9f71e38..45dda285887 100644 --- a/x-pack/plugin/sql/sql-proto/src/main/java/org/elasticsearch/xpack/sql/plugin/SqlClearCursorRequest.java +++ b/x-pack/plugin/sql/sql-proto/src/main/java/org/elasticsearch/xpack/sql/plugin/SqlClearCursorRequest.java @@ -10,9 +10,9 @@ import org.elasticsearch.common.ParseField; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.xcontent.ConstructingObjectParser; -import org.elasticsearch.common.xcontent.ToXContentObject; import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.common.xcontent.XContentParser; +import org.elasticsearch.xpack.sql.proto.Mode; import java.io.IOException; import java.util.Objects; @@ -23,13 +23,13 @@ import static org.elasticsearch.common.xcontent.ConstructingObjectParser.constru /** * Request to clean all SQL resources associated with the cursor */ -public class SqlClearCursorRequest extends AbstractSqlRequest 
implements ToXContentObject { +public class SqlClearCursorRequest extends AbstractSqlRequest { private static final ConstructingObjectParser PARSER = - new ConstructingObjectParser<>(SqlClearCursorAction.NAME, true, (objects, mode) -> new SqlClearCursorRequest( - mode, - (String) objects[0] - )); + new ConstructingObjectParser<>(SqlClearCursorAction.NAME, true, (objects, mode) -> new SqlClearCursorRequest( + mode, + (String) objects[0] + )); static { PARSER.declareString(constructorArg(), new ParseField("cursor")); @@ -96,13 +96,11 @@ public class SqlClearCursorRequest extends AbstractSqlRequest implements ToXCont @Override public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { - builder.startObject(); - builder.field("cursor", cursor); - builder.endObject(); - return builder; + // This is needed just to test round-trip compatibility with proto.SqlClearCursorRequest + return new org.elasticsearch.xpack.sql.proto.SqlClearCursorRequest(mode(), cursor).toXContent(builder, params); } public static SqlClearCursorRequest fromXContent(XContentParser parser, Mode mode) { return PARSER.apply(parser, mode); } -} \ No newline at end of file +} diff --git a/x-pack/plugin/sql/sql-proto/src/main/java/org/elasticsearch/xpack/sql/plugin/SqlClearCursorResponse.java b/x-pack/plugin/sql/sql-proto/src/main/java/org/elasticsearch/xpack/sql/plugin/SqlClearCursorResponse.java index b157d65dfff..3bb3df9a47f 100644 --- a/x-pack/plugin/sql/sql-proto/src/main/java/org/elasticsearch/xpack/sql/plugin/SqlClearCursorResponse.java +++ b/x-pack/plugin/sql/sql-proto/src/main/java/org/elasticsearch/xpack/sql/plugin/SqlClearCursorResponse.java @@ -6,13 +6,10 @@ package org.elasticsearch.xpack.sql.plugin; import org.elasticsearch.action.ActionResponse; -import org.elasticsearch.common.ParseField; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; -import org.elasticsearch.common.xcontent.ObjectParser; import org.elasticsearch.common.xcontent.StatusToXContentObject; import org.elasticsearch.common.xcontent.XContentBuilder; -import org.elasticsearch.common.xcontent.XContentParser; import org.elasticsearch.rest.RestStatus; import java.io.IOException; @@ -20,20 +17,13 @@ import java.util.Objects; import static org.elasticsearch.rest.RestStatus.NOT_FOUND; import static org.elasticsearch.rest.RestStatus.OK; +import static org.elasticsearch.xpack.sql.proto.SqlClearCursorResponse.SUCCEEDED; /** * Response to the request to clean all SQL resources associated with the cursor */ public class SqlClearCursorResponse extends ActionResponse implements StatusToXContentObject { - private static final ParseField SUCCEEDED = new ParseField("succeeded"); - public static final ObjectParser PARSER = - new ObjectParser<>(SqlClearCursorAction.NAME, true, SqlClearCursorResponse::new); - static { - PARSER.declareBoolean(SqlClearCursorResponse::setSucceeded, SUCCEEDED); - } - - private boolean succeeded; public SqlClearCursorResponse(boolean succeeded) { @@ -93,9 +83,4 @@ public class SqlClearCursorResponse extends ActionResponse implements StatusToXC return Objects.hash(succeeded); } - public static SqlClearCursorResponse fromXContent(XContentParser parser) { - return PARSER.apply(parser, null); - } - - } diff --git a/x-pack/plugin/sql/sql-proto/src/main/java/org/elasticsearch/xpack/sql/plugin/SqlQueryAction.java b/x-pack/plugin/sql/sql-proto/src/main/java/org/elasticsearch/xpack/sql/plugin/SqlQueryAction.java index fd46799608c..cbcf626adad 100644 --- 
a/x-pack/plugin/sql/sql-proto/src/main/java/org/elasticsearch/xpack/sql/plugin/SqlQueryAction.java +++ b/x-pack/plugin/sql/sql-proto/src/main/java/org/elasticsearch/xpack/sql/plugin/SqlQueryAction.java @@ -12,7 +12,6 @@ public class SqlQueryAction extends Action PARSER = objectParser(SqlQueryRequest::new); public static final ParseField CURSOR = new ParseField("cursor"); @@ -37,7 +38,7 @@ public class SqlQueryRequest extends AbstractSqlQueryRequest implements ToXConte static { PARSER.declareString(SqlQueryRequest::cursor, CURSOR); PARSER.declareObject(SqlQueryRequest::filter, - (p, c) -> AbstractQueryBuilder.parseInnerQueryBuilder(p), FILTER); + (p, c) -> AbstractQueryBuilder.parseInnerQueryBuilder(p), FILTER); } private String cursor = ""; @@ -108,24 +109,15 @@ public class SqlQueryRequest extends AbstractSqlQueryRequest implements ToXConte @Override public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { - builder.startObject(); - super.toXContent(builder, params); - if (cursor != null) { - builder.field("cursor", cursor); - } - builder.endObject(); - return builder; - } - - @Override - public boolean isFragment() { - return false; + // This is needed just to test round-trip compatibility with proto.SqlQueryRequest + return new org.elasticsearch.xpack.sql.proto.SqlQueryRequest(mode(), query(), params(), timeZone(), fetchSize(), + requestTimeout(), pageTimeout(), filter(), cursor()).toXContent(builder, params); } public static SqlQueryRequest fromXContent(XContentParser parser, Mode mode) { - SqlQueryRequest request = PARSER.apply(parser, null); + SqlQueryRequest request = PARSER.apply(parser, null); request.mode(mode); return request; } -} \ No newline at end of file +} diff --git a/x-pack/plugin/sql/sql-proto/src/main/java/org/elasticsearch/xpack/sql/plugin/SqlQueryRequestBuilder.java b/x-pack/plugin/sql/sql-proto/src/main/java/org/elasticsearch/xpack/sql/plugin/SqlQueryRequestBuilder.java index a08af6f8ce4..1eddd09d89d 100644 --- a/x-pack/plugin/sql/sql-proto/src/main/java/org/elasticsearch/xpack/sql/plugin/SqlQueryRequestBuilder.java +++ b/x-pack/plugin/sql/sql-proto/src/main/java/org/elasticsearch/xpack/sql/plugin/SqlQueryRequestBuilder.java @@ -9,25 +9,22 @@ import org.elasticsearch.action.ActionRequestBuilder; import org.elasticsearch.client.ElasticsearchClient; import org.elasticsearch.common.unit.TimeValue; import org.elasticsearch.index.query.QueryBuilder; -import org.elasticsearch.xpack.sql.plugin.AbstractSqlRequest.Mode; +import org.elasticsearch.xpack.sql.proto.Mode; +import org.elasticsearch.xpack.sql.proto.Protocol; +import org.elasticsearch.xpack.sql.proto.SqlTypedParamValue; import java.util.Collections; import java.util.List; import java.util.TimeZone; -import static org.elasticsearch.xpack.sql.plugin.AbstractSqlQueryRequest.DEFAULT_FETCH_SIZE; -import static org.elasticsearch.xpack.sql.plugin.AbstractSqlQueryRequest.DEFAULT_PAGE_TIMEOUT; -import static org.elasticsearch.xpack.sql.plugin.AbstractSqlQueryRequest.DEFAULT_REQUEST_TIMEOUT; -import static org.elasticsearch.xpack.sql.plugin.AbstractSqlQueryRequest.DEFAULT_TIME_ZONE; - /** * The builder to build sql request */ public class SqlQueryRequestBuilder extends ActionRequestBuilder { public SqlQueryRequestBuilder(ElasticsearchClient client, SqlQueryAction action) { - this(client, action, "", Collections.emptyList(), null, DEFAULT_TIME_ZONE, DEFAULT_FETCH_SIZE, DEFAULT_REQUEST_TIMEOUT, - DEFAULT_PAGE_TIMEOUT, "", Mode.PLAIN); + this(client, action, "", Collections.emptyList(), 
null, Protocol.TIME_ZONE, Protocol.FETCH_SIZE, Protocol.REQUEST_TIMEOUT, + Protocol.PAGE_TIMEOUT, "", Mode.PLAIN); } public SqlQueryRequestBuilder(ElasticsearchClient client, SqlQueryAction action, String query, List params, diff --git a/x-pack/plugin/sql/sql-proto/src/main/java/org/elasticsearch/xpack/sql/plugin/SqlQueryResponse.java b/x-pack/plugin/sql/sql-proto/src/main/java/org/elasticsearch/xpack/sql/plugin/SqlQueryResponse.java index e0de05cd774..118ba81f82d 100644 --- a/x-pack/plugin/sql/sql-proto/src/main/java/org/elasticsearch/xpack/sql/plugin/SqlQueryResponse.java +++ b/x-pack/plugin/sql/sql-proto/src/main/java/org/elasticsearch/xpack/sql/plugin/SqlQueryResponse.java @@ -7,49 +7,28 @@ package org.elasticsearch.xpack.sql.plugin; import org.elasticsearch.action.ActionResponse; import org.elasticsearch.common.Nullable; -import org.elasticsearch.common.ParseField; import org.elasticsearch.common.Strings; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; -import org.elasticsearch.common.xcontent.ConstructingObjectParser; -import org.elasticsearch.common.xcontent.ObjectParser.ValueType; import org.elasticsearch.common.xcontent.ToXContentObject; import org.elasticsearch.common.xcontent.XContentBuilder; -import org.elasticsearch.common.xcontent.XContentParser; +import org.elasticsearch.xpack.sql.proto.ColumnInfo; +import org.elasticsearch.xpack.sql.proto.Mode; import org.joda.time.ReadableDateTime; import java.io.IOException; +import java.sql.JDBCType; import java.util.ArrayList; import java.util.List; import java.util.Objects; import static java.util.Collections.unmodifiableList; -import static org.elasticsearch.common.xcontent.ConstructingObjectParser.constructorArg; -import static org.elasticsearch.common.xcontent.ConstructingObjectParser.optionalConstructorArg; -import static org.elasticsearch.common.xcontent.XContentParserUtils.parseFieldsValue; /** * Response to perform an sql query */ public class SqlQueryResponse extends ActionResponse implements ToXContentObject { - @SuppressWarnings("unchecked") - public static final ConstructingObjectParser PARSER = new ConstructingObjectParser<>("sql", true, - objects -> new SqlQueryResponse( - objects[0] == null ? 
"" : (String) objects[0], - (List) objects[1], - (List>) objects[2])); - - public static final ParseField CURSOR = new ParseField("cursor"); - public static final ParseField COLUMNS = new ParseField("columns"); - public static final ParseField ROWS = new ParseField("rows"); - - static { - PARSER.declareString(optionalConstructorArg(), CURSOR); - PARSER.declareObjectArray(optionalConstructorArg(), (p, c) -> ColumnInfo.fromXContent(p), COLUMNS); - PARSER.declareField(constructorArg(), (p, c) -> parseRows(p), ROWS, ValueType.OBJECT_ARRAY); - } - // TODO: Simplify cursor handling private String cursor; private List columns; @@ -109,7 +88,7 @@ public class SqlQueryResponse extends ActionResponse implements ToXContentObject int columnCount = in.readVInt(); List columns = new ArrayList<>(columnCount); for (int c = 0; c < columnCount; c++) { - columns.add(new ColumnInfo(in)); + columns.add(readColumnInfo(in)); } this.columns = unmodifiableList(columns); } else { @@ -139,7 +118,7 @@ public class SqlQueryResponse extends ActionResponse implements ToXContentObject out.writeBoolean(true); out.writeVInt(columns.size()); for (ColumnInfo column : columns) { - column.writeTo(out); + writeColumnInfo(out, column); } } out.writeVInt(rows.size()); @@ -155,7 +134,7 @@ public class SqlQueryResponse extends ActionResponse implements ToXContentObject @Override public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { - AbstractSqlRequest.Mode mode = AbstractSqlRequest.Mode.fromString(params.param("mode")); + Mode mode = Mode.fromString(params.param("mode")); builder.startObject(); { if (columns != null) { @@ -187,8 +166,8 @@ public class SqlQueryResponse extends ActionResponse implements ToXContentObject /** * Serializes the provided value in SQL-compatible way based on the client mode */ - public static XContentBuilder value(XContentBuilder builder, AbstractSqlRequest.Mode mode, Object value) throws IOException { - if (mode == AbstractSqlRequest.Mode.JDBC && value instanceof ReadableDateTime) { + public static XContentBuilder value(XContentBuilder builder, Mode mode, Object value) throws IOException { + if (mode == Mode.JDBC && value instanceof ReadableDateTime) { // JDBC cannot parse dates in string format builder.value(((ReadableDateTime) value).getMillis()); } else { @@ -197,34 +176,33 @@ public class SqlQueryResponse extends ActionResponse implements ToXContentObject return builder; } - public static SqlQueryResponse fromXContent(XContentParser parser) { - return PARSER.apply(parser, null); + public static ColumnInfo readColumnInfo(StreamInput in) throws IOException { + String table = in.readString(); + String name = in.readString(); + String esType = in.readString(); + JDBCType jdbcType; + int displaySize; + if (in.readBoolean()) { + jdbcType = JDBCType.valueOf(in.readVInt()); + displaySize = in.readVInt(); + } else { + jdbcType = null; + displaySize = 0; + } + return new ColumnInfo(table, name, esType, jdbcType, displaySize); } - public static List> parseRows(XContentParser parser) throws IOException { - List> list = new ArrayList<>(); - while (parser.nextToken() != XContentParser.Token.END_ARRAY) { - if (parser.currentToken() == XContentParser.Token.START_ARRAY) { - list.add(parseRow(parser)); - } else { - throw new IllegalStateException("expected start array but got [" + parser.currentToken() + "]"); - } + public static void writeColumnInfo(StreamOutput out, ColumnInfo columnInfo) throws IOException { + out.writeString(columnInfo.table()); + 
out.writeString(columnInfo.name()); + out.writeString(columnInfo.esType()); + if (columnInfo.jdbcType() != null) { + out.writeBoolean(true); + out.writeVInt(columnInfo.jdbcType().getVendorTypeNumber()); + out.writeVInt(columnInfo.displaySize()); + } else { + out.writeBoolean(false); } - return list; - } - - public static List parseRow(XContentParser parser) throws IOException { - List list = new ArrayList<>(); - while (parser.nextToken() != XContentParser.Token.END_ARRAY) { - if (parser.currentToken().isValue()) { - list.add(parseFieldsValue(parser)); - } else if (parser.currentToken() == XContentParser.Token.VALUE_NULL) { - list.add(null); - } else { - throw new IllegalStateException("expected value but got [" + parser.currentToken() + "]"); - } - } - return list; } @Override diff --git a/x-pack/plugin/sql/sql-proto/src/main/java/org/elasticsearch/xpack/sql/plugin/SqlTranslateRequest.java b/x-pack/plugin/sql/sql-proto/src/main/java/org/elasticsearch/xpack/sql/plugin/SqlTranslateRequest.java index 93e06307451..103bfe5fddd 100644 --- a/x-pack/plugin/sql/sql-proto/src/main/java/org/elasticsearch/xpack/sql/plugin/SqlTranslateRequest.java +++ b/x-pack/plugin/sql/sql-proto/src/main/java/org/elasticsearch/xpack/sql/plugin/SqlTranslateRequest.java @@ -10,8 +10,11 @@ import org.elasticsearch.common.Strings; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.unit.TimeValue; import org.elasticsearch.common.xcontent.ObjectParser; +import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.common.xcontent.XContentParser; import org.elasticsearch.index.query.QueryBuilder; +import org.elasticsearch.xpack.sql.proto.Mode; +import org.elasticsearch.xpack.sql.proto.SqlTypedParamValue; import java.io.IOException; import java.util.List; @@ -56,4 +59,14 @@ public class SqlTranslateRequest extends AbstractSqlQueryRequest { request.mode(mode); return request; } + + @Override + public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { + // This is needed just to test parsing of SqlTranslateRequest, so we can reuse SqlQuerySerialization + return new org.elasticsearch.xpack.sql.proto.SqlQueryRequest(mode(), query(), params(), timeZone(), fetchSize(), + requestTimeout(), pageTimeout(), filter(), null).toXContent(builder, params); + + } + + } diff --git a/x-pack/plugin/sql/sql-proto/src/main/java/org/elasticsearch/xpack/sql/plugin/SqlTranslateRequestBuilder.java b/x-pack/plugin/sql/sql-proto/src/main/java/org/elasticsearch/xpack/sql/plugin/SqlTranslateRequestBuilder.java index 11adc975014..d6d97c19297 100644 --- a/x-pack/plugin/sql/sql-proto/src/main/java/org/elasticsearch/xpack/sql/plugin/SqlTranslateRequestBuilder.java +++ b/x-pack/plugin/sql/sql-proto/src/main/java/org/elasticsearch/xpack/sql/plugin/SqlTranslateRequestBuilder.java @@ -9,27 +9,25 @@ import org.elasticsearch.action.ActionRequestBuilder; import org.elasticsearch.client.ElasticsearchClient; import org.elasticsearch.common.unit.TimeValue; import org.elasticsearch.index.query.QueryBuilder; +import org.elasticsearch.xpack.sql.proto.Mode; +import org.elasticsearch.xpack.sql.proto.Protocol; +import org.elasticsearch.xpack.sql.proto.SqlTypedParamValue; import java.util.Collections; import java.util.List; import java.util.TimeZone; -import static org.elasticsearch.xpack.sql.plugin.AbstractSqlQueryRequest.DEFAULT_FETCH_SIZE; -import static org.elasticsearch.xpack.sql.plugin.AbstractSqlQueryRequest.DEFAULT_PAGE_TIMEOUT; -import static 
org.elasticsearch.xpack.sql.plugin.AbstractSqlQueryRequest.DEFAULT_REQUEST_TIMEOUT; -import static org.elasticsearch.xpack.sql.plugin.AbstractSqlQueryRequest.DEFAULT_TIME_ZONE; - /** * Builder for the request for the sql action for translating SQL queries into ES requests */ public class SqlTranslateRequestBuilder extends ActionRequestBuilder { public SqlTranslateRequestBuilder(ElasticsearchClient client, SqlTranslateAction action) { - this(client, action, AbstractSqlRequest.Mode.PLAIN, null, null, Collections.emptyList(), DEFAULT_TIME_ZONE, DEFAULT_FETCH_SIZE, - DEFAULT_REQUEST_TIMEOUT, DEFAULT_PAGE_TIMEOUT); + this(client, action, Mode.PLAIN, null, null, Collections.emptyList(), Protocol.TIME_ZONE, Protocol.FETCH_SIZE, + Protocol.REQUEST_TIMEOUT, Protocol.PAGE_TIMEOUT); } - public SqlTranslateRequestBuilder(ElasticsearchClient client, SqlTranslateAction action, AbstractSqlRequest.Mode mode, String query, + public SqlTranslateRequestBuilder(ElasticsearchClient client, SqlTranslateAction action, Mode mode, String query, QueryBuilder filter, List params, TimeZone timeZone, int fetchSize, TimeValue requestTimeout, TimeValue pageTimeout) { super(client, action, new SqlTranslateRequest(mode, query, params, filter, timeZone, fetchSize, requestTimeout, pageTimeout)); diff --git a/x-pack/plugin/sql/sql-proto/src/main/java/org/elasticsearch/xpack/sql/proto/AbstractSqlRequest.java b/x-pack/plugin/sql/sql-proto/src/main/java/org/elasticsearch/xpack/sql/proto/AbstractSqlRequest.java new file mode 100644 index 00000000000..2001aecdac5 --- /dev/null +++ b/x-pack/plugin/sql/sql-proto/src/main/java/org/elasticsearch/xpack/sql/proto/AbstractSqlRequest.java @@ -0,0 +1,42 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.sql.proto; + +import org.elasticsearch.common.xcontent.ToXContentFragment; + +import java.util.Objects; + +/** + * Base request for all SQL-related requests for JDBC/CLI client + *
+ * Contains information about the client mode that can be used to generate different responses based on the caller type. + */ +public abstract class AbstractSqlRequest implements ToXContentFragment { + + private final Mode mode; + + protected AbstractSqlRequest(Mode mode) { + this.mode = mode; + } + + public Mode mode() { + return mode; + } + + @Override + public boolean equals(Object o) { + if (this == o) return true; + if (o == null || getClass() != o.getClass()) return false; + AbstractSqlRequest that = (AbstractSqlRequest) o; + return mode == that.mode; + } + + @Override + public int hashCode() { + return Objects.hash(mode); + } + +} diff --git a/x-pack/plugin/sql/sql-proto/src/main/java/org/elasticsearch/xpack/sql/plugin/ColumnInfo.java b/x-pack/plugin/sql/sql-proto/src/main/java/org/elasticsearch/xpack/sql/proto/ColumnInfo.java similarity index 70% rename from x-pack/plugin/sql/sql-proto/src/main/java/org/elasticsearch/xpack/sql/plugin/ColumnInfo.java rename to x-pack/plugin/sql/sql-proto/src/main/java/org/elasticsearch/xpack/sql/proto/ColumnInfo.java index 5c12c776dd1..ad2f687ae0b 100644 --- a/x-pack/plugin/sql/sql-proto/src/main/java/org/elasticsearch/xpack/sql/plugin/ColumnInfo.java +++ b/x-pack/plugin/sql/sql-proto/src/main/java/org/elasticsearch/xpack/sql/proto/ColumnInfo.java @@ -3,14 +3,11 @@ * or more contributor license agreements. Licensed under the Elastic License; * you may not use this file except in compliance with the Elastic License. */ -package org.elasticsearch.xpack.sql.plugin; +package org.elasticsearch.xpack.sql.proto; import org.elasticsearch.common.Nullable; import org.elasticsearch.common.ParseField; import org.elasticsearch.common.Strings; -import org.elasticsearch.common.io.stream.StreamInput; -import org.elasticsearch.common.io.stream.StreamOutput; -import org.elasticsearch.common.io.stream.Writeable; import org.elasticsearch.common.xcontent.ConstructingObjectParser; import org.elasticsearch.common.xcontent.ToXContentObject; import org.elasticsearch.common.xcontent.XContentBuilder; @@ -26,16 +23,16 @@ import static org.elasticsearch.common.xcontent.ConstructingObjectParser.optiona /** * Information about a column returned with first query response */ -public final class ColumnInfo implements Writeable, ToXContentObject { +public class ColumnInfo implements ToXContentObject { private static final ConstructingObjectParser PARSER = - new ConstructingObjectParser<>("column_info", true, objects -> - new ColumnInfo( - objects[0] == null ? "" : (String) objects[0], - (String) objects[1], - (String) objects[2], - objects[3] == null ? null : JDBCType.valueOf((int) objects[3]), - objects[4] == null ? 0 : (int) objects[4])); + new ConstructingObjectParser<>("column_info", true, objects -> + new ColumnInfo( + objects[0] == null ? "" : (String) objects[0], + (String) objects[1], + (String) objects[2], + objects[3] == null ? null : JDBCType.valueOf((int) objects[3]), + objects[4] == null ? 
0 : (int) objects[4])); private static final ParseField TABLE = new ParseField("table"); private static final ParseField NAME = new ParseField("name"); @@ -74,33 +71,6 @@ public final class ColumnInfo implements Writeable, ToXContentObject { this.displaySize = 0; } - ColumnInfo(StreamInput in) throws IOException { - table = in.readString(); - name = in.readString(); - esType = in.readString(); - if (in.readBoolean()) { - jdbcType = JDBCType.valueOf(in.readVInt()); - displaySize = in.readVInt(); - } else { - jdbcType = null; - displaySize = 0; - } - } - - @Override - public void writeTo(StreamOutput out) throws IOException { - out.writeString(table); - out.writeString(name); - out.writeString(esType); - if (jdbcType != null) { - out.writeBoolean(true); - out.writeVInt(jdbcType.getVendorTypeNumber()); - out.writeVInt(displaySize); - } else { - out.writeBoolean(false); - } - } - @Override public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { builder.startObject(); @@ -162,10 +132,10 @@ public final class ColumnInfo implements Writeable, ToXContentObject { if (o == null || getClass() != o.getClass()) return false; ColumnInfo that = (ColumnInfo) o; return displaySize == that.displaySize && - Objects.equals(table, that.table) && - Objects.equals(name, that.name) && - Objects.equals(esType, that.esType) && - jdbcType == that.jdbcType; + Objects.equals(table, that.table) && + Objects.equals(name, that.name) && + Objects.equals(esType, that.esType) && + jdbcType == that.jdbcType; } @Override diff --git a/x-pack/plugin/sql/sql-proto/src/main/java/org/elasticsearch/xpack/sql/proto/MainResponse.java b/x-pack/plugin/sql/sql-proto/src/main/java/org/elasticsearch/xpack/sql/proto/MainResponse.java new file mode 100644 index 00000000000..73b6cbc529e --- /dev/null +++ b/x-pack/plugin/sql/sql-proto/src/main/java/org/elasticsearch/xpack/sql/proto/MainResponse.java @@ -0,0 +1,107 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
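With Writeable stripped from the relocated ColumnInfo above, the class is pure XContent; its stream form now lives in SqlQueryResponse.readColumnInfo/writeColumnInfo. A small sketch of constructing one, with made-up values:

    // table, column name, Elasticsearch type, JDBC type, display size
    ColumnInfo col = new ColumnInfo("test", "age", "integer", JDBCType.INTEGER, 11);
    // round-trips through JSON via toXContent(builder, params) and ColumnInfo.fromXContent(parser)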
+ */ + +package org.elasticsearch.xpack.sql.proto; + +import org.elasticsearch.Build; +import org.elasticsearch.Version; +import org.elasticsearch.common.ParseField; +import org.elasticsearch.common.xcontent.ObjectParser; +import org.elasticsearch.common.xcontent.XContentParser; + +import java.util.Objects; + +/** + * Main (/) response for JDBC/CLI client + */ +public class MainResponse { + private String nodeName; + // TODO: Add parser for Version + private Version version; + private String clusterName; + private String clusterUuid; + // TODO: Add parser for Build + private Build build; + + private MainResponse() { + } + + public MainResponse(String nodeName, Version version, String clusterName, String clusterUuid, Build build) { + this.nodeName = nodeName; + this.version = version; + this.clusterName = clusterName; + this.clusterUuid = clusterUuid; + this.build = build; + } + + public String getNodeName() { + return nodeName; + } + + public Version getVersion() { + return version; + } + + public String getClusterName() { + return clusterName; + } + + public String getClusterUuid() { + return clusterUuid; + } + + public Build getBuild() { + return build; + } + + private static final ObjectParser PARSER = new ObjectParser<>(MainResponse.class.getName(), true, + MainResponse::new); + + static { + PARSER.declareString((response, value) -> response.nodeName = value, new ParseField("name")); + PARSER.declareString((response, value) -> response.clusterName = value, new ParseField("cluster_name")); + PARSER.declareString((response, value) -> response.clusterUuid = value, new ParseField("cluster_uuid")); + PARSER.declareString((response, value) -> { + }, new ParseField("tagline")); + PARSER.declareObject((response, value) -> { + final String buildFlavor = (String) value.get("build_flavor"); + final String buildType = (String) value.get("build_type"); + response.build = + new Build( + buildFlavor == null ? Build.Flavor.UNKNOWN : Build.Flavor.fromDisplayName(buildFlavor), + buildType == null ? Build.Type.UNKNOWN : Build.Type.fromDisplayName(buildType), + (String) value.get("build_hash"), + (String) value.get("build_date"), + (boolean) value.get("build_snapshot")); + response.version = Version.fromString((String) value.get("number")); + }, (parser, context) -> parser.map(), new ParseField("version")); + } + + public static MainResponse fromXContent(XContentParser parser) { + return PARSER.apply(parser, null); + } + + @Override + public boolean equals(Object o) { + if (this == o) { + return true; + } + if (o == null || getClass() != o.getClass()) { + return false; + } + MainResponse other = (MainResponse) o; + return Objects.equals(nodeName, other.nodeName) && + Objects.equals(version, other.version) && + Objects.equals(clusterUuid, other.clusterUuid) && + Objects.equals(build, other.build) && + Objects.equals(clusterName, other.clusterName); + } + + @Override + public int hashCode() { + return Objects.hash(nodeName, version, clusterUuid, build, clusterName); + } +} diff --git a/x-pack/plugin/sql/sql-proto/src/main/java/org/elasticsearch/xpack/sql/proto/Mode.java b/x-pack/plugin/sql/sql-proto/src/main/java/org/elasticsearch/xpack/sql/proto/Mode.java new file mode 100644 index 00000000000..02f175ca80d --- /dev/null +++ b/x-pack/plugin/sql/sql-proto/src/main/java/org/elasticsearch/xpack/sql/proto/Mode.java @@ -0,0 +1,30 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. 
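MainResponse above deliberately parses only the fields the CLI and JDBC clients use. A sketch of reading a raw "/" response body, assuming a JSON string named json and the usual XContent setup of this codebase:

    try (XContentParser parser = JsonXContent.jsonXContent.createParser(
            NamedXContentRegistry.EMPTY, DeprecationHandler.THROW_UNSUPPORTED_OPERATION, json)) {
        MainResponse info = MainResponse.fromXContent(parser);
        // e.g. cluster name and version for the CLI connection banner
        String banner = info.getClusterName() + " / " + info.getVersion();
    }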
Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ + +package org.elasticsearch.xpack.sql.proto; + +import java.util.Locale; + +/** + * SQL protocol mode + */ +public enum Mode { + PLAIN, + JDBC; + + public static Mode fromString(String mode) { + if (mode == null) { + return PLAIN; + } + return Mode.valueOf(mode.toUpperCase(Locale.ROOT)); + } + + + @Override + public String toString() { + return this.name().toLowerCase(Locale.ROOT); + } +} diff --git a/x-pack/plugin/sql/sql-proto/src/main/java/org/elasticsearch/xpack/sql/proto/Protocol.java b/x-pack/plugin/sql/sql-proto/src/main/java/org/elasticsearch/xpack/sql/proto/Protocol.java new file mode 100644 index 00000000000..a61978828c8 --- /dev/null +++ b/x-pack/plugin/sql/sql-proto/src/main/java/org/elasticsearch/xpack/sql/proto/Protocol.java @@ -0,0 +1,31 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ + +package org.elasticsearch.xpack.sql.proto; + +import org.elasticsearch.common.unit.TimeValue; + +import java.util.TimeZone; + +/** + * Sql protocol defaults and end-points shared between JDBC and REST protocol implementations + */ +public final class Protocol { + public static final TimeZone TIME_ZONE = TimeZone.getTimeZone("UTC"); + + /** + * Global choice for the default fetch size. + */ + public static final int FETCH_SIZE = 1000; + public static final TimeValue REQUEST_TIMEOUT = TimeValue.timeValueSeconds(90); + public static final TimeValue PAGE_TIMEOUT = TimeValue.timeValueSeconds(45); + + /** + * SQL-related endpoints + */ + public static final String CLEAR_CURSOR_REST_ENDPOINT = "/_xpack/sql/close"; + public static final String SQL_QUERY_REST_ENDPOINT = "/_xpack/sql"; +} diff --git a/x-pack/plugin/sql/sql-proto/src/main/java/org/elasticsearch/xpack/sql/proto/SqlClearCursorRequest.java b/x-pack/plugin/sql/sql-proto/src/main/java/org/elasticsearch/xpack/sql/proto/SqlClearCursorRequest.java new file mode 100644 index 00000000000..310dde44302 --- /dev/null +++ b/x-pack/plugin/sql/sql-proto/src/main/java/org/elasticsearch/xpack/sql/proto/SqlClearCursorRequest.java @@ -0,0 +1,48 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
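Mode and Protocol above replace the nested AbstractSqlRequest.Mode enum and the old DEFAULT_* constants. A sketch of building a first-page request with the shared defaults (hypothetical query text; the constructor is the one on proto.SqlQueryRequest further below):

    SqlQueryRequest request = new SqlQueryRequest(Mode.fromString("jdbc"), "SELECT 1",
            Collections.emptyList(), null, Protocol.TIME_ZONE, Protocol.FETCH_SIZE,
            Protocol.REQUEST_TIMEOUT, Protocol.PAGE_TIMEOUT);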
+ */ +package org.elasticsearch.xpack.sql.proto; + +import org.elasticsearch.common.xcontent.XContentBuilder; + +import java.io.IOException; +import java.util.Objects; + +/** + * Request to clean all SQL resources associated with the cursor for JDBC/CLI client + */ +public class SqlClearCursorRequest extends AbstractSqlRequest { + + private final String cursor; + + public SqlClearCursorRequest(Mode mode, String cursor) { + super(mode); + this.cursor = cursor; + } + + public String getCursor() { + return cursor; + } + + @Override + public boolean equals(Object o) { + if (this == o) return true; + if (o == null || getClass() != o.getClass()) return false; + if (!super.equals(o)) return false; + SqlClearCursorRequest that = (SqlClearCursorRequest) o; + return Objects.equals(cursor, that.cursor); + } + + @Override + public int hashCode() { + return Objects.hash(super.hashCode(), cursor); + } + + @Override + public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { + builder.field("cursor", cursor); + return builder; + } +}
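A minimal sketch of how a client could serialize this request body by hand, using the stock Elasticsearch XContent helpers; the cursor value is invented for illustration and is not taken from the patch:

import org.elasticsearch.common.Strings;
import org.elasticsearch.common.xcontent.ToXContent;
import org.elasticsearch.common.xcontent.XContentBuilder;
import org.elasticsearch.common.xcontent.json.JsonXContent;
import org.elasticsearch.xpack.sql.proto.Mode;
import org.elasticsearch.xpack.sql.proto.SqlClearCursorRequest;

public class ClearCursorBodySketch {
    public static void main(String[] args) throws Exception {
        // "example-cursor" is a placeholder; real cursors come from a prior query response
        SqlClearCursorRequest request = new SqlClearCursorRequest(Mode.PLAIN, "example-cursor");
        XContentBuilder builder = JsonXContent.contentBuilder().startObject();
        request.toXContent(builder, ToXContent.EMPTY_PARAMS); // writes the "cursor" field
        builder.endObject();
        System.out.println(Strings.toString(builder)); // roughly {"cursor":"example-cursor"}
    }
}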
diff --git a/x-pack/plugin/sql/sql-proto/src/main/java/org/elasticsearch/xpack/sql/proto/SqlClearCursorResponse.java b/x-pack/plugin/sql/sql-proto/src/main/java/org/elasticsearch/xpack/sql/proto/SqlClearCursorResponse.java new file mode 100644 index 00000000000..b56a8335d20 --- /dev/null +++ b/x-pack/plugin/sql/sql-proto/src/main/java/org/elasticsearch/xpack/sql/proto/SqlClearCursorResponse.java @@ -0,0 +1,61 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.sql.proto; + +import org.elasticsearch.common.ParseField; +import org.elasticsearch.common.xcontent.ConstructingObjectParser; +import org.elasticsearch.common.xcontent.XContentParser; + +import java.util.Objects; + +import static org.elasticsearch.common.xcontent.ConstructingObjectParser.optionalConstructorArg; + +/** + * Response to the request to clean all SQL resources associated with the cursor for JDBC/CLI client + */ +public class SqlClearCursorResponse { + + public static final ParseField SUCCEEDED = new ParseField("succeeded"); + public static final ConstructingObjectParser<SqlClearCursorResponse, Void> PARSER = + new ConstructingObjectParser<>(SqlClearCursorResponse.class.getName(), true, + objects -> new SqlClearCursorResponse(objects[0] == null ? false : (boolean) objects[0])); + + static { + PARSER.declareBoolean(optionalConstructorArg(), SUCCEEDED); + } + + + private final boolean succeeded; + + public SqlClearCursorResponse(boolean succeeded) { + this.succeeded = succeeded; + } + + /** + * @return Whether the attempt to clear a cursor was successful. + */ + public boolean isSucceeded() { + return succeeded; + } + + @Override + public boolean equals(Object o) { + if (this == o) return true; + if (o == null || getClass() != o.getClass()) return false; + SqlClearCursorResponse response = (SqlClearCursorResponse) o; + return succeeded == response.succeeded; + } + + @Override + public int hashCode() { + return Objects.hash(succeeded); + } + + public static SqlClearCursorResponse fromXContent(XContentParser parser) { + return PARSER.apply(parser, null); + } + +} diff --git a/x-pack/plugin/sql/sql-proto/src/main/java/org/elasticsearch/xpack/sql/proto/SqlQueryRequest.java b/x-pack/plugin/sql/sql-proto/src/main/java/org/elasticsearch/xpack/sql/proto/SqlQueryRequest.java new file mode 100644 index 00000000000..00a1696a05f --- /dev/null +++ b/x-pack/plugin/sql/sql-proto/src/main/java/org/elasticsearch/xpack/sql/proto/SqlQueryRequest.java @@ -0,0 +1,172 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ + +package org.elasticsearch.xpack.sql.proto; + +import org.elasticsearch.common.Nullable; +import org.elasticsearch.common.unit.TimeValue; +import org.elasticsearch.common.xcontent.ToXContent; +import org.elasticsearch.common.xcontent.XContentBuilder; + +import java.io.IOException; +import java.util.Collections; +import java.util.List; +import java.util.Objects; +import java.util.TimeZone; + +/** + * Sql query request for JDBC/CLI client + */ +public class SqlQueryRequest extends AbstractSqlRequest { + @Nullable + private final String cursor; + private final String query; + private final TimeZone timeZone; + private final int fetchSize; + private final TimeValue requestTimeout; + private final TimeValue pageTimeout; + @Nullable + private final ToXContent filter; + private final List<SqlTypedParamValue> params; + + + public SqlQueryRequest(Mode mode, String query, List<SqlTypedParamValue> params, TimeZone timeZone, + int fetchSize, TimeValue requestTimeout, TimeValue pageTimeout, ToXContent filter, String cursor) { + super(mode); + this.query = query; + this.params = params; + this.timeZone = timeZone; + this.fetchSize = fetchSize; + this.requestTimeout = requestTimeout; + this.pageTimeout = pageTimeout; + this.filter = filter; + this.cursor = cursor; + } + + public SqlQueryRequest(Mode mode, String query, List<SqlTypedParamValue> params, ToXContent filter, TimeZone timeZone, + int fetchSize, TimeValue requestTimeout, TimeValue pageTimeout) { + this(mode, query, params, timeZone, fetchSize, requestTimeout, pageTimeout, filter, null); + } + + public SqlQueryRequest(Mode mode, String cursor, TimeValue requestTimeout, TimeValue pageTimeout) { + this(mode, "", Collections.emptyList(), Protocol.TIME_ZONE, Protocol.FETCH_SIZE, requestTimeout, pageTimeout, null, cursor); + } + + + /** + * The key that must be sent back to SQL to access the next page of + * results. + */ + public String cursor() { + return cursor; + } + + /** + * Text of SQL query + */ + public String query() { + return query; + } + + /** + * An optional list of parameters if the SQL query is parametrized + */ + public List<SqlTypedParamValue> params() { + return params; + } + + /** + * The client's time zone + */ + public TimeZone timeZone() { + return timeZone; + } + + + /** + * Hint about how many results to fetch at once.
+ */ + public int fetchSize() { + return fetchSize; + } + + /** + * The timeout specified on the search request + */ + public TimeValue requestTimeout() { + return requestTimeout; + } + + /** + * The scroll timeout + */ + public TimeValue pageTimeout() { + return pageTimeout; + } + + /** + * An optional Query DSL defined query that can added as a filter on the top of the SQL query + */ + public ToXContent filter() { + return filter; + } + + @Override + public boolean equals(Object o) { + if (this == o) return true; + if (o == null || getClass() != o.getClass()) return false; + if (!super.equals(o)) return false; + SqlQueryRequest that = (SqlQueryRequest) o; + return fetchSize == that.fetchSize && + Objects.equals(query, that.query) && + Objects.equals(params, that.params) && + Objects.equals(timeZone, that.timeZone) && + Objects.equals(requestTimeout, that.requestTimeout) && + Objects.equals(pageTimeout, that.pageTimeout) && + Objects.equals(filter, that.filter) && + Objects.equals(cursor, that.cursor); + } + + @Override + public int hashCode() { + return Objects.hash(super.hashCode(), query, timeZone, fetchSize, requestTimeout, pageTimeout, filter, cursor); + } + + @Override + public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { + if (query != null) { + builder.field("query", query); + } + if (this.params.isEmpty() == false) { + builder.startArray("params"); + for (SqlTypedParamValue val : this.params) { + val.toXContent(builder, params); + } + builder.endArray(); + } + if (timeZone != null) { + builder.field("time_zone", timeZone.getID()); + } + if (fetchSize != Protocol.FETCH_SIZE) { + builder.field("fetch_size", fetchSize); + } + if (requestTimeout != Protocol.REQUEST_TIMEOUT) { + builder.field("request_timeout", requestTimeout.getStringRep()); + } + if (pageTimeout != Protocol.PAGE_TIMEOUT) { + builder.field("page_timeout", pageTimeout.getStringRep()); + } + if (filter != null) { + builder.field("filter"); + filter.toXContent(builder, params); + } + if (cursor != null) { + builder.field("cursor", cursor); + } + return builder; + } + +} diff --git a/x-pack/plugin/sql/sql-proto/src/main/java/org/elasticsearch/xpack/sql/proto/SqlQueryResponse.java b/x-pack/plugin/sql/sql-proto/src/main/java/org/elasticsearch/xpack/sql/proto/SqlQueryResponse.java new file mode 100644 index 00000000000..8937261237c --- /dev/null +++ b/x-pack/plugin/sql/sql-proto/src/main/java/org/elasticsearch/xpack/sql/proto/SqlQueryResponse.java @@ -0,0 +1,122 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ +package org.elasticsearch.xpack.sql.proto; + +import org.elasticsearch.common.Nullable; +import org.elasticsearch.common.ParseField; +import org.elasticsearch.common.xcontent.ConstructingObjectParser; +import org.elasticsearch.common.xcontent.ObjectParser.ValueType; +import org.elasticsearch.common.xcontent.XContentParser; + +import java.io.IOException; +import java.util.ArrayList; +import java.util.List; +import java.util.Objects; + +import static org.elasticsearch.common.xcontent.ConstructingObjectParser.constructorArg; +import static org.elasticsearch.common.xcontent.ConstructingObjectParser.optionalConstructorArg; +import static org.elasticsearch.common.xcontent.XContentParserUtils.parseFieldsValue; + +/** + * Response to perform an sql query for JDBC/CLI client + */ +public class SqlQueryResponse { + + @SuppressWarnings("unchecked") + public static final ConstructingObjectParser<SqlQueryResponse, Void> PARSER = new ConstructingObjectParser<>("sql", true, + objects -> new SqlQueryResponse( + objects[0] == null ? "" : (String) objects[0], + (List<ColumnInfo>) objects[1], + (List<List<Object>>) objects[2])); + + public static final ParseField CURSOR = new ParseField("cursor"); + public static final ParseField COLUMNS = new ParseField("columns"); + public static final ParseField ROWS = new ParseField("rows"); + + static { + PARSER.declareString(optionalConstructorArg(), CURSOR); + PARSER.declareObjectArray(optionalConstructorArg(), (p, c) -> ColumnInfo.fromXContent(p), COLUMNS); + PARSER.declareField(constructorArg(), (p, c) -> parseRows(p), ROWS, ValueType.OBJECT_ARRAY); + } + + // TODO: Simplify cursor handling + private final String cursor; + private final List<ColumnInfo> columns; + // TODO investigate reusing Page here - it probably is much more efficient + private final List<List<Object>> rows; + + public SqlQueryResponse(String cursor, @Nullable List<ColumnInfo> columns, List<List<Object>> rows) { + this.cursor = cursor; + this.columns = columns; + this.rows = rows; + } + + /** + * The key that must be sent back to SQL to access the next page of + * results. If equal to "" then there is no next page.
+ */ + public String cursor() { + return cursor; + } + + public long size() { + return rows.size(); + } + + public List<ColumnInfo> columns() { + return columns; + } + + public List<List<Object>> rows() { + return rows; + } + + public static SqlQueryResponse fromXContent(XContentParser parser) { + return PARSER.apply(parser, null); + } + + public static List<List<Object>> parseRows(XContentParser parser) throws IOException { + List<List<Object>> list = new ArrayList<>(); + while (parser.nextToken() != XContentParser.Token.END_ARRAY) { + if (parser.currentToken() == XContentParser.Token.START_ARRAY) { + list.add(parseRow(parser)); + } else { + throw new IllegalStateException("expected start array but got [" + parser.currentToken() + "]"); + } + } + return list; + } + + public static List<Object> parseRow(XContentParser parser) throws IOException { + List<Object> list = new ArrayList<>(); + while (parser.nextToken() != XContentParser.Token.END_ARRAY) { + if (parser.currentToken().isValue()) { + list.add(parseFieldsValue(parser)); + } else if (parser.currentToken() == XContentParser.Token.VALUE_NULL) { + list.add(null); + } else { + throw new IllegalStateException("expected value but got [" + parser.currentToken() + "]"); + } + } + return list; + } + + @Override + public boolean equals(Object o) { + if (this == o) return true; + if (o == null || getClass() != o.getClass()) return false; + SqlQueryResponse that = (SqlQueryResponse) o; + return Objects.equals(cursor, that.cursor) && + Objects.equals(columns, that.columns) && + Objects.equals(rows, that.rows); + } + + @Override + public int hashCode() { + return Objects.hash(cursor, columns, rows); + } + +}
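To make the parsing side concrete, here is a hypothetical round trip through fromXContent; the JSON shape is inferred from the parser declarations above, not taken from the patch itself:

import org.elasticsearch.common.xcontent.DeprecationHandler;
import org.elasticsearch.common.xcontent.NamedXContentRegistry;
import org.elasticsearch.common.xcontent.XContentParser;
import org.elasticsearch.common.xcontent.json.JsonXContent;
import org.elasticsearch.xpack.sql.proto.SqlQueryResponse;

public class QueryResponseParseSketch {
    public static void main(String[] args) throws Exception {
        // assumed response shape: one "long" column and two single-value rows
        String json = "{\"columns\":[{\"name\":\"a\",\"type\":\"long\"}],\"rows\":[[1],[2]],\"cursor\":\"\"}";
        try (XContentParser parser = JsonXContent.jsonXContent.createParser(
                NamedXContentRegistry.EMPTY, DeprecationHandler.THROW_UNSUPPORTED_OPERATION, json)) {
            SqlQueryResponse response = SqlQueryResponse.fromXContent(parser);
            System.out.println(response.size()); // 2 rows; an empty cursor means no next page
        }
    }
}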
diff --git a/x-pack/plugin/sql/sql-proto/src/main/java/org/elasticsearch/xpack/sql/plugin/SqlTypedParamValue.java b/x-pack/plugin/sql/sql-proto/src/main/java/org/elasticsearch/xpack/sql/proto/SqlTypedParamValue.java similarity index 76% rename from x-pack/plugin/sql/sql-proto/src/main/java/org/elasticsearch/xpack/sql/plugin/SqlTypedParamValue.java rename to x-pack/plugin/sql/sql-proto/src/main/java/org/elasticsearch/xpack/sql/proto/SqlTypedParamValue.java index ffde82fab34..a85b66b80a3 100644 --- a/x-pack/plugin/sql/sql-proto/src/main/java/org/elasticsearch/xpack/sql/plugin/SqlTypedParamValue.java +++ b/x-pack/plugin/sql/sql-proto/src/main/java/org/elasticsearch/xpack/sql/proto/SqlTypedParamValue.java @@ -3,12 +3,9 @@ * or more contributor license agreements. Licensed under the Elastic License; * you may not use this file except in compliance with the Elastic License. */ -package org.elasticsearch.xpack.sql.plugin; +package org.elasticsearch.xpack.sql.proto; import org.elasticsearch.common.ParseField; -import org.elasticsearch.common.io.stream.StreamInput; -import org.elasticsearch.common.io.stream.StreamOutput; -import org.elasticsearch.common.io.stream.Writeable; import org.elasticsearch.common.xcontent.ConstructingObjectParser; import org.elasticsearch.common.xcontent.ObjectParser; import org.elasticsearch.common.xcontent.ToXContentObject; @@ -25,12 +22,12 @@ import static org.elasticsearch.common.xcontent.ConstructingObjectParser.constru /** * Represent a strongly typed parameter value */ -public class SqlTypedParamValue implements ToXContentObject, Writeable { +public class SqlTypedParamValue implements ToXContentObject { private static final ConstructingObjectParser<SqlTypedParamValue, Void> PARSER = new ConstructingObjectParser<>("params", true, objects -> new SqlTypedParamValue( - objects[0], - DataType.fromEsType((String) objects[1]))); + DataType.fromEsType((String) objects[1]), objects[0] + )); private static final ParseField VALUE = new ParseField("value"); private static final ParseField TYPE = new ParseField("type"); @@ -43,7 +40,7 @@ public class SqlTypedParamValue implements ToXContentObject, Writeable { public final Object value; public final DataType dataType; - public SqlTypedParamValue(Object value, DataType dataType) { + public SqlTypedParamValue(DataType dataType, Object value) { this.value = value; this.dataType = dataType; } @@ -61,17 +58,6 @@ public class SqlTypedParamValue implements ToXContentObject, Writeable { return PARSER.apply(parser, null); } - @Override - public void writeTo(StreamOutput out) throws IOException { - out.writeEnum(dataType); - out.writeGenericValue(value); - } - - public SqlTypedParamValue(StreamInput in) throws IOException { - dataType = in.readEnum(DataType.class); - value = in.readGenericValue(); - } - @Override public boolean equals(Object o) { if (this == o) { @@ -94,4 +80,4 @@ public class SqlTypedParamValue implements ToXContentObject, Writeable { public String toString() { return String.valueOf(value) + "[" + dataType + "]"; } -} \ No newline at end of file +}
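Since the argument-order flip above is easy to miss, here is a short hypothetical sketch of the new calling convention (type first, then value), mirroring the test updates further below:

import java.util.Arrays;
import java.util.List;
import org.elasticsearch.xpack.sql.proto.SqlTypedParamValue;
import org.elasticsearch.xpack.sql.type.DataType;

class TypedParamSketch {
    // Parameters for a hypothetical query such as "WHERE name = ? AND year = ?"
    static List<SqlTypedParamValue> params() {
        return Arrays.asList(
                new SqlTypedParamValue(DataType.KEYWORD, "elastic"), // type first, value second
                new SqlTypedParamValue(DataType.LONG, 2018L));
    }
}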
diff --git a/x-pack/plugin/sql/sql-proto/src/test/java/org/elasticsearch/xpack/sql/plugin/SqlClearCursorRequestTests.java b/x-pack/plugin/sql/sql-proto/src/test/java/org/elasticsearch/xpack/sql/plugin/SqlClearCursorRequestTests.java index 83546924a38..e479ae8b4f1 100644 --- a/x-pack/plugin/sql/sql-proto/src/test/java/org/elasticsearch/xpack/sql/plugin/SqlClearCursorRequestTests.java +++ b/x-pack/plugin/sql/sql-proto/src/test/java/org/elasticsearch/xpack/sql/plugin/SqlClearCursorRequestTests.java @@ -8,17 +8,18 @@ package org.elasticsearch.xpack.sql.plugin; import org.elasticsearch.common.io.stream.Writeable; import org.elasticsearch.common.xcontent.XContentParser; import org.elasticsearch.test.AbstractSerializingTestCase; +import org.elasticsearch.xpack.sql.proto.Mode; import org.junit.Before; import java.io.IOException; import java.util.function.Consumer; public class SqlClearCursorRequestTests extends AbstractSerializingTestCase<SqlClearCursorRequest> { - public AbstractSqlRequest.Mode testMode; + public Mode testMode; @Before public void setup() { - testMode = randomFrom(AbstractSqlRequest.Mode.values()); + testMode = randomFrom(Mode.values()); } @Override @@ -40,7 +41,7 @@ public class SqlClearCursorRequestTests extends AbstractSerializingTestCase<Sql Consumer<SqlClearCursorRequest> mutator = randomFrom( - request -> request.mode(randomValueOtherThan(request.mode(), () -> randomFrom(AbstractSqlRequest.Mode.values()))), + request -> request.mode(randomValueOtherThan(request.mode(), () -> randomFrom(Mode.values()))), request -> request.setCursor(randomValueOtherThan(request.getCursor(), SqlQueryResponseTests::randomStringCursor)) ); SqlClearCursorRequest newRequest = new SqlClearCursorRequest(instance.mode(), instance.getCursor()); diff --git a/x-pack/plugin/sql/sql-proto/src/test/java/org/elasticsearch/xpack/sql/plugin/SqlClearCursorResponseTests.java b/x-pack/plugin/sql/sql-proto/src/test/java/org/elasticsearch/xpack/sql/plugin/SqlClearCursorResponseTests.java index 0ef2875d8e7..94964428bb4 100644 --- a/x-pack/plugin/sql/sql-proto/src/test/java/org/elasticsearch/xpack/sql/plugin/SqlClearCursorResponseTests.java +++ b/x-pack/plugin/sql/sql-proto/src/test/java/org/elasticsearch/xpack/sql/plugin/SqlClearCursorResponseTests.java @@ -27,6 +27,8 @@ public class SqlClearCursorResponseTests extends AbstractStreamableXContentTestC @Override protected SqlClearCursorResponse doParseInstance(XContentParser parser) { - return SqlClearCursorResponse.fromXContent(parser); + org.elasticsearch.xpack.sql.proto.SqlClearCursorResponse response = + org.elasticsearch.xpack.sql.proto.SqlClearCursorResponse.fromXContent(parser); + return new SqlClearCursorResponse(response.isSucceeded()); } } diff --git a/x-pack/plugin/sql/sql-proto/src/test/java/org/elasticsearch/xpack/sql/plugin/SqlQueryRequestTests.java b/x-pack/plugin/sql/sql-proto/src/test/java/org/elasticsearch/xpack/sql/plugin/SqlQueryRequestTests.java index 5fbe4e42d48..0e4a183ab16 100644 --- a/x-pack/plugin/sql/sql-proto/src/test/java/org/elasticsearch/xpack/sql/plugin/SqlQueryRequestTests.java +++ b/x-pack/plugin/sql/sql-proto/src/test/java/org/elasticsearch/xpack/sql/plugin/SqlQueryRequestTests.java @@ -14,6 +14,8 @@ import org.elasticsearch.common.xcontent.XContentParser; import org.elasticsearch.search.SearchModule; import org.elasticsearch.test.AbstractSerializingTestCase; import org.elasticsearch.test.ESTestCase; +import org.elasticsearch.xpack.sql.proto.Mode; +import org.elasticsearch.xpack.sql.proto.SqlTypedParamValue; import org.elasticsearch.xpack.sql.type.DataType; import org.junit.Before; @@ -28,11 +30,11 @@ import static org.elasticsearch.xpack.sql.plugin.SqlTestUtils.randomFilterOrNull public class SqlQueryRequestTests extends AbstractSerializingTestCase<SqlQueryRequest> { - public AbstractSqlRequest.Mode testMode; + public Mode testMode; @Before public void setup() { - testMode = randomFrom(AbstractSqlRequest.Mode.values()); + testMode = randomFrom(Mode.values()); } @Override @@ -63,11 +65,11 @@ public class SqlQueryRequestTests extends AbstractSerializingTestCase<SqlQueryR List<SqlTypedParamValue> arr = new ArrayList<>(len); for (int i = 0; i < len; i++) { @SuppressWarnings("unchecked") Supplier<SqlTypedParamValue> supplier = randomFrom( - () -> new SqlTypedParamValue(randomBoolean(), DataType.BOOLEAN), - () -> new SqlTypedParamValue(randomLong(), DataType.LONG), - () -> new SqlTypedParamValue(randomDouble(), DataType.DOUBLE), - () -> new SqlTypedParamValue(null, DataType.NULL), - () -> new SqlTypedParamValue(randomAlphaOfLength(10), DataType.KEYWORD) + () -> new SqlTypedParamValue(DataType.BOOLEAN, randomBoolean()), + () -> new SqlTypedParamValue(DataType.LONG, randomLong()), + () -> new SqlTypedParamValue(DataType.DOUBLE, randomDouble()), + () -> new SqlTypedParamValue(DataType.NULL, null), + () -> new SqlTypedParamValue(DataType.KEYWORD, randomAlphaOfLength(10)) ); arr.add(supplier.get()); } @@ -93,7 +95,7 @@ public class SqlQueryRequestTests extends AbstractSerializingTestCase<SqlQueryR Consumer<SqlQueryRequest> mutator = randomFrom( - request ->
request.mode(randomValueOtherThan(request.mode(), () -> randomFrom(AbstractSqlRequest.Mode.values()))), + request -> request.mode(randomValueOtherThan(request.mode(), () -> randomFrom(Mode.values()))), request -> request.query(randomValueOtherThan(request.query(), () -> randomAlphaOfLength(5))), request -> request.params(randomValueOtherThan(request.params(), this::randomParameters)), request -> request.timeZone(randomValueOtherThan(request.timeZone(), ESTestCase::randomTimeZone)), diff --git a/x-pack/plugin/sql/sql-proto/src/test/java/org/elasticsearch/xpack/sql/plugin/SqlQueryResponseTests.java b/x-pack/plugin/sql/sql-proto/src/test/java/org/elasticsearch/xpack/sql/plugin/SqlQueryResponseTests.java index 42c08bb0914..bc5e5ae2a01 100644 --- a/x-pack/plugin/sql/sql-proto/src/test/java/org/elasticsearch/xpack/sql/plugin/SqlQueryResponseTests.java +++ b/x-pack/plugin/sql/sql-proto/src/test/java/org/elasticsearch/xpack/sql/plugin/SqlQueryResponseTests.java @@ -13,6 +13,7 @@ import org.elasticsearch.common.xcontent.XContentHelper; import org.elasticsearch.common.xcontent.XContentParser; import org.elasticsearch.test.AbstractStreamableXContentTestCase; import org.elasticsearch.test.ESTestCase; +import org.elasticsearch.xpack.sql.proto.ColumnInfo; import java.io.IOException; import java.sql.JDBCType; @@ -114,6 +115,8 @@ public class SqlQueryResponseTests extends AbstractStreamableXContentTestCase<S [...] public class SqlTranslateRequestTests extends AbstractSerializingTestCase<SqlTranslateRequest> { - public AbstractSqlRequest.Mode testMode; + public Mode testMode; @Before public void setup() { - testMode = randomFrom(AbstractSqlRequest.Mode.values()); + testMode = randomFrom(Mode.values()); } @Override @@ -71,7 +72,7 @@ public class SqlTranslateRequestTests extends AbstractSerializingTestCase<SqlTr Consumer<SqlTranslateRequest> mutator = randomFrom( request -> request.query(randomValueOtherThan(request.query(), () -> randomAlphaOfLength(5))), request -> request.timeZone(randomValueOtherThan(request.timeZone(), ESTestCase::randomTimeZone)), request -> request.fetchSize(randomValueOtherThan(request.fetchSize(), () -> between(1, Integer.MAX_VALUE))), - request -> request.requestTimeout(randomValueOtherThan(request.requestTimeout(), () -> randomTV())), + request -> request.requestTimeout(randomValueOtherThan(request.requestTimeout(), this::randomTV)), request -> request.filter(randomValueOtherThan(request.filter(), () -> request.filter() == null ?
randomFilter(random()) : randomFilterOrNull(random()))) ); diff --git a/x-pack/plugin/sql/sql-shared-client/src/main/java/org/elasticsearch/xpack/sql/client/HttpClient.java b/x-pack/plugin/sql/sql-shared-client/src/main/java/org/elasticsearch/xpack/sql/client/HttpClient.java index bf7c245b24c..8f77d5397e9 100644 --- a/x-pack/plugin/sql/sql-shared-client/src/main/java/org/elasticsearch/xpack/sql/client/HttpClient.java +++ b/x-pack/plugin/sql/sql-shared-client/src/main/java/org/elasticsearch/xpack/sql/client/HttpClient.java @@ -5,7 +5,6 @@ */ package org.elasticsearch.xpack.sql.client; -import org.elasticsearch.action.main.MainResponse; import org.elasticsearch.common.bytes.BytesReference; import org.elasticsearch.common.collect.Tuple; import org.elasticsearch.common.io.stream.BytesStreamOutput; @@ -22,13 +21,14 @@ import org.elasticsearch.xpack.sql.client.shared.ClientException; import org.elasticsearch.xpack.sql.client.shared.ConnectionConfiguration; import org.elasticsearch.xpack.sql.client.shared.JreHttpUrlConnection; import org.elasticsearch.xpack.sql.client.shared.JreHttpUrlConnection.ResponseOrException; -import org.elasticsearch.xpack.sql.plugin.AbstractSqlRequest; -import org.elasticsearch.xpack.sql.plugin.SqlClearCursorAction; -import org.elasticsearch.xpack.sql.plugin.SqlClearCursorRequest; -import org.elasticsearch.xpack.sql.plugin.SqlClearCursorResponse; -import org.elasticsearch.xpack.sql.plugin.SqlQueryAction; -import org.elasticsearch.xpack.sql.plugin.SqlQueryRequest; -import org.elasticsearch.xpack.sql.plugin.SqlQueryResponse; +import org.elasticsearch.xpack.sql.proto.AbstractSqlRequest; +import org.elasticsearch.xpack.sql.proto.MainResponse; +import org.elasticsearch.xpack.sql.proto.Mode; +import org.elasticsearch.xpack.sql.proto.Protocol; +import org.elasticsearch.xpack.sql.proto.SqlClearCursorRequest; +import org.elasticsearch.xpack.sql.proto.SqlClearCursorResponse; +import org.elasticsearch.xpack.sql.proto.SqlQueryRequest; +import org.elasticsearch.xpack.sql.proto.SqlQueryResponse; import java.io.IOException; import java.io.InputStream; @@ -50,7 +50,7 @@ public class HttpClient { private final ConnectionConfiguration cfg; - public HttpClient(ConnectionConfiguration cfg) throws SQLException { + public HttpClient(ConnectionConfiguration cfg) { this.cfg = cfg; } @@ -66,26 +66,25 @@ public class HttpClient { public SqlQueryResponse queryInit(String query, int fetchSize) throws SQLException { // TODO allow customizing the time zone - this is what session set/reset/get should be about - SqlQueryRequest sqlRequest = new SqlQueryRequest(AbstractSqlRequest.Mode.PLAIN, query, Collections.emptyList(), null, + SqlQueryRequest sqlRequest = new SqlQueryRequest(Mode.PLAIN, query, Collections.emptyList(), null, TimeZone.getTimeZone("UTC"), fetchSize, TimeValue.timeValueMillis(cfg.queryTimeout()), - TimeValue.timeValueMillis(cfg.pageTimeout()), "" - ); + TimeValue.timeValueMillis(cfg.pageTimeout())); return query(sqlRequest); } public SqlQueryResponse query(SqlQueryRequest sqlRequest) throws SQLException { - return post(SqlQueryAction.REST_ENDPOINT, sqlRequest, SqlQueryResponse::fromXContent); + return post(Protocol.SQL_QUERY_REST_ENDPOINT, sqlRequest, SqlQueryResponse::fromXContent); } public SqlQueryResponse nextPage(String cursor) throws SQLException { - SqlQueryRequest sqlRequest = new SqlQueryRequest(); - sqlRequest.cursor(cursor); - return post(SqlQueryAction.REST_ENDPOINT, sqlRequest, SqlQueryResponse::fromXContent); + SqlQueryRequest sqlRequest = new SqlQueryRequest(Mode.PLAIN, 
cursor, TimeValue.timeValueMillis(cfg.queryTimeout()), + TimeValue.timeValueMillis(cfg.pageTimeout())); + return post(Protocol.SQL_QUERY_REST_ENDPOINT, sqlRequest, SqlQueryResponse::fromXContent); } public boolean queryClose(String cursor) throws SQLException { - SqlClearCursorResponse response = post(SqlClearCursorAction.REST_ENDPOINT, - new SqlClearCursorRequest(AbstractSqlRequest.Mode.PLAIN, cursor), + SqlClearCursorResponse response = post(Protocol.CLEAR_CURSOR_REST_ENDPOINT, + new SqlClearCursorRequest(Mode.PLAIN, cursor), SqlClearCursorResponse::fromXContent); return response.isSucceeded(); } @@ -167,4 +166,4 @@ public class HttpClient { throw new ClientException("Cannot parse response", ex); } } -} \ No newline at end of file +} diff --git a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/execution/PlanExecutor.java b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/execution/PlanExecutor.java index 8c58769b759..23f1a6049dc 100644 --- a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/execution/PlanExecutor.java +++ b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/execution/PlanExecutor.java @@ -17,7 +17,7 @@ import org.elasticsearch.xpack.sql.optimizer.Optimizer; import org.elasticsearch.xpack.sql.plan.physical.EsQueryExec; import org.elasticsearch.xpack.sql.planner.Planner; import org.elasticsearch.xpack.sql.planner.PlanningException; -import org.elasticsearch.xpack.sql.plugin.SqlTypedParamValue; +import org.elasticsearch.xpack.sql.proto.SqlTypedParamValue; import org.elasticsearch.xpack.sql.session.Configuration; import org.elasticsearch.xpack.sql.session.Cursor; import org.elasticsearch.xpack.sql.session.RowSet; diff --git a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/parser/AstBuilder.java b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/parser/AstBuilder.java index de28f331872..48aa2cf1fa7 100644 --- a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/parser/AstBuilder.java +++ b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/parser/AstBuilder.java @@ -8,7 +8,7 @@ package org.elasticsearch.xpack.sql.parser; import org.antlr.v4.runtime.Token; import org.elasticsearch.xpack.sql.parser.SqlBaseParser.SingleStatementContext; import org.elasticsearch.xpack.sql.plan.logical.LogicalPlan; -import org.elasticsearch.xpack.sql.plugin.SqlTypedParamValue; +import org.elasticsearch.xpack.sql.proto.SqlTypedParamValue; import java.util.Map; diff --git a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/parser/CommandBuilder.java b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/parser/CommandBuilder.java index bf432a72363..7ce65aa4cfe 100644 --- a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/parser/CommandBuilder.java +++ b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/parser/CommandBuilder.java @@ -33,7 +33,7 @@ import org.elasticsearch.xpack.sql.plan.logical.command.sys.SysColumns; import org.elasticsearch.xpack.sql.plan.logical.command.sys.SysTableTypes; import org.elasticsearch.xpack.sql.plan.logical.command.sys.SysTables; import org.elasticsearch.xpack.sql.plan.logical.command.sys.SysTypes; -import org.elasticsearch.xpack.sql.plugin.SqlTypedParamValue; +import org.elasticsearch.xpack.sql.proto.SqlTypedParamValue; import org.elasticsearch.xpack.sql.tree.Location; import org.elasticsearch.xpack.sql.util.StringUtils; @@ -190,4 +190,4 @@ abstract class CommandBuilder extends LogicalPlanBuilder { public Object 
visitSysTableTypes(SysTableTypesContext ctx) { return new SysTableTypes(source(ctx)); } -} \ No newline at end of file +} diff --git a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/parser/ExpressionBuilder.java b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/parser/ExpressionBuilder.java index b14611f9f59..a6185def278 100644 --- a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/parser/ExpressionBuilder.java +++ b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/parser/ExpressionBuilder.java @@ -76,7 +76,7 @@ import org.elasticsearch.xpack.sql.parser.SqlBaseParser.StringContext; import org.elasticsearch.xpack.sql.parser.SqlBaseParser.StringLiteralContext; import org.elasticsearch.xpack.sql.parser.SqlBaseParser.StringQueryContext; import org.elasticsearch.xpack.sql.parser.SqlBaseParser.SubqueryExpressionContext; -import org.elasticsearch.xpack.sql.plugin.SqlTypedParamValue; +import org.elasticsearch.xpack.sql.proto.SqlTypedParamValue; import org.elasticsearch.xpack.sql.tree.Location; import org.elasticsearch.xpack.sql.type.DataType; import org.elasticsearch.xpack.sql.type.DataTypes; @@ -516,4 +516,4 @@ abstract class ExpressionBuilder extends IdentifierBuilder { return params.get(token); } -} \ No newline at end of file +} diff --git a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/parser/LogicalPlanBuilder.java b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/parser/LogicalPlanBuilder.java index f41fce16027..3435994a0fc 100644 --- a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/parser/LogicalPlanBuilder.java +++ b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/parser/LogicalPlanBuilder.java @@ -41,7 +41,7 @@ import org.elasticsearch.xpack.sql.plan.logical.Project; import org.elasticsearch.xpack.sql.plan.logical.SubQueryAlias; import org.elasticsearch.xpack.sql.plan.logical.UnresolvedRelation; import org.elasticsearch.xpack.sql.plan.logical.With; -import org.elasticsearch.xpack.sql.plugin.SqlTypedParamValue; +import org.elasticsearch.xpack.sql.proto.SqlTypedParamValue; import org.elasticsearch.xpack.sql.session.EmptyExecutable; import org.elasticsearch.xpack.sql.type.DataType; diff --git a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/parser/SqlParser.java b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/parser/SqlParser.java index 7aa3748e31e..b7fe9178f91 100644 --- a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/parser/SqlParser.java +++ b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/parser/SqlParser.java @@ -26,7 +26,7 @@ import org.apache.logging.log4j.Logger; import org.elasticsearch.common.logging.Loggers; import org.elasticsearch.xpack.sql.expression.Expression; import org.elasticsearch.xpack.sql.plan.logical.LogicalPlan; -import org.elasticsearch.xpack.sql.plugin.SqlTypedParamValue; +import org.elasticsearch.xpack.sql.proto.SqlTypedParamValue; import java.util.Arrays; import java.util.BitSet; diff --git a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/plugin/RestSqlClearCursorAction.java b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/plugin/RestSqlClearCursorAction.java index 4d47ca8c373..534d0459180 100644 --- a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/plugin/RestSqlClearCursorAction.java +++ b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/plugin/RestSqlClearCursorAction.java @@ -12,23 +12,25 @@ import org.elasticsearch.rest.BaseRestHandler; import 
org.elasticsearch.rest.RestController; import org.elasticsearch.rest.RestRequest; import org.elasticsearch.rest.action.RestToXContentListener; +import org.elasticsearch.xpack.sql.proto.Mode; +import org.elasticsearch.xpack.sql.proto.Protocol; import java.io.IOException; import static org.elasticsearch.rest.RestRequest.Method.POST; -import static org.elasticsearch.xpack.sql.plugin.SqlClearCursorAction.REST_ENDPOINT; + public class RestSqlClearCursorAction extends BaseRestHandler { public RestSqlClearCursorAction(Settings settings, RestController controller) { super(settings); - controller.registerHandler(POST, REST_ENDPOINT, this); + controller.registerHandler(POST, Protocol.CLEAR_CURSOR_REST_ENDPOINT, this); } @Override protected RestChannelConsumer prepareRequest(RestRequest request, NodeClient client) throws IOException { SqlClearCursorRequest sqlRequest; try (XContentParser parser = request.contentOrSourceParamParser()) { - sqlRequest = SqlClearCursorRequest.fromXContent(parser, AbstractSqlRequest.Mode.fromString(request.param("mode"))); + sqlRequest = SqlClearCursorRequest.fromXContent(parser, Mode.fromString(request.param("mode"))); } return channel -> client.executeLocally(SqlClearCursorAction.INSTANCE, sqlRequest, new RestToXContentListener<>(channel)); } @@ -37,4 +39,4 @@ public class RestSqlClearCursorAction extends BaseRestHandler { public String getName() { return "sql_translate_action"; } -} \ No newline at end of file +} diff --git a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/plugin/RestSqlQueryAction.java b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/plugin/RestSqlQueryAction.java index 9d043f855fd..9e34a3fb2e0 100644 --- a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/plugin/RestSqlQueryAction.java +++ b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/plugin/RestSqlQueryAction.java @@ -18,6 +18,8 @@ import org.elasticsearch.rest.RestRequest; import org.elasticsearch.rest.RestResponse; import org.elasticsearch.rest.RestStatus; import org.elasticsearch.rest.action.RestResponseListener; +import org.elasticsearch.xpack.sql.proto.Mode; +import org.elasticsearch.xpack.sql.proto.Protocol; import org.elasticsearch.xpack.sql.session.Cursor; import org.elasticsearch.xpack.sql.session.Cursors; @@ -31,15 +33,15 @@ public class RestSqlQueryAction extends BaseRestHandler { public RestSqlQueryAction(Settings settings, RestController controller) { super(settings); - controller.registerHandler(GET, SqlQueryAction.REST_ENDPOINT, this); - controller.registerHandler(POST, SqlQueryAction.REST_ENDPOINT, this); + controller.registerHandler(GET, Protocol.SQL_QUERY_REST_ENDPOINT, this); + controller.registerHandler(POST, Protocol.SQL_QUERY_REST_ENDPOINT, this); } @Override protected RestChannelConsumer prepareRequest(RestRequest request, NodeClient client) throws IOException { SqlQueryRequest sqlRequest; try (XContentParser parser = request.contentOrSourceParamParser()) { - sqlRequest = SqlQueryRequest.fromXContent(parser, AbstractSqlRequest.Mode.fromString(request.param("mode"))); + sqlRequest = SqlQueryRequest.fromXContent(parser, Mode.fromString(request.param("mode"))); } /* diff --git a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/plugin/RestSqlTranslateAction.java b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/plugin/RestSqlTranslateAction.java index 6167e4e571d..503ee843148 100644 --- a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/plugin/RestSqlTranslateAction.java +++
b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/plugin/RestSqlTranslateAction.java @@ -12,6 +12,7 @@ import org.elasticsearch.rest.BaseRestHandler; import org.elasticsearch.rest.RestController; import org.elasticsearch.rest.RestRequest; import org.elasticsearch.rest.action.RestToXContentListener; +import org.elasticsearch.xpack.sql.proto.Mode; import java.io.IOException; @@ -32,7 +33,7 @@ public class RestSqlTranslateAction extends BaseRestHandler { protected RestChannelConsumer prepareRequest(RestRequest request, NodeClient client) throws IOException { SqlTranslateRequest sqlRequest; try (XContentParser parser = request.contentOrSourceParamParser()) { - sqlRequest = SqlTranslateRequest.fromXContent(parser, AbstractSqlRequest.Mode.fromString(request.param("mode"))); + sqlRequest = SqlTranslateRequest.fromXContent(parser, Mode.fromString(request.param("mode"))); } return channel -> client.executeLocally(SqlTranslateAction.INSTANCE, sqlRequest, new RestToXContentListener<>(channel)); } diff --git a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/plugin/SqlLicenseChecker.java b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/plugin/SqlLicenseChecker.java index 8a3ef973d6b..b15ff6a1ae4 100644 --- a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/plugin/SqlLicenseChecker.java +++ b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/plugin/SqlLicenseChecker.java @@ -5,6 +5,8 @@ */ package org.elasticsearch.xpack.sql.plugin; +import org.elasticsearch.xpack.sql.proto.Mode; + import java.util.function.Consumer; /** @@ -12,16 +14,16 @@ import java.util.function.Consumer; */ public class SqlLicenseChecker { - private final Consumer<AbstractSqlRequest.Mode> checkIfSqlAllowed; + private final Consumer<Mode> checkIfSqlAllowed; - public SqlLicenseChecker(Consumer<AbstractSqlRequest.Mode> checkIfSqlAllowed) { + public SqlLicenseChecker(Consumer<Mode> checkIfSqlAllowed) { this.checkIfSqlAllowed = checkIfSqlAllowed; } /** * Throws an ElasticsearchSecurityException if the specified mode is not allowed */ - public void checkIfSqlAllowed(AbstractSqlRequest.Mode mode) { + public void checkIfSqlAllowed(Mode mode) { checkIfSqlAllowed.accept(mode); } }
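With the signature narrowed to Consumer<Mode>, wiring up a checker becomes a one-line lambda. A hypothetical sketch follows; the license rule here is a stand-in, not the real X-Pack licensing logic:

import org.elasticsearch.ElasticsearchSecurityException;
import org.elasticsearch.xpack.sql.plugin.SqlLicenseChecker;
import org.elasticsearch.xpack.sql.proto.Mode;

class LicenseCheckerSketch {
    static SqlLicenseChecker plainOnly() {
        return new SqlLicenseChecker(mode -> {
            if (mode == Mode.JDBC) { // pretend JDBC requires a higher license tier
                throw new ElasticsearchSecurityException("current license does not permit JDBC mode");
            }
        });
    }
}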
diff --git a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/plugin/TextFormat.java b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/plugin/TextFormat.java index 349a481cf66..9d0cd60c23e 100644 --- a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/plugin/TextFormat.java +++ b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/plugin/TextFormat.java @@ -7,6 +7,7 @@ package org.elasticsearch.xpack.sql.plugin; import org.elasticsearch.common.Strings; import org.elasticsearch.rest.RestRequest; +import org.elasticsearch.xpack.sql.proto.ColumnInfo; import org.elasticsearch.xpack.sql.session.Cursor; import org.elasticsearch.xpack.sql.session.Cursors; import org.elasticsearch.xpack.sql.util.StringUtils; @@ -38,17 +39,17 @@ enum TextFormat { final CliFormatter formatter; if (cursor instanceof CliFormatterCursor) { formatter = ((CliFormatterCursor) cursor).getCliFormatter(); - return formatter.formatWithoutHeader(response); + return formatter.formatWithoutHeader(response.rows()); } else { - formatter = new CliFormatter(response); - return formatter.formatWithHeader(response); + formatter = new CliFormatter(response.columns(), response.rows()); + return formatter.formatWithHeader(response.columns(), response.rows()); } } @Override Cursor wrapCursor(Cursor oldCursor, SqlQueryResponse response) { CliFormatter formatter = (oldCursor instanceof CliFormatterCursor) ? - ((CliFormatterCursor) oldCursor).getCliFormatter() : new CliFormatter(response); + ((CliFormatterCursor) oldCursor).getCliFormatter() : new CliFormatter(response.columns(), response.rows()); return CliFormatterCursor.wrap(super.wrapCursor(oldCursor, response), formatter); } diff --git a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/plugin/TransportSqlQueryAction.java b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/plugin/TransportSqlQueryAction.java index 5b59ced7a49..46429e2d508 100644 --- a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/plugin/TransportSqlQueryAction.java +++ b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/plugin/TransportSqlQueryAction.java @@ -16,6 +16,7 @@ import org.elasticsearch.common.settings.Settings; import org.elasticsearch.threadpool.ThreadPool; import org.elasticsearch.transport.TransportService; import org.elasticsearch.xpack.sql.execution.PlanExecutor; +import org.elasticsearch.xpack.sql.proto.ColumnInfo; import org.elasticsearch.xpack.sql.session.Configuration; import org.elasticsearch.xpack.sql.session.Cursors; import org.elasticsearch.xpack.sql.session.RowSet; @@ -26,7 +27,7 @@ import java.util.ArrayList; import java.util.List; import static java.util.Collections.unmodifiableList; -import static org.elasticsearch.xpack.sql.plugin.AbstractSqlRequest.Mode.JDBC; +import static org.elasticsearch.xpack.sql.proto.Mode.JDBC; public class TransportSqlQueryAction extends HandledTransportAction<SqlQueryRequest, SqlQueryResponse> { private final PlanExecutor planExecutor; diff --git a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/session/Configuration.java b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/session/Configuration.java index 681a5eb1fbd..ae43d4a9889 100644 --- a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/session/Configuration.java +++ b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/session/Configuration.java @@ -9,16 +9,14 @@ import org.elasticsearch.common.Nullable; import org.elasticsearch.common.unit.TimeValue; import org.elasticsearch.index.query.QueryBuilder; import org.elasticsearch.xpack.sql.plugin.AbstractSqlQueryRequest; +import org.elasticsearch.xpack.sql.proto.Protocol; import java.util.TimeZone; -// Typed object holding properties for a given action +// Typed object holding properties for a given action public class Configuration { public static final Configuration DEFAULT = new Configuration(TimeZone.getTimeZone("UTC"), - AbstractSqlQueryRequest.DEFAULT_FETCH_SIZE, - AbstractSqlQueryRequest.DEFAULT_REQUEST_TIMEOUT, - AbstractSqlQueryRequest.DEFAULT_PAGE_TIMEOUT, - null); + Protocol.FETCH_SIZE, Protocol.REQUEST_TIMEOUT, Protocol.PAGE_TIMEOUT, null); private TimeZone timeZone; private int pageSize; diff --git a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/session/SqlSession.java b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/session/SqlSession.java index 880e98c6064..65da32c3122 100644 --- a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/session/SqlSession.java +++ b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/session/SqlSession.java @@ -21,7 +21,7 @@ import org.elasticsearch.xpack.sql.plan.TableIdentifier; import org.elasticsearch.xpack.sql.plan.logical.LogicalPlan; import org.elasticsearch.xpack.sql.plan.physical.PhysicalPlan; import org.elasticsearch.xpack.sql.planner.Planner; -import org.elasticsearch.xpack.sql.plugin.SqlTypedParamValue; +import
org.elasticsearch.xpack.sql.proto.SqlTypedParamValue; import org.elasticsearch.xpack.sql.rule.RuleExecutor; import java.util.List; @@ -162,4 +162,4 @@ public class SqlSession { public Configuration settings() { return settings; } -} \ No newline at end of file +} diff --git a/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/action/SqlActionIT.java b/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/action/SqlActionIT.java index b36fa811d3b..22a7889f624 100644 --- a/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/action/SqlActionIT.java +++ b/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/action/SqlActionIT.java @@ -7,10 +7,10 @@ package org.elasticsearch.xpack.sql.action; import org.elasticsearch.action.index.IndexRequest; import org.elasticsearch.action.support.WriteRequest; -import org.elasticsearch.xpack.sql.plugin.AbstractSqlRequest.Mode; -import org.elasticsearch.xpack.sql.plugin.ColumnInfo; +import org.elasticsearch.xpack.sql.proto.ColumnInfo; import org.elasticsearch.xpack.sql.plugin.SqlQueryAction; import org.elasticsearch.xpack.sql.plugin.SqlQueryResponse; +import org.elasticsearch.xpack.sql.proto.Mode; import java.sql.JDBCType; diff --git a/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/execution/search/CursorTests.java b/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/execution/search/CursorTests.java index 0cd8c33b116..bac221df2e9 100644 --- a/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/execution/search/CursorTests.java +++ b/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/execution/search/CursorTests.java @@ -14,7 +14,7 @@ import org.elasticsearch.test.ESTestCase; import org.elasticsearch.xpack.sql.SqlException; import org.elasticsearch.xpack.sql.plugin.CliFormatter; import org.elasticsearch.xpack.sql.plugin.CliFormatterCursor; -import org.elasticsearch.xpack.sql.plugin.ColumnInfo; +import org.elasticsearch.xpack.sql.proto.ColumnInfo; import org.elasticsearch.xpack.sql.plugin.SqlQueryResponse; import org.elasticsearch.xpack.sql.session.Configuration; import org.elasticsearch.xpack.sql.session.Cursor; @@ -80,7 +80,8 @@ public class CursorTests extends ESTestCase { () -> { SqlQueryResponse response = createRandomSqlResponse(); if (response.columns() != null && response.rows() != null) { - return CliFormatterCursor.wrap(ScrollCursorTests.randomScrollCursor(), new CliFormatter(response)); + return CliFormatterCursor.wrap(ScrollCursorTests.randomScrollCursor(), + new CliFormatter(response.columns(), response.rows())); } else { return ScrollCursorTests.randomScrollCursor(); } diff --git a/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/expression/ParameterTests.java b/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/expression/ParameterTests.java index 5e359659859..37ab5fb2b6c 100644 --- a/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/expression/ParameterTests.java +++ b/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/expression/ParameterTests.java @@ -12,7 +12,7 @@ import org.elasticsearch.xpack.sql.expression.function.scalar.arithmetic.Sub; import org.elasticsearch.xpack.sql.expression.predicate.Equals; import org.elasticsearch.xpack.sql.parser.ParsingException; import org.elasticsearch.xpack.sql.parser.SqlParser; -import org.elasticsearch.xpack.sql.plugin.SqlTypedParamValue; +import org.elasticsearch.xpack.sql.proto.SqlTypedParamValue; import org.elasticsearch.xpack.sql.type.DataType; import java.util.Arrays; @@ -28,7 +28,7 @@ public 
class ParameterTests extends ESTestCase { public void testSingleParameter() { Expression expression = new SqlParser().createExpression("a = \n?", Collections.singletonList( - new SqlTypedParamValue("foo", DataType.KEYWORD) + new SqlTypedParamValue(DataType.KEYWORD, "foo") )); logger.info(expression); assertThat(expression, instanceOf(Equals.class)); @@ -42,10 +42,10 @@ public class ParameterTests extends ESTestCase { public void testMultipleParameters() { Expression expression = new SqlParser().createExpression("(? + ? * ?) - ?", Arrays.asList( - new SqlTypedParamValue(1L, DataType.LONG), - new SqlTypedParamValue(2L, DataType.LONG), - new SqlTypedParamValue(3L, DataType.LONG), - new SqlTypedParamValue(4L, DataType.LONG) + new SqlTypedParamValue(DataType.LONG, 1L), + new SqlTypedParamValue(DataType.LONG, 2L), + new SqlTypedParamValue(DataType.LONG, 3L), + new SqlTypedParamValue(DataType.LONG, 4L) )); assertThat(expression, instanceOf(Sub.class)); Sub sub = (Sub) expression; @@ -62,9 +62,9 @@ public class ParameterTests extends ESTestCase { public void testNotEnoughParameters() { ParsingException ex = expectThrows(ParsingException.class, () -> new SqlParser().createExpression("(? + ? * ?) - ?", Arrays.asList( - new SqlTypedParamValue(1L, DataType.LONG), - new SqlTypedParamValue(2L, DataType.LONG), - new SqlTypedParamValue(3L, DataType.LONG) + new SqlTypedParamValue(DataType.LONG, 1L), + new SqlTypedParamValue(DataType.LONG, 2L), + new SqlTypedParamValue(DataType.LONG, 3L) ))); assertThat(ex.getMessage(), containsString("Not enough actual parameters")); } diff --git a/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/parser/LikeEscapingParsingTests.java b/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/parser/LikeEscapingParsingTests.java index c94bcf0e664..b2abf0b6800 100644 --- a/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/parser/LikeEscapingParsingTests.java +++ b/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/parser/LikeEscapingParsingTests.java @@ -9,7 +9,7 @@ import org.elasticsearch.test.ESTestCase; import org.elasticsearch.xpack.sql.expression.Expression; import org.elasticsearch.xpack.sql.expression.regex.Like; import org.elasticsearch.xpack.sql.expression.regex.LikePattern; -import org.elasticsearch.xpack.sql.plugin.SqlTypedParamValue; +import org.elasticsearch.xpack.sql.proto.SqlTypedParamValue; import org.elasticsearch.xpack.sql.type.DataType; import java.util.Locale; @@ -33,7 +33,7 @@ public class LikeEscapingParsingTests extends ESTestCase { Expression exp = null; boolean parameterized = randomBoolean(); if (parameterized) { - exp = parser.createExpression("exp LIKE ?", singletonList(new SqlTypedParamValue(pattern, DataType.KEYWORD))); + exp = parser.createExpression("exp LIKE ?", singletonList(new SqlTypedParamValue(DataType.KEYWORD, pattern))); } else { exp = parser.createExpression(String.format(Locale.ROOT, "exp LIKE '%s'", pattern)); } @@ -63,9 +63,9 @@ public class LikeEscapingParsingTests extends ESTestCase { assertThat(error("'%string' ESCAPE '%'"), is("line 1:28: Char [%] cannot be used for escaping")); } - + public void testCannotUseStar() { assertThat(error("'|*string' ESCAPE '|'"), is("line 1:11: Invalid char [*] found in pattern [|*string] at position 1; use [%] or [_] instead")); } -} \ No newline at end of file +} diff --git a/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/plan/logical/command/sys/SysTablesTests.java 
b/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/plan/logical/command/sys/SysTablesTests.java index c08c423be34..e42ec51b425 100644 --- a/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/plan/logical/command/sys/SysTablesTests.java +++ b/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/plan/logical/command/sys/SysTablesTests.java @@ -17,7 +17,7 @@ import org.elasticsearch.xpack.sql.analysis.index.IndexResolver.IndexType; import org.elasticsearch.xpack.sql.expression.function.FunctionRegistry; import org.elasticsearch.xpack.sql.parser.SqlParser; import org.elasticsearch.xpack.sql.plan.logical.command.Command; -import org.elasticsearch.xpack.sql.plugin.SqlTypedParamValue; +import org.elasticsearch.xpack.sql.proto.SqlTypedParamValue; import org.elasticsearch.xpack.sql.session.SchemaRowSet; import org.elasticsearch.xpack.sql.session.SqlSession; import org.elasticsearch.xpack.sql.type.DataTypes; @@ -228,7 +228,7 @@ public class SysTablesTests extends ESTestCase { } private SqlTypedParamValue param(Object value) { - return new SqlTypedParamValue(value, DataTypes.fromJava(value)); + return new SqlTypedParamValue(DataTypes.fromJava(value), value); } private Tuple<Command, SqlSession> sql(String sql, List<SqlTypedParamValue> params) { diff --git a/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/plugin/CliFormatterTests.java b/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/plugin/CliFormatterTests.java index 1fe3c9fc89e..d87dba33068 100644 --- a/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/plugin/CliFormatterTests.java +++ b/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/plugin/CliFormatterTests.java @@ -6,6 +6,7 @@ package org.elasticsearch.xpack.sql.plugin; import org.elasticsearch.test.ESTestCase; +import org.elasticsearch.xpack.sql.proto.ColumnInfo; import java.sql.JDBCType; import java.util.Arrays; @@ -23,17 +24,17 @@ public class CliFormatterTests extends ESTestCase { Arrays.asList( Arrays.asList("15charwidedata!", 1, 6.888, 12, "rabbit"), Arrays.asList("dog", 1.7976931348623157E308, 123124.888, 9912, "goat"))); - private final CliFormatter formatter = new CliFormatter(firstResponse); + private final CliFormatter formatter = new CliFormatter(firstResponse.columns(), firstResponse.rows()); /** - * Tests for {@link CliFormatter#formatWithHeader(SqlQueryResponse)}, values + * Tests for {@link CliFormatter#formatWithHeader}, values * of exactly the minimum column size, column names of exactly * the minimum column size, column headers longer than the * minimum column size, and values longer than the minimum * column size. */ public void testFormatWithHeader() { - String[] result = formatter.formatWithHeader(firstResponse).split("\n"); + String[] result = formatter.formatWithHeader(firstResponse.columns(), firstResponse.rows()).split("\n"); assertThat(result, arrayWithSize(4)); assertEquals(" foo | bar |15charwidename!|superduperwidename!!!| baz ", result[0]); assertEquals("---------------+----------------------+---------------+---------------------+---------------", result[1]); @@ -42,14 +43,14 @@ } /** - * Tests for {@link CliFormatter#formatWithoutHeader(SqlQueryResponse)} and + * Tests for {@link CliFormatter#formatWithoutHeader} and * truncation of long columns.
*/ public void testFormatWithoutHeader() { - String[] result = formatter.formatWithoutHeader(new SqlQueryResponse("", null, + String[] result = formatter.formatWithoutHeader( Arrays.asList( Arrays.asList("ohnotruncateddata", 4, 1, 77, "wombat"), - Arrays.asList("dog", 2, 123124.888, 9912, "goat")))).split("\n"); + Arrays.asList("dog", 2, 123124.888, 9912, "goat"))).split("\n"); assertThat(result, arrayWithSize(2)); assertEquals("ohnotruncatedd~|4 |1 |77 |wombat ", result[0]); assertEquals("dog |2 |123124.888 |9912 |goat ", result[1]); @@ -59,9 +60,9 @@ public class CliFormatterTests extends ESTestCase { * Ensure that our estimates are perfect in at least some cases. */ public void testEstimateSize() { - assertEquals(formatter.formatWithHeader(firstResponse).length(), + assertEquals(formatter.formatWithHeader(firstResponse.columns(), firstResponse.rows()).length(), formatter.estimateSize(firstResponse.rows().size() + 2)); - assertEquals(formatter.formatWithoutHeader(firstResponse).length(), + assertEquals(formatter.formatWithoutHeader(firstResponse.rows()).length(), formatter.estimateSize(firstResponse.rows().size())); } } diff --git a/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/plugin/TextFormatTests.java b/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/plugin/TextFormatTests.java index 1c6bbfa69e8..bf6ccbb225a 100644 --- a/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/plugin/TextFormatTests.java +++ b/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/plugin/TextFormatTests.java @@ -10,6 +10,7 @@ import org.elasticsearch.common.xcontent.NamedXContentRegistry; import org.elasticsearch.rest.RestRequest; import org.elasticsearch.test.ESTestCase; import org.elasticsearch.test.rest.FakeRestRequest; +import org.elasticsearch.xpack.sql.proto.ColumnInfo; import java.util.ArrayList; import java.util.List; From 69481b4059353d197586e3092e667e45f208f35c Mon Sep 17 00:00:00 2001 From: Nik Everett Date: Mon, 14 May 2018 18:27:36 -0400 Subject: [PATCH 19/31] LLRest: Add equals and hashcode tests for Request (#30584) Adds tests for the `Request` object's equals and hashcode to remove a `TODO` and because we use the `equals` method in other testing. 
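A stripped-down, hypothetical rendering of the copy/mutate pattern the new test relies on: an independently built field-by-field copy must compare equal, while any single mutation must break equality.

import org.elasticsearch.client.Request;

class RequestEqualitySketch {
    static void check() {
        Request request = new Request("GET", "/");
        request.addParameter("pretty", "true");

        Request copy = new Request("GET", "/");   // field-by-field copy...
        copy.addParameter("pretty", "true");
        assert request.equals(copy) && request.hashCode() == copy.hashCode();

        Request mutant = new Request("POST", "/"); // ...while one mutated field must not be equal
        mutant.addParameter("pretty", "true");
        assert request.equals(mutant) == false;
    }
}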
--- .../elasticsearch/client/RequestTests.java | 102 +++++++++++++++++- 1 file changed, 101 insertions(+), 1 deletion(-) diff --git a/client/rest/src/test/java/org/elasticsearch/client/RequestTests.java b/client/rest/src/test/java/org/elasticsearch/client/RequestTests.java index b83115a5341..6625c389c6b 100644 --- a/client/rest/src/test/java/org/elasticsearch/client/RequestTests.java +++ b/client/rest/src/test/java/org/elasticsearch/client/RequestTests.java @@ -26,13 +26,16 @@ import java.util.Map; import org.apache.http.Header; import org.apache.http.HttpEntity; +import org.apache.http.entity.ByteArrayEntity; import org.apache.http.entity.ContentType; import org.apache.http.entity.StringEntity; import org.apache.http.message.BasicHeader; import org.apache.http.nio.entity.NStringEntity; +import org.elasticsearch.client.HttpAsyncResponseConsumerFactory.HeapBufferedResponseConsumerFactory; import static org.junit.Assert.assertArrayEquals; import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertNotEquals; import static org.junit.Assert.assertNull; import static org.junit.Assert.fail; @@ -151,6 +154,103 @@ public class RequestTests extends RestClientTestCase { assertArrayEquals(headers, request.getHeaders()); } - // TODO equals and hashcode + public void testEqualsAndHashCode() { + Request request = randomRequest(); + assertEquals(request, request); + Request copy = copy(request); + assertEquals(request, copy); + assertEquals(copy, request); + assertEquals(request.hashCode(), copy.hashCode()); + + Request mutant = mutate(request); + assertNotEquals(request, mutant); + assertNotEquals(mutant, request); + } + + private Request randomRequest() { + Request request = new Request( + randomFrom(new String[] {"GET", "PUT", "DELETE", "POST", "HEAD", "OPTIONS"}), + randomAsciiAlphanumOfLength(5)); + + int parameterCount = between(0, 5); + for (int i = 0; i < parameterCount; i++) { + request.addParameter(randomAsciiAlphanumOfLength(i), randomAsciiLettersOfLength(3)); + } + + if (randomBoolean()) { + if (randomBoolean()) { + request.setJsonEntity(randomAsciiAlphanumOfLength(10)); + } else { + request.setEntity(randomFrom(new HttpEntity[] { + new StringEntity(randomAsciiAlphanumOfLength(10), ContentType.APPLICATION_JSON), + new NStringEntity(randomAsciiAlphanumOfLength(10), ContentType.APPLICATION_JSON), + new ByteArrayEntity(randomBytesOfLength(40), ContentType.APPLICATION_JSON) + })); + } + } + + if (randomBoolean()) { + int headerCount = between(1, 5); + Header[] headers = new Header[headerCount]; + for (int i = 0; i < headerCount; i++) { + headers[i] = new BasicHeader(randomAsciiAlphanumOfLength(3), randomAsciiAlphanumOfLength(3)); + } + request.setHeaders(headers); + } + + if (randomBoolean()) { + request.setHttpAsyncResponseConsumerFactory(new HeapBufferedResponseConsumerFactory(1)); + } + + return request; + } + + private Request copy(Request request) { + Request copy = new Request(request.getMethod(), request.getEndpoint()); + copyMutables(request, copy); + return copy; + } + + private Request mutate(Request request) { + if (randomBoolean()) { + // Mutate request or method but keep everything else constant + Request mutant = randomBoolean() + ? 
new Request(request.getMethod() + "m", request.getEndpoint()) + : new Request(request.getMethod(), request.getEndpoint() + "m"); + copyMutables(request, mutant); + return mutant; + } + Request mutant = copy(request); + int mutationType = between(0, 3); + switch (mutationType) { + case 0: + mutant.addParameter(randomAsciiAlphanumOfLength(mutant.getParameters().size() + 4), "extra"); + return mutant; + case 1: + mutant.setJsonEntity("mutant"); // randomRequest can't produce this value + return mutant; + case 2: + if (mutant.getHeaders().length > 0) { + mutant.setHeaders(new Header[0]); + } else { + mutant.setHeaders(new BasicHeader("extra", "m")); + } + return mutant; + case 3: + mutant.setHttpAsyncResponseConsumerFactory(new HeapBufferedResponseConsumerFactory(5)); + return mutant; + default: + throw new UnsupportedOperationException("Unknown mutation type [" + mutationType + "]"); + } + } + + private void copyMutables(Request from, Request to) { + for (Map.Entry param : from.getParameters().entrySet()) { + to.addParameter(param.getKey(), param.getValue()); + } + to.setEntity(from.getEntity()); + to.setHeaders(from.getHeaders()); + to.setHttpAsyncResponseConsumerFactory(from.getHttpAsyncResponseConsumerFactory()); + } } From 7f47ff9fcd1b6af8128baffaacd99192d5033aa8 Mon Sep 17 00:00:00 2001 From: lcawl Date: Mon, 14 May 2018 15:35:02 -0700 Subject: [PATCH 20/31] [DOCS] Fixes title capitalization in security content --- x-pack/docs/en/security/auditing.asciidoc | 51 ++++++++++--------- .../active-directory-realm.asciidoc | 1 + .../authentication/anonymous-access.asciidoc | 3 +- .../authentication/built-in-users.asciidoc | 1 + .../authentication/custom-realm.asciidoc | 7 +-- .../authentication/file-realm.asciidoc | 1 + .../authentication/internal-users.asciidoc | 1 + .../authentication/ldap-realm.asciidoc | 1 + .../authentication/native-realm.asciidoc | 1 + .../security/authentication/overview.asciidoc | 1 + .../authentication/pki-realm.asciidoc | 1 + .../security/authentication/realms.asciidoc | 1 + .../authentication/saml-guide.asciidoc | 3 +- .../authentication/saml-realm.asciidoc | 1 + .../authentication/user-cache.asciidoc | 5 +- .../authorization/alias-privileges.asciidoc | 3 +- .../custom-roles-provider.asciidoc | 7 +-- ...field-and-document-access-control.asciidoc | 13 ++--- .../authorization/mapping-roles.asciidoc | 13 ++--- .../overview.asciidoc} | 25 ++++----- .../authorization/run-as-privilege.asciidoc | 3 +- .../ccs-clients-integrations.asciidoc | 3 +- .../docs/en/security/getting-started.asciidoc | 3 +- .../en/security/how-security-works.asciidoc | 5 +- x-pack/docs/en/security/index.asciidoc | 5 +- x-pack/docs/en/security/limitations.asciidoc | 1 + x-pack/docs/en/security/reference.asciidoc | 1 + .../security/securing-communications.asciidoc | 5 +- .../docs/en/security/troubleshooting.asciidoc | 1 + .../en/security/using-ip-filtering.asciidoc | 7 +-- 30 files changed, 102 insertions(+), 72 deletions(-) rename x-pack/docs/en/security/{authorization.asciidoc => authorization/overview.asciidoc} (96%) diff --git a/x-pack/docs/en/security/auditing.asciidoc b/x-pack/docs/en/security/auditing.asciidoc index 6cd31d076f9..ee508a5ac8d 100644 --- a/x-pack/docs/en/security/auditing.asciidoc +++ b/x-pack/docs/en/security/auditing.asciidoc @@ -1,5 +1,6 @@ +[role="xpack"] [[auditing]] -== Auditing Security Events +== Auditing security events You can enable auditing to keep track of security-related events such as authentication failures and refused connections. 
Logging these events enables you @@ -40,7 +41,7 @@ events are pushed to the index by setting [float] [[audit-event-types]] -=== Audit Event Types +=== Audit event types Each request may generate multiple audit events. The following is a list of the events that can be generated: @@ -81,11 +82,11 @@ The following is a list of the events that can be generated: [float] [[audit-event-attributes]] -=== Audit Event Attributes +=== Audit event attributes The following table shows the common attributes that can be associated with every event. -.Common Attributes +.Common attributes [cols="2,7",options="header"] |====== | Attribute | Description @@ -103,7 +104,7 @@ The following table shows the common attributes that can be associated with ever The following tables show the attributes that can be associated with each type of event. The log level determines which attributes are included in a log entry. -.REST anonymous_access_denied Attributes +.REST anonymous_access_denied attributes [cols="2,7",options="header"] |====== | Attribute | Description @@ -112,7 +113,7 @@ The log level determines which attributes are included in a log entry. | `request_body` | The body of the request, if enabled. |====== -.REST authentication_success Attributes +.REST authentication_success attributes [cols="2,7",options="header"] |====== | Attribute | Description @@ -123,7 +124,7 @@ The log level determines which attributes are included in a log entry. | `request_body` | The body of the request, if enabled. |====== -.REST authentication_failed Attributes +.REST authentication_failed attributes [cols="2,7",options="header"] |====== | Attribute | Description @@ -133,7 +134,7 @@ The log level determines which attributes are included in a log entry. | `request_body` | The body of the request, if enabled. |====== -.REST realm_authentication_failed Attributes +.REST realm_authentication_failed attributes [cols="2,7",options="header"] |====== | Attribute | Description @@ -146,7 +147,7 @@ The log level determines which attributes are included in a log entry. consulted realm. |====== -.Transport anonymous_access_denied Attributes +.Transport anonymous_access_denied attributes [cols="2,7",options="header"] |====== | Attribute | Description @@ -161,7 +162,7 @@ The log level determines which attributes are included in a log entry. pertains to (when applicable). |====== -.Transport authentication_success Attributes +.Transport authentication_success attributes [cols="2,7",options="header"] |====== | Attribute | Description @@ -176,7 +177,7 @@ The log level determines which attributes are included in a log entry. | `request` | The type of request that was executed. |====== -.Transport authentication_failed Attributes +.Transport authentication_failed attributes [cols="2,7",options="header"] |====== | Attribute | Description @@ -192,7 +193,7 @@ The log level determines which attributes are included in a log entry. pertains to (when applicable). |====== -.Transport realm_authentication_failed Attributes +.Transport realm_authentication_failed attributes [cols="2,7",options="header"] |====== | Attribute | Description @@ -211,7 +212,7 @@ The log level determines which attributes are included in a log entry. consulted realm. |====== -.Transport access_granted Attributes +.Transport access_granted attributes [cols="2,7",options="header"] |====== | Attribute | Description @@ -228,7 +229,7 @@ The log level determines which attributes are included in a log entry. pertains to (when applicable). 
|====== -.Transport access_denied Attributes +.Transport access_denied attributes [cols="2,7",options="header"] |====== | Attribute | Description @@ -245,7 +246,7 @@ The log level determines which attributes are included in a log entry. relates to (when applicable). |====== -.Transport tampered_request Attributes +.Transport tampered_request attributes [cols="2,7",options="header"] |====== | Attribute | Description @@ -261,7 +262,7 @@ The log level determines which attributes are included in a log entry. pertains to (when applicable). |====== -.IP Filter connection_granted Attributes +.IP filter connection_granted attributes [cols="2,7",options="header"] |====== | Attribute | Description @@ -271,7 +272,7 @@ The log level determines which attributes are included in a log entry. the request. |====== -.IP Filter connection_denied Attributes +.IP filter connection_denied attributes [cols="2,7",options="header"] |====== | Attribute | Description @@ -283,14 +284,14 @@ The log level determines which attributes are included in a log entry. [float] [[audit-log-output]] -=== Logfile Audit Output +=== Logfile audit output The `logfile` audit output is the default output for auditing. It writes data to the `_access.log` file in the logs directory. [float] [[audit-log-entry-format]] -=== Log Entry Format +=== Log entry format The format of a log entry is: @@ -318,7 +319,7 @@ The format of a log entry is: [float] [[audit-log-settings]] -=== Logfile Output Settings +=== Logfile output settings The events and some other information about what gets logged can be controlled using settings in the `elasticsearch.yml` file. See @@ -336,7 +337,7 @@ file located in `CONFIG_DIR`. By default, audit information is appended to the [float] [[audit-log-ignore-policy]] -=== Logfile Audit Events Ignore Policies +=== Logfile audit events ignore policies The comprehensive audit trail is necessary to ensure accountability. It offers tremendous value during incident response and can even be required for demonstrating compliance. @@ -414,7 +415,7 @@ xpack.security.audit.logfile.events.ignore_filters: [float] [[audit-index]] -=== Index Audit Output +=== Index audit output In addition to logging to a file, you can store audit logs in Elasticsearch rolling indices. These indices can be either on the same cluster, or on a @@ -429,13 +430,13 @@ xpack.security.audit.outputs: [ index, logfile ] ---------------------------- For more configuration options, see -{ref}/auditing-settings.html#index-audit-settings[Audit Log Indexing Configuration Settings]. +{ref}/auditing-settings.html#index-audit-settings[Audit log indexing configuration settings]. IMPORTANT: No filtering is performed when auditing, so sensitive data may be audited in plain text when including the request body in audit events. [float] -==== Audit Index Settings +==== Audit index settings You can also configure settings for the indices that the events are stored in. 
These settings are configured in the `xpack.security.audit.index.settings` namespace @@ -451,7 +452,7 @@ xpack.security.audit.index.settings: ---------------------------- [float] -==== Forwarding Audit Logs to a Remote Cluster +==== Forwarding audit logs to a remote cluster To index audit events to a remote Elasticsearch cluster, you configure the following `xpack.security.audit.index.client` settings: diff --git a/x-pack/docs/en/security/authentication/active-directory-realm.asciidoc b/x-pack/docs/en/security/authentication/active-directory-realm.asciidoc index 2069176172e..c0461f4f338 100644 --- a/x-pack/docs/en/security/authentication/active-directory-realm.asciidoc +++ b/x-pack/docs/en/security/authentication/active-directory-realm.asciidoc @@ -1,3 +1,4 @@ +[role="xpack"] [[active-directory-realm]] === Active Directory user authentication diff --git a/x-pack/docs/en/security/authentication/anonymous-access.asciidoc b/x-pack/docs/en/security/authentication/anonymous-access.asciidoc index c95328e99a3..983348f8cf5 100644 --- a/x-pack/docs/en/security/authentication/anonymous-access.asciidoc +++ b/x-pack/docs/en/security/authentication/anonymous-access.asciidoc @@ -1,5 +1,6 @@ +[role="xpack"] [[anonymous-access]] -=== Enabling Anonymous Access +=== Enabling anonymous access Incoming requests are considered to be _anonymous_ if no authentication token can be extracted from the incoming request. By default, anonymous requests are rejected and an authentication error is returned (status code `401`). diff --git a/x-pack/docs/en/security/authentication/built-in-users.asciidoc b/x-pack/docs/en/security/authentication/built-in-users.asciidoc index 2400643755a..74fc9f1e1db 100644 --- a/x-pack/docs/en/security/authentication/built-in-users.asciidoc +++ b/x-pack/docs/en/security/authentication/built-in-users.asciidoc @@ -1,3 +1,4 @@ +[role="xpack"] [[built-in-users]] === Built-in users diff --git a/x-pack/docs/en/security/authentication/custom-realm.asciidoc b/x-pack/docs/en/security/authentication/custom-realm.asciidoc index a7df6f5ff86..8e0114b7454 100644 --- a/x-pack/docs/en/security/authentication/custom-realm.asciidoc +++ b/x-pack/docs/en/security/authentication/custom-realm.asciidoc @@ -1,5 +1,6 @@ +[role="xpack"] [[custom-realms]] -=== Integrating with Other Authentication Systems +=== Integrating with other authentication systems If you are using an authentication system that is not supported out-of-the-box by {security}, you can create a custom realm to interact with it to authenticate @@ -7,7 +8,7 @@ users. You implement a custom realm as an SPI loaded security extension as part of an ordinary elasticsearch plugin. [[implementing-custom-realm]] -==== Implementing a Custom Realm +==== Implementing a custom realm Sample code that illustrates the structure and implementation of a custom realm is provided in the https://github.com/elastic/shield-custom-realm-example[custom-realm-example] @@ -70,7 +71,7 @@ part of the `SecurityExtension` interface, it's available as part of the elastic . Bundle all in a single zip file. 
[[using-custom-realm]] -==== Using a Custom Realm to Authenticate Users +==== Using a custom realm to authenticate users To use a custom realm: diff --git a/x-pack/docs/en/security/authentication/file-realm.asciidoc b/x-pack/docs/en/security/authentication/file-realm.asciidoc index cf6f5cacd1c..1161778bb80 100644 --- a/x-pack/docs/en/security/authentication/file-realm.asciidoc +++ b/x-pack/docs/en/security/authentication/file-realm.asciidoc @@ -1,3 +1,4 @@ +[role="xpack"] [[file-realm]] === File-based user authentication diff --git a/x-pack/docs/en/security/authentication/internal-users.asciidoc b/x-pack/docs/en/security/authentication/internal-users.asciidoc index 53468363dc8..77571a53a56 100644 --- a/x-pack/docs/en/security/authentication/internal-users.asciidoc +++ b/x-pack/docs/en/security/authentication/internal-users.asciidoc @@ -1,3 +1,4 @@ +[role="xpack"] [[internal-users]] === Internal users diff --git a/x-pack/docs/en/security/authentication/ldap-realm.asciidoc b/x-pack/docs/en/security/authentication/ldap-realm.asciidoc index 205c18429bc..02d0162a9c9 100644 --- a/x-pack/docs/en/security/authentication/ldap-realm.asciidoc +++ b/x-pack/docs/en/security/authentication/ldap-realm.asciidoc @@ -1,3 +1,4 @@ +[role="xpack"] [[ldap-realm]] === LDAP user authentication diff --git a/x-pack/docs/en/security/authentication/native-realm.asciidoc b/x-pack/docs/en/security/authentication/native-realm.asciidoc index 3643e42e02a..f7b514b8144 100644 --- a/x-pack/docs/en/security/authentication/native-realm.asciidoc +++ b/x-pack/docs/en/security/authentication/native-realm.asciidoc @@ -1,3 +1,4 @@ +[role="xpack"] [[native-realm]] === Native user authentication diff --git a/x-pack/docs/en/security/authentication/overview.asciidoc b/x-pack/docs/en/security/authentication/overview.asciidoc index ada5453c7a7..da5f6a4ea3c 100644 --- a/x-pack/docs/en/security/authentication/overview.asciidoc +++ b/x-pack/docs/en/security/authentication/overview.asciidoc @@ -1,3 +1,4 @@ +[role="xpack"] [[setting-up-authentication]] == User authentication diff --git a/x-pack/docs/en/security/authentication/pki-realm.asciidoc b/x-pack/docs/en/security/authentication/pki-realm.asciidoc index 4fc91717f93..6ce9b0e0770 100644 --- a/x-pack/docs/en/security/authentication/pki-realm.asciidoc +++ b/x-pack/docs/en/security/authentication/pki-realm.asciidoc @@ -1,3 +1,4 @@ +[role="xpack"] [[pki-realm]] === PKI user authentication diff --git a/x-pack/docs/en/security/authentication/realms.asciidoc b/x-pack/docs/en/security/authentication/realms.asciidoc index 7bd48c5c8f0..ec0945b5a11 100644 --- a/x-pack/docs/en/security/authentication/realms.asciidoc +++ b/x-pack/docs/en/security/authentication/realms.asciidoc @@ -1,3 +1,4 @@ +[role="xpack"] [[realms]] === Realms diff --git a/x-pack/docs/en/security/authentication/saml-guide.asciidoc b/x-pack/docs/en/security/authentication/saml-guide.asciidoc index d1f7961fecb..740f51c877d 100644 --- a/x-pack/docs/en/security/authentication/saml-guide.asciidoc +++ b/x-pack/docs/en/security/authentication/saml-guide.asciidoc @@ -1,6 +1,7 @@ +[role="xpack"] [[saml-guide]] -== Configuring SAML single-sign-on on the Elastic Stack +== Configuring SAML single-sign-on on the {stack} The Elastic Stack supports SAML single-sign-on (SSO) into {kib}, using {es} as a backend service. 
In SAML terminology, the Elastic Stack is operating as a diff --git a/x-pack/docs/en/security/authentication/saml-realm.asciidoc b/x-pack/docs/en/security/authentication/saml-realm.asciidoc index c05f82d341b..a55ae270a19 100644 --- a/x-pack/docs/en/security/authentication/saml-realm.asciidoc +++ b/x-pack/docs/en/security/authentication/saml-realm.asciidoc @@ -1,3 +1,4 @@ +[role="xpack"] [[saml-realm]] === SAML authentication {security} supports user authentication using SAML Single Sign On. diff --git a/x-pack/docs/en/security/authentication/user-cache.asciidoc b/x-pack/docs/en/security/authentication/user-cache.asciidoc index ba2b363a843..36af070bf06 100644 --- a/x-pack/docs/en/security/authentication/user-cache.asciidoc +++ b/x-pack/docs/en/security/authentication/user-cache.asciidoc @@ -1,5 +1,6 @@ +[role="xpack"] [[controlling-user-cache]] -=== Controlling the User Cache +=== Controlling the user cache User credentials are cached in memory on each node to avoid connecting to a remote authentication service or hitting the disk for every incoming request. @@ -34,7 +35,7 @@ setting the `cache_hash_algo` setting to any of the following: |======================= [[cache-eviction-api]] -==== Evicting Users from the Cache +==== Evicting users from the cache {security} exposes a {ref}/security-api-clear-cache.html[Clear Cache API] you can use diff --git a/x-pack/docs/en/security/authorization/alias-privileges.asciidoc b/x-pack/docs/en/security/authorization/alias-privileges.asciidoc index 6916e2ab2ca..05c9359df5a 100644 --- a/x-pack/docs/en/security/authorization/alias-privileges.asciidoc +++ b/x-pack/docs/en/security/authorization/alias-privileges.asciidoc @@ -1,5 +1,6 @@ +[role="xpack"] [[securing-aliases]] -=== Granting Privileges for Indices & Aliases +=== Granting privileges for indices and aliases Elasticsearch allows to execute operations against {ref}/indices-aliases.html[index aliases], which are effectively virtual indices. An alias points to one or more indices, diff --git a/x-pack/docs/en/security/authorization/custom-roles-provider.asciidoc b/x-pack/docs/en/security/authorization/custom-roles-provider.asciidoc index 9056467ced9..c218fa04f8e 100644 --- a/x-pack/docs/en/security/authorization/custom-roles-provider.asciidoc +++ b/x-pack/docs/en/security/authorization/custom-roles-provider.asciidoc @@ -1,5 +1,6 @@ +[role="xpack"] [[custom-roles-provider]] -=== Custom Roles Provider Extension +=== Custom roles provider extension If you need to retrieve user roles from a system not supported out-of-the-box by {security}, you can create a custom roles provider to retrieve and resolve @@ -7,7 +8,7 @@ roles. You implement a custom roles provider as an SPI loaded security extension as part of an ordinary elasticsearch plugin. [[implementing-custom-roles-provider]] -==== Implementing a Custom Roles Provider +==== Implementing a custom roles provider To create a custom roles provider: @@ -62,7 +63,7 @@ part of the `SecurityExtension` interface, it's available as part of the elastic . Bundle all in a single zip file. 
[[using-custom-roles-provider]] -==== Using a Custom Roles Provider to Resolve Roles +==== Using a custom roles provider to resolve roles To use a custom roles provider: diff --git a/x-pack/docs/en/security/authorization/field-and-document-access-control.asciidoc b/x-pack/docs/en/security/authorization/field-and-document-access-control.asciidoc index 88d0e157ca0..a1aa44895c6 100644 --- a/x-pack/docs/en/security/authorization/field-and-document-access-control.asciidoc +++ b/x-pack/docs/en/security/authorization/field-and-document-access-control.asciidoc @@ -1,5 +1,6 @@ +[role="xpack"] [[field-and-document-access-control]] -=== Setting Up Field and Document Level Security +=== Setting up field and document level security You can control access to data within an index by adding field and document level security permissions to a role. Field level security permissions restrict access @@ -23,7 +24,7 @@ document level permissions per index. See <>. ===================================================================== [[field-level-security]] -==== Field Level Security +==== Field level security To enable field level security, specify the fields that each role can access as part of the indices permissions in a role definition. Field level security is @@ -235,7 +236,7 @@ The resulting permission is equal to: [[document-level-security]] -==== Document Level Security +==== Document level security Document level security restricts the documents that users have read access to. To enable document level security, specify a query that matches all the @@ -292,7 +293,7 @@ For example, the following role grants read access only to the documents whose NOTE: `query` also accepts queries written as string values. [[templating-role-query]] -===== Templating a Role Query +===== Templating a role query You can use Mustache templates in a role query to insert the username of the current authenticated user into the role. Like other places in {es} that support @@ -358,7 +359,7 @@ based on the `group.id` field in your documents: -------------------------------------------------- [[set-security-user-processor]] -===== Set Security User Ingest Processor +===== Set security user ingest processor If an index is shared by many small users it makes sense to put all these users into the same index. Having a dedicated index or shard per user is wasteful. @@ -416,7 +417,7 @@ to the `user` field for all documents that are processed by this pipeline: -------------------------------------------------- [[multiple-roles-dls-fls]] -==== Multiple Roles with Document and Field Level Security +==== Multiple roles with document and field level security A user can have many roles and each role can define different permissions on the same index. It is important to understand the behavior of document and field diff --git a/x-pack/docs/en/security/authorization/mapping-roles.asciidoc b/x-pack/docs/en/security/authorization/mapping-roles.asciidoc index fba87db9786..cf8373a65f3 100644 --- a/x-pack/docs/en/security/authorization/mapping-roles.asciidoc +++ b/x-pack/docs/en/security/authorization/mapping-roles.asciidoc @@ -1,5 +1,6 @@ +[role="xpack"] [[mapping-roles]] -=== Mapping Users and Groups to Roles +=== Mapping users and groups to roles If you authenticate users with the `native` or `file` realms, you can manage role assignment by using the <> or @@ -24,13 +25,13 @@ you are able to map users to both API-managed roles and file-managed roles (and likewise for file-based role-mappings). 
[[mapping-roles-api]] -==== Using the Role Mapping API +==== Using the role mapping API You can define role-mappings through the {ref}/security-api-role-mapping.html[role mapping API]. [[mapping-roles-file]] -==== Using Role Mapping Files +==== Using role mapping files To use file based role-mappings, you must configure the mappings in a YAML file and copy it to each node in the cluster. Tools like Puppet or Chef can help with @@ -56,10 +57,10 @@ You can change this default behavior by changing the this is a common setting in Elasticsearch, changing its value might effect other schedules in the system. -==== Realm Specific Details +==== Realm specific details [float] [[ldap-role-mapping]] -===== Active Directory and LDAP Realms +===== Active Directory and LDAP realms To specify users and groups in the role mappings, you use their _Distinguished Names_ (DNs). A DN is a string that uniquely identifies the user @@ -113,7 +114,7 @@ PUT _xpack/security/role_mapping/basic_users [float] [[pki-role-mapping]] -===== PKI Realms +===== PKI realms PKI realms support mapping users to roles, but you cannot map groups as the PKI realm has no notion of a group. diff --git a/x-pack/docs/en/security/authorization.asciidoc b/x-pack/docs/en/security/authorization/overview.asciidoc similarity index 96% rename from x-pack/docs/en/security/authorization.asciidoc rename to x-pack/docs/en/security/authorization/overview.asciidoc index ed171415056..9dc8185db4d 100644 --- a/x-pack/docs/en/security/authorization.asciidoc +++ b/x-pack/docs/en/security/authorization/overview.asciidoc @@ -1,5 +1,6 @@ +[role="xpack"] [[authorization]] -== Configuring Role-based Access Control +== Configuring role-based access control {security} introduces the concept of _authorization_ to {es}. Authorization is the process of determining whether the user behind an incoming @@ -8,7 +9,7 @@ successfully authenticated and the user behind the request is identified. [[roles]] [float] -=== Roles, Permissions and Privileges +=== Roles, permissions, and privileges The authorization process revolves around the following 5 constructs: @@ -49,7 +50,7 @@ then assign users to the roles. These can be assigned to users in a number of ways depending on the realms by which the users are authenticated. [[built-in-roles]] -=== Built-in Roles +=== Built-in roles {security} applies a default role to all users, including <>. The default role enables users to access @@ -164,7 +165,7 @@ stats. [[defining-roles]] -=== Defining Roles +=== Defining roles A role is defined by the following JSON structure: @@ -276,14 +277,14 @@ see <>. [float] [[roles-management-ui]] -=== Role Management UI +=== Role management UI {security} enables you to easily manage users and roles from within {kib}. To manage roles, log in to {kib} and go to *Management / Elasticsearch / Roles*. [float] [[roles-management-api]] -=== Role Management API +=== Role management API The _Role Management APIs_ enable you to add, update, remove and retrieve roles dynamically. When you use the APIs to manage roles in the `native` realm, the @@ -292,7 +293,7 @@ see {ref}/security-api-roles.html[Role Management APIs]. [float] [[roles-management-file]] -=== File-based Role Management +=== File-based role management Apart from the _Role Management APIs_, roles can also be defined in local `roles.yml` file located in `CONFIG_DIR`. This is a YAML file where each @@ -338,12 +339,12 @@ click_admins: {security} continuously monitors the `roles.yml` file and automatically picks up and applies any changes to it. 
-include::authorization/alias-privileges.asciidoc[] +include::alias-privileges.asciidoc[] -include::authorization/mapping-roles.asciidoc[] +include::mapping-roles.asciidoc[] -include::authorization/field-and-document-access-control.asciidoc[] +include::field-and-document-access-control.asciidoc[] -include::authorization/run-as-privilege.asciidoc[] +include::run-as-privilege.asciidoc[] -include::authorization/custom-roles-provider.asciidoc[] +include::custom-roles-provider.asciidoc[] diff --git a/x-pack/docs/en/security/authorization/run-as-privilege.asciidoc b/x-pack/docs/en/security/authorization/run-as-privilege.asciidoc index e246f2b1942..93d11c0ab2a 100644 --- a/x-pack/docs/en/security/authorization/run-as-privilege.asciidoc +++ b/x-pack/docs/en/security/authorization/run-as-privilege.asciidoc @@ -1,5 +1,6 @@ +[role="xpack"] [[run-as-privilege]] -=== Submitting Requests on Behalf of Other Users +=== Submitting requests on behalf of other users {security} supports a permission that enables an authenticated user to submit requests on behalf of other users. If your application already authenticates diff --git a/x-pack/docs/en/security/ccs-clients-integrations.asciidoc b/x-pack/docs/en/security/ccs-clients-integrations.asciidoc index e25586dfb37..cbf4ede328e 100644 --- a/x-pack/docs/en/security/ccs-clients-integrations.asciidoc +++ b/x-pack/docs/en/security/ccs-clients-integrations.asciidoc @@ -1,5 +1,6 @@ +[role="xpack"] [[ccs-clients-integrations]] -== Cross Cluster Search, Clients and Integrations +== Cross cluster search, clients, and integrations When using {ref}/modules-cross-cluster-search.html[Cross Cluster Search] you need to take extra steps to secure communications with the connected diff --git a/x-pack/docs/en/security/getting-started.asciidoc b/x-pack/docs/en/security/getting-started.asciidoc index 8aa35a94281..b8f1183cddf 100644 --- a/x-pack/docs/en/security/getting-started.asciidoc +++ b/x-pack/docs/en/security/getting-started.asciidoc @@ -1,5 +1,6 @@ +[role="xpack"] [[security-getting-started]] -== Getting Started with Security +== Getting started with security To secure a cluster, you must enable {security} on every node in the cluster. Basic authentication is enabled by default--to communicate diff --git a/x-pack/docs/en/security/how-security-works.asciidoc b/x-pack/docs/en/security/how-security-works.asciidoc index ae402dfe05e..dcc152c2bca 100644 --- a/x-pack/docs/en/security/how-security-works.asciidoc +++ b/x-pack/docs/en/security/how-security-works.asciidoc @@ -1,5 +1,6 @@ +[role="xpack"] [[how-security-works]] -== How Security Works +== How security works An Elasticsearch cluster is typically made out of many moving parts. There are the Elasticsearch nodes that form the cluster, and often Logstash instances, @@ -64,7 +65,7 @@ For more information on user authentication see <> [float] -=== Node/Client Authentication and Channel Encryption +=== Node/client authentication and channel encryption {security} supports configuring SSL/TLS for securing the communication channels to, from and within the cluster. 
This support accounts for: diff --git a/x-pack/docs/en/security/index.asciidoc b/x-pack/docs/en/security/index.asciidoc index 188353d01a3..d5f970a3fb8 100644 --- a/x-pack/docs/en/security/index.asciidoc +++ b/x-pack/docs/en/security/index.asciidoc @@ -1,5 +1,6 @@ +[role="xpack"] [[xpack-security]] -= Securing the Elastic Stack += Securing the {stack} [partintro] -- @@ -100,7 +101,7 @@ include::how-security-works.asciidoc[] include::authentication/overview.asciidoc[] -include::authorization.asciidoc[] +include::authorization/overview.asciidoc[] include::auditing.asciidoc[] diff --git a/x-pack/docs/en/security/limitations.asciidoc b/x-pack/docs/en/security/limitations.asciidoc index c2616ac6565..c127ee3d796 100644 --- a/x-pack/docs/en/security/limitations.asciidoc +++ b/x-pack/docs/en/security/limitations.asciidoc @@ -1,3 +1,4 @@ +[role="xpack"] [[security-limitations]] == Security Limitations diff --git a/x-pack/docs/en/security/reference.asciidoc b/x-pack/docs/en/security/reference.asciidoc index 90668651b5d..21138138cfb 100644 --- a/x-pack/docs/en/security/reference.asciidoc +++ b/x-pack/docs/en/security/reference.asciidoc @@ -1,3 +1,4 @@ +[role="xpack"] [[security-reference]] == Reference * <> diff --git a/x-pack/docs/en/security/securing-communications.asciidoc b/x-pack/docs/en/security/securing-communications.asciidoc index e876ce9160b..ef07f0113cb 100644 --- a/x-pack/docs/en/security/securing-communications.asciidoc +++ b/x-pack/docs/en/security/securing-communications.asciidoc @@ -1,5 +1,6 @@ +[role="xpack"] [[encrypting-communications]] -== Encrypting Communications +== Encrypting communications Elasticsearch nodes store data that may be confidential. Attacks on the data may come from the network. These attacks could include sniffing of the data, @@ -21,7 +22,7 @@ include::securing-communications/setting-up-ssl.asciidoc[] //TO-DO: These sections can be removed when all links to them are removed. [[ciphers]] -=== Enabling Cipher Suites for Stronger Encryption +=== Enabling cipher suites for stronger encryption See {ref}/ciphers.html[Enabling Cipher Suites for Stronger Encryption]. diff --git a/x-pack/docs/en/security/troubleshooting.asciidoc b/x-pack/docs/en/security/troubleshooting.asciidoc index e805ed07a7d..c202ed9dbed 100644 --- a/x-pack/docs/en/security/troubleshooting.asciidoc +++ b/x-pack/docs/en/security/troubleshooting.asciidoc @@ -1,3 +1,4 @@ +[role="xpack"] [[security-troubleshooting]] == {security} Troubleshooting ++++ diff --git a/x-pack/docs/en/security/using-ip-filtering.asciidoc b/x-pack/docs/en/security/using-ip-filtering.asciidoc index 37beced5a94..817975c69de 100644 --- a/x-pack/docs/en/security/using-ip-filtering.asciidoc +++ b/x-pack/docs/en/security/using-ip-filtering.asciidoc @@ -1,5 +1,6 @@ +[role="xpack"] [[ip-filtering]] -== Restricting Connections with IP Filtering +== Restricting connections with IP filtering You can apply IP filtering to application clients, node clients, or transport clients, in addition to other nodes that are attempting to join the cluster. @@ -92,7 +93,7 @@ transport.profiles.client.xpack.security.filter.deny: _all NOTE: When you do not specify a profile, `default` is used automatically. [float] -=== HTTP Filtering +=== HTTP filtering You may want to have different IP filtering for the transport and HTTP protocols. 
@@ -106,7 +107,7 @@ xpack.security.http.filter.deny: _all [float] [[dynamic-ip-filtering]] -==== Dynamically updating ip filter settings +==== Dynamically updating IP filter settings In case of running in an environment with highly dynamic IP addresses like cloud based hosting, it is very hard to know the IP addresses upfront when provisioning From 9881bfaea58ef7afbd8d4e77aef83cf855366175 Mon Sep 17 00:00:00 2001 From: Nik Everett Date: Mon, 14 May 2018 18:40:54 -0400 Subject: [PATCH 21/31] Docs: Document how to rebuild analyzers (#30498) Adds documentation for how to rebuild all the built in analyzers and tests for that documentation using the mechanism added in #29535. Closes #29499 --- .../analyzers/fingerprint-analyzer.asciidoc | 57 ++++++++++++----- .../analyzers/keyword-analyzer.asciidoc | 45 +++++++++++--- .../analyzers/pattern-analyzer.asciidoc | 61 +++++++++++++++---- .../analyzers/simple-analyzer.asciidoc | 42 ++++++++++--- .../analyzers/standard-analyzer.asciidoc | 54 ++++++++++++---- .../analysis/analyzers/stop-analyzer.asciidoc | 58 ++++++++++++++---- .../analyzers/whitespace-analyzer.asciidoc | 42 ++++++++++--- 7 files changed, 284 insertions(+), 75 deletions(-) diff --git a/docs/reference/analysis/analyzers/fingerprint-analyzer.asciidoc b/docs/reference/analysis/analyzers/fingerprint-analyzer.asciidoc index 53c7d913ad2..cc873a4fe89 100644 --- a/docs/reference/analysis/analyzers/fingerprint-analyzer.asciidoc +++ b/docs/reference/analysis/analyzers/fingerprint-analyzer.asciidoc @@ -9,20 +9,6 @@ Input text is lowercased, normalized to remove extended characters, sorted, deduplicated and concatenated into a single token. If a stopword list is configured, stop words will also be removed. -[float] -=== Definition - -It consists of: - -Tokenizer:: -* <> - -Token Filters (in order):: -1. <> -2. <> -3. <> (disabled by default) -4. <> - [float] === Example output @@ -149,3 +135,46 @@ The above example produces the following term: --------------------------- [ consistent godel said sentence yes ] --------------------------- + +[float] +=== Definition + +The `fingerprint` tokenizer consists of: + +Tokenizer:: +* <> + +Token Filters (in order):: +* <> +* <> +* <> (disabled by default) +* <> + +If you need to customize the `fingerprint` analyzer beyond the configuration +parameters then you need to recreate it as a `custom` analyzer and modify +it, usually by adding token filters. This would recreate the built-in +`fingerprint` analyzer and you can use it as a starting point for further +customization: + +[source,js] +---------------------------------------------------- +PUT /fingerprint_example +{ + "settings": { + "analysis": { + "analyzer": { + "rebuilt_fingerprint": { + "tokenizer": "standard", + "filter": [ + "lowercase", + "asciifolding", + "fingerprint" + ] + } + } + } + } +} +---------------------------------------------------- +// CONSOLE +// TEST[s/\n$/\nstartyaml\n - compare_analyzers: {index: fingerprint_example, first: fingerprint, second: rebuilt_fingerprint}\nendyaml\n/] diff --git a/docs/reference/analysis/analyzers/keyword-analyzer.asciidoc b/docs/reference/analysis/analyzers/keyword-analyzer.asciidoc index cc94f3b757e..954b514ced6 100644 --- a/docs/reference/analysis/analyzers/keyword-analyzer.asciidoc +++ b/docs/reference/analysis/analyzers/keyword-analyzer.asciidoc @@ -4,14 +4,6 @@ The `keyword` analyzer is a ``noop'' analyzer which returns the entire input string as a single token. 
-[float]
-=== Definition
-
-It consists of:
-
-Tokenizer::
-* <>
-
 [float]
 === Example output
 
@@ -57,3 +49,40 @@ The above sentence would produce the following single term:
 === Configuration
 
 The `keyword` analyzer is not configurable.
+
+[float]
+=== Definition
+
+The `keyword` analyzer consists of:
+
+Tokenizer::
+* <>
+
+If you need to customize the `keyword` analyzer then you need to
+recreate it as a `custom` analyzer and modify it, usually by adding
+token filters. Usually, you should prefer the
+<> when you want strings that are not split
+into tokens, but just in case you need it, this would recreate the
+built-in `keyword` analyzer and you can use it as a starting point
+for further customization:
+
+[source,js]
+----------------------------------------------------
+PUT /keyword_example
+{
+  "settings": {
+    "analysis": {
+      "analyzer": {
+        "rebuilt_keyword": {
+          "tokenizer": "keyword",
+          "filter": [ <1>
+          ]
+        }
+      }
+    }
+  }
+}
+----------------------------------------------------
+// CONSOLE
+// TEST[s/\n$/\nstartyaml\n  - compare_analyzers: {index: keyword_example, first: keyword, second: rebuilt_keyword}\nendyaml\n/]
+<1> You'd add any token filters here.
diff --git a/docs/reference/analysis/analyzers/pattern-analyzer.asciidoc b/docs/reference/analysis/analyzers/pattern-analyzer.asciidoc
index 64ab3999ef9..027f37280a6 100644
--- a/docs/reference/analysis/analyzers/pattern-analyzer.asciidoc
+++ b/docs/reference/analysis/analyzers/pattern-analyzer.asciidoc
@@ -19,19 +19,6 @@ Read more about http://www.regular-expressions.info/catastrophic.html[pathologic
 
 ========================================
 
-
-[float]
-=== Definition
-
-It consists of:
-
-Tokenizer::
-* <>
-
-Token Filters::
-* <>
-* <> (disabled by default)
-
 [float]
 === Example output
 
@@ -378,3 +365,51 @@ The regex above is easier to understand as:
   [\p{L}&&[^\p{Lu}]]      # then lower case
   )
 --------------------------------------------------
+
+[float]
+=== Definition
+
+The `pattern` analyzer consists of:
+
+Tokenizer::
+* <>
+
+Token Filters::
+* <>
+* <> (disabled by default)
+
+If you need to customize the `pattern` analyzer beyond the configuration
+parameters then you need to recreate it as a `custom` analyzer and modify
+it, usually by adding token filters. This would recreate the built-in
+`pattern` analyzer and you can use it as a starting point for further
+customization:
+
+[source,js]
+----------------------------------------------------
+PUT /pattern_example
+{
+  "settings": {
+    "analysis": {
+      "tokenizer": {
+        "split_on_non_word": {
+          "type": "pattern",
+          "pattern": "\\W+" <1>
+        }
+      },
+      "analyzer": {
+        "rebuilt_pattern": {
+          "tokenizer": "split_on_non_word",
+          "filter": [
+            "lowercase" <2>
+          ]
+        }
+      }
+    }
+  }
+}
+----------------------------------------------------
+// CONSOLE
+// TEST[s/\n$/\nstartyaml\n  - compare_analyzers: {index: pattern_example, first: pattern, second: rebuilt_pattern}\nendyaml\n/]
+<1> The default pattern is `\W+` which splits on non-word characters
+and this is where you'd change it.
+<2> You'd add other token filters after `lowercase`.
diff --git a/docs/reference/analysis/analyzers/simple-analyzer.asciidoc b/docs/reference/analysis/analyzers/simple-analyzer.asciidoc
index a57c30d8dd6..d82655d9bd8 100644
--- a/docs/reference/analysis/analyzers/simple-analyzer.asciidoc
+++ b/docs/reference/analysis/analyzers/simple-analyzer.asciidoc
@@ -4,14 +4,6 @@
 The `simple` analyzer breaks text into terms whenever it encounters a
 character which is not a letter. All terms are lower cased.
-[float]
-=== Definition
-
-It consists of:
-
-Tokenizer::
-* <>
-
 [float]
 === Example output
 
@@ -127,3 +119,37 @@ The above sentence would produce the following terms:
 === Configuration
 
 The `simple` analyzer is not configurable.
+
+[float]
+=== Definition
+
+The `simple` analyzer consists of:
+
+Tokenizer::
+* <>
+
+If you need to customize the `simple` analyzer then you need to recreate
+it as a `custom` analyzer and modify it, usually by adding token filters.
+This would recreate the built-in `simple` analyzer and you can use it as
+a starting point for further customization:
+
+[source,js]
+----------------------------------------------------
+PUT /simple_example
+{
+  "settings": {
+    "analysis": {
+      "analyzer": {
+        "rebuilt_simple": {
+          "tokenizer": "lowercase",
+          "filter": [ <1>
+          ]
+        }
+      }
+    }
+  }
+}
+----------------------------------------------------
+// CONSOLE
+// TEST[s/\n$/\nstartyaml\n  - compare_analyzers: {index: simple_example, first: simple, second: rebuilt_simple}\nendyaml\n/]
+<1> You'd add any token filters here.
diff --git a/docs/reference/analysis/analyzers/standard-analyzer.asciidoc b/docs/reference/analysis/analyzers/standard-analyzer.asciidoc
index eacbb1c3cad..20aa072066b 100644
--- a/docs/reference/analysis/analyzers/standard-analyzer.asciidoc
+++ b/docs/reference/analysis/analyzers/standard-analyzer.asciidoc
@@ -7,19 +7,6 @@ Segmentation algorithm, as specified in
 http://unicode.org/reports/tr29/[Unicode Standard Annex #29]) and works well
 for most languages.
 
-[float]
-=== Definition
-
-It consists of:
-
-Tokenizer::
-* <>
-
-Token Filters::
-* <>
-* <>
-* <> (disabled by default)
-
 [float]
 === Example output
 
@@ -276,3 +263,44 @@ The above example produces the following terms:
 ---------------------------
 [ 2, quick, brown, foxes, jumpe, d, over, lazy, dog's, bone ]
 ---------------------------
+
+[float]
+=== Definition
+
+The `standard` analyzer consists of:
+
+Tokenizer::
+* <>
+
+Token Filters::
+* <>
+* <>
+* <> (disabled by default)
+
+If you need to customize the `standard` analyzer beyond the configuration
+parameters then you need to recreate it as a `custom` analyzer and modify
+it, usually by adding token filters. This would recreate the built-in
+`standard` analyzer and you can use it as a starting point:
+
+[source,js]
+----------------------------------------------------
+PUT /standard_example
+{
+  "settings": {
+    "analysis": {
+      "analyzer": {
+        "rebuilt_standard": {
+          "tokenizer": "standard",
+          "filter": [
+            "standard",
+            "lowercase" <1>
+          ]
+        }
+      }
+    }
+  }
+}
+----------------------------------------------------
+// CONSOLE
+// TEST[s/\n$/\nstartyaml\n  - compare_analyzers: {index: standard_example, first: standard, second: rebuilt_standard}\nendyaml\n/]
+<1> You'd add any token filters after `lowercase`.
diff --git a/docs/reference/analysis/analyzers/stop-analyzer.asciidoc b/docs/reference/analysis/analyzers/stop-analyzer.asciidoc index eacc7e106e7..1b84797d947 100644 --- a/docs/reference/analysis/analyzers/stop-analyzer.asciidoc +++ b/docs/reference/analysis/analyzers/stop-analyzer.asciidoc @@ -5,17 +5,6 @@ The `stop` analyzer is the same as the <> - -Token filters:: -* <> - [float] === Example output @@ -239,3 +228,50 @@ The above example produces the following terms: --------------------------- [ quick, brown, foxes, jumped, lazy, dog, s, bone ] --------------------------- + +[float] +=== Definition + +It consists of: + +Tokenizer:: +* <> + +Token filters:: +* <> + +If you need to customize the `stop` analyzer beyond the configuration +parameters then you need to recreate it as a `custom` analyzer and modify +it, usually by adding token filters. This would recreate the built-in +`stop` analyzer and you can use it as a starting point for further +customization: + +[source,js] +---------------------------------------------------- +PUT /stop_example +{ + "settings": { + "analysis": { + "filter": { + "english_stop": { + "type": "stop", + "stopwords": "_english_" <1> + } + }, + "analyzer": { + "rebuilt_stop": { + "tokenizer": "lowercase", + "filter": [ + "english_stop" <2> + ] + } + } + } + } +} +---------------------------------------------------- +// CONSOLE +// TEST[s/\n$/\nstartyaml\n - compare_analyzers: {index: stop_example, first: stop, second: rebuilt_stop}\nendyaml\n/] +<1> The default stopwords can be overridden with the `stopwords` + or `stopwords_path` parameters. +<2> You'd add any token filters after `english_stop`. diff --git a/docs/reference/analysis/analyzers/whitespace-analyzer.asciidoc b/docs/reference/analysis/analyzers/whitespace-analyzer.asciidoc index f95e5c6e4ab..31ba8d9ce8f 100644 --- a/docs/reference/analysis/analyzers/whitespace-analyzer.asciidoc +++ b/docs/reference/analysis/analyzers/whitespace-analyzer.asciidoc @@ -4,14 +4,6 @@ The `whitespace` analyzer breaks text into terms whenever it encounters a whitespace character. -[float] -=== Definition - -It consists of: - -Tokenizer:: -* <> - [float] === Example output @@ -120,3 +112,37 @@ The above sentence would produce the following terms: === Configuration The `whitespace` analyzer is not configurable. + +[float] +=== Definition + +It consists of: + +Tokenizer:: +* <> + +If you need to customize the `whitespace` analyzer then you need to +recreate it as a `custom` analyzer and modify it, usually by adding +token filters. This would recreate the built-in `whitespace` analyzer +and you can use it as a starting point for further customization: + +[source,js] +---------------------------------------------------- +PUT /whitespace_example +{ + "settings": { + "analysis": { + "analyzer": { + "rebuilt_whitespace": { + "tokenizer": "whitespace", + "filter": [ <1> + ] + } + } + } + } +} +---------------------------------------------------- +// CONSOLE +// TEST[s/\n$/\nstartyaml\n - compare_analyzers: {index: whitespace_example, first: whitespace, second: rebuilt_whitespace}\nendyaml\n/] +<1> You'd add any token filters here. 
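A quick way to sanity-check any of the rebuilt analyzers above, outside of
the `compare_analyzers` test hooks, is to run the built-in and rebuilt
versions through the `_analyze` API and compare the token streams. For
example, using the `keyword_example` index created earlier in this patch,
both of the requests below should return the same single `New York` token.
These requests are shown for illustration only and are not part of the patch:

```js
GET /_analyze
{
  "analyzer": "keyword",
  "text": "New York"
}

GET /keyword_example/_analyze
{
  "analyzer": "rebuilt_keyword",
  "text": "New York"
}
```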
From 21d67d1bd7a2d2523dec1ec6f3997d49498ad646 Mon Sep 17 00:00:00 2001 From: Lisa Cawley Date: Mon, 14 May 2018 15:49:00 -0700 Subject: [PATCH 22/31] [DOCS] Adds release highlight pages (#30590) --- docs/reference/index-shared4.asciidoc | 2 ++ .../release-notes/highlights-7.0.0.asciidoc | 9 +++++++++ docs/reference/release-notes/highlights.asciidoc | 13 +++++++++++++ 3 files changed, 24 insertions(+) create mode 100644 docs/reference/release-notes/highlights-7.0.0.asciidoc create mode 100644 docs/reference/release-notes/highlights.asciidoc diff --git a/docs/reference/index-shared4.asciidoc b/docs/reference/index-shared4.asciidoc index 5e6ebc8a5a2..3dfb3b64189 100644 --- a/docs/reference/index-shared4.asciidoc +++ b/docs/reference/index-shared4.asciidoc @@ -5,4 +5,6 @@ include::testing.asciidoc[] include::glossary.asciidoc[] +include::release-notes/highlights.asciidoc[] + include::{docdir}/../CHANGELOG.asciidoc[] \ No newline at end of file diff --git a/docs/reference/release-notes/highlights-7.0.0.asciidoc b/docs/reference/release-notes/highlights-7.0.0.asciidoc new file mode 100644 index 00000000000..1ea3d3fa329 --- /dev/null +++ b/docs/reference/release-notes/highlights-7.0.0.asciidoc @@ -0,0 +1,9 @@ +[[release-highlights-7.0.0]] +== 7.0.0 release highlights +++++ +7.0.0 +++++ + +coming[7.0.0] + +See also <> and <>. diff --git a/docs/reference/release-notes/highlights.asciidoc b/docs/reference/release-notes/highlights.asciidoc new file mode 100644 index 00000000000..1223e9a685a --- /dev/null +++ b/docs/reference/release-notes/highlights.asciidoc @@ -0,0 +1,13 @@ +[[release-highlights]] += {es} Release Highlights + +[partintro] +-- +This section summarizes the most important changes in each release. For the +full list, see <> and <>. + +* <> + +-- + +include::highlights-7.0.0.asciidoc[] \ No newline at end of file From 15790e1b56b4fd34c93223236e475b72317b32c6 Mon Sep 17 00:00:00 2001 From: Ioannis Kakavas Date: Tue, 15 May 2018 02:14:35 +0300 Subject: [PATCH 23/31] Silence IndexUpgradeIT test failures. (#30430) --- .../java/org/elasticsearch/xpack/upgrade/IndexUpgradeIT.java | 2 ++ 1 file changed, 2 insertions(+) diff --git a/x-pack/plugin/upgrade/src/test/java/org/elasticsearch/xpack/upgrade/IndexUpgradeIT.java b/x-pack/plugin/upgrade/src/test/java/org/elasticsearch/xpack/upgrade/IndexUpgradeIT.java index ef5c3acc3d2..9f1fb95ed48 100644 --- a/x-pack/plugin/upgrade/src/test/java/org/elasticsearch/xpack/upgrade/IndexUpgradeIT.java +++ b/x-pack/plugin/upgrade/src/test/java/org/elasticsearch/xpack/upgrade/IndexUpgradeIT.java @@ -5,6 +5,7 @@ */ package org.elasticsearch.xpack.upgrade; +import org.apache.lucene.util.LuceneTestCase.AwaitsFix; import org.elasticsearch.Build; import org.elasticsearch.ElasticsearchSecurityException; import org.elasticsearch.action.search.SearchResponse; @@ -30,6 +31,7 @@ import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertThro import static org.hamcrest.Matchers.empty; import static org.hamcrest.core.IsEqual.equalTo; +@AwaitsFix(bugUrl = "https://github.com/elastic/elasticsearch/issues/30430") public class IndexUpgradeIT extends IndexUpgradeIntegTestCase { @Before From 6517ac98eb060baa63fe1e843f8526a117fcdcae Mon Sep 17 00:00:00 2001 From: Tim Vernum Date: Tue, 15 May 2018 09:57:34 +1000 Subject: [PATCH 24/31] Fail if reading from closed KeyStoreWrapper (#30394) In #28255 the implementation of the elasticsearch.keystore was changed to no longer be built on top of a PKCS#12 keystore. 
A side effect of that change was that calling getString or getFile on a closed KeyStoreWrapper ceased to throw an exception, and would instead return a value consisting of all 0 bytes. This change restores the previous behaviour as closely as possible. It is possible to retrieve the _keys_ from a closed keystore, but any attempt to get or set the entries will throw an IllegalStateException. --- .../common/settings/KeyStoreWrapper.java | 42 ++++++++++++------- .../common/settings/KeyStoreWrapperTests.java | 15 +++++++ 2 files changed, 43 insertions(+), 14 deletions(-) diff --git a/server/src/main/java/org/elasticsearch/common/settings/KeyStoreWrapper.java b/server/src/main/java/org/elasticsearch/common/settings/KeyStoreWrapper.java index 04bbb9279da..f47760491f8 100644 --- a/server/src/main/java/org/elasticsearch/common/settings/KeyStoreWrapper.java +++ b/server/src/main/java/org/elasticsearch/common/settings/KeyStoreWrapper.java @@ -158,6 +158,7 @@ public class KeyStoreWrapper implements SecureSettings { /** The decrypted secret data. See {@link #decrypt(char[])}. */ private final SetOnce> entries = new SetOnce<>(); + private volatile boolean closed; private KeyStoreWrapper(int formatVersion, boolean hasPassword, byte[] dataBytes) { this.formatVersion = formatVersion; @@ -448,8 +449,8 @@ public class KeyStoreWrapper implements SecureSettings { } /** Write the keystore to the given config directory. */ - public void save(Path configDir, char[] password) throws Exception { - assert isLoaded(); + public synchronized void save(Path configDir, char[] password) throws Exception { + ensureOpen(); SimpleFSDirectory directory = new SimpleFSDirectory(configDir); // write to tmp file first, then overwrite @@ -500,16 +501,22 @@ public class KeyStoreWrapper implements SecureSettings { } } + /** + * It is possible to retrieve the setting names even if the keystore is closed. + * This allows {@link SecureSetting} to correctly determine that a entry exists even though it cannot be read. Thus attempting to + * read a secure setting after the keystore is closed will generate a "keystore is closed" exception rather than using the fallback + * setting. + */ @Override public Set getSettingNames() { - assert isLoaded(); + assert entries.get() != null : "Keystore is not loaded"; return entries.get().keySet(); } // TODO: make settings accessible only to code that registered the setting @Override - public SecureString getString(String setting) { - assert isLoaded(); + public synchronized SecureString getString(String setting) { + ensureOpen(); Entry entry = entries.get().get(setting); if (entry == null || entry.type != EntryType.STRING) { throw new IllegalArgumentException("Secret setting " + setting + " is not a string"); @@ -520,13 +527,12 @@ public class KeyStoreWrapper implements SecureSettings { } @Override - public InputStream getFile(String setting) { - assert isLoaded(); + public synchronized InputStream getFile(String setting) { + ensureOpen(); Entry entry = entries.get().get(setting); if (entry == null || entry.type != EntryType.FILE) { throw new IllegalArgumentException("Secret setting " + setting + " is not a file"); } - return new ByteArrayInputStream(entry.bytes); } @@ -543,8 +549,8 @@ public class KeyStoreWrapper implements SecureSettings { } /** Set a string setting. 
*/ - void setString(String setting, char[] value) { - assert isLoaded(); + synchronized void setString(String setting, char[] value) { + ensureOpen(); validateSettingName(setting); ByteBuffer byteBuffer = StandardCharsets.UTF_8.encode(CharBuffer.wrap(value)); @@ -556,8 +562,8 @@ public class KeyStoreWrapper implements SecureSettings { } /** Set a file setting. */ - void setFile(String setting, byte[] bytes) { - assert isLoaded(); + synchronized void setFile(String setting, byte[] bytes) { + ensureOpen(); validateSettingName(setting); Entry oldEntry = entries.get().put(setting, new Entry(EntryType.FILE, Arrays.copyOf(bytes, bytes.length))); @@ -568,15 +574,23 @@ public class KeyStoreWrapper implements SecureSettings { /** Remove the given setting from the keystore. */ void remove(String setting) { - assert isLoaded(); + ensureOpen(); Entry oldEntry = entries.get().remove(setting); if (oldEntry != null) { Arrays.fill(oldEntry.bytes, (byte)0); } } + private void ensureOpen() { + if (closed) { + throw new IllegalStateException("Keystore is closed"); + } + assert isLoaded() : "Keystore is not loaded"; + } + @Override - public void close() { + public synchronized void close() { + this.closed = true; for (Entry entry : entries.get().values()) { Arrays.fill(entry.bytes, (byte)0); } diff --git a/server/src/test/java/org/elasticsearch/common/settings/KeyStoreWrapperTests.java b/server/src/test/java/org/elasticsearch/common/settings/KeyStoreWrapperTests.java index e2283608736..849841943ec 100644 --- a/server/src/test/java/org/elasticsearch/common/settings/KeyStoreWrapperTests.java +++ b/server/src/test/java/org/elasticsearch/common/settings/KeyStoreWrapperTests.java @@ -48,11 +48,13 @@ import org.elasticsearch.common.Randomness; import org.elasticsearch.core.internal.io.IOUtils; import org.elasticsearch.env.Environment; import org.elasticsearch.test.ESTestCase; +import org.hamcrest.Matchers; import org.junit.After; import org.junit.Before; import static org.hamcrest.Matchers.containsString; import static org.hamcrest.Matchers.equalTo; +import static org.hamcrest.Matchers.notNullValue; import static org.hamcrest.Matchers.instanceOf; public class KeyStoreWrapperTests extends ESTestCase { @@ -97,6 +99,19 @@ public class KeyStoreWrapperTests extends ESTestCase { assertTrue(keystore.getSettingNames().contains(KeyStoreWrapper.SEED_SETTING.getKey())); } + public void testCannotReadStringFromClosedKeystore() throws Exception { + KeyStoreWrapper keystore = KeyStoreWrapper.create(); + assertThat(keystore.getSettingNames(), Matchers.hasItem(KeyStoreWrapper.SEED_SETTING.getKey())); + assertThat(keystore.getString(KeyStoreWrapper.SEED_SETTING.getKey()), notNullValue()); + + keystore.close(); + + assertThat(keystore.getSettingNames(), Matchers.hasItem(KeyStoreWrapper.SEED_SETTING.getKey())); + final IllegalStateException exception = expectThrows(IllegalStateException.class, + () -> keystore.getString(KeyStoreWrapper.SEED_SETTING.getKey())); + assertThat(exception.getMessage(), containsString("closed")); + } + public void testUpgradeNoop() throws Exception { KeyStoreWrapper keystore = KeyStoreWrapper.create(); SecureString seed = keystore.getString(KeyStoreWrapper.SEED_SETTING.getKey()); From 848f2409264618e09b5a3add95623d33805e29c0 Mon Sep 17 00:00:00 2001 From: Tim Brooks Date: Mon, 14 May 2018 19:19:53 -0600 Subject: [PATCH 25/31] Fix issue with finishing handshake in ssl driver (#30580) This is fixing an issue that has come up in some builds. 
In some scenarios I see an assertion failure that we are trying to move
to application mode when we are not in handshake mode. What I think is
happening is that we are in handshake mode and have received the
completed handshake message AND an application message. While reading in
handshake mode we switch to application mode. However, there is still
data to be consumed, so we attempt to continue to read in handshake
mode. This leads to us attempting to move to application mode again,
throwing an assertion.

This commit fixes this by immediately exiting the handshake mode read
method if we are no longer in handshake mode. Additionally, if we swap
modes during a read, we attempt to read with the new mode to see if
there is data that needs to be handled.
---
 .../xpack/security/transport/nio/SSLDriver.java | 11 +++++++++--
 1 file changed, 9 insertions(+), 2 deletions(-)

diff --git a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/transport/nio/SSLDriver.java b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/transport/nio/SSLDriver.java
index a44d39a0d7a..c143978468d 100644
--- a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/transport/nio/SSLDriver.java
+++ b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/transport/nio/SSLDriver.java
@@ -113,7 +113,13 @@ public class SSLDriver implements AutoCloseable {
     }
 
     public void read(InboundChannelBuffer buffer) throws SSLException {
-        currentMode.read(buffer);
+        Mode modePriorToRead;
+        do {
+            modePriorToRead = currentMode;
+            currentMode.read(buffer);
+            // If we switched modes we want to read again as there might be unhandled bytes that need to be
+            // handled by the new mode.
+        } while (modePriorToRead != currentMode);
     }
 
     public boolean readyForApplicationWrites() {
@@ -365,8 +371,9 @@ public class SSLDriver implements AutoCloseable {
             try {
                 SSLEngineResult result = unwrap(buffer);
                 handshakeStatus = result.getHandshakeStatus();
-                continueUnwrap = result.bytesConsumed() > 0;
                 handshake();
+                // If we are done handshaking we should exit the handshake read
+                continueUnwrap = result.bytesConsumed() > 0 && currentMode.isHandshake();
             } catch (SSLException e) {
                 closingInternal();
                 throw e;
From 0f85c6429cff3f369383d256bc63c05df07bd88c Mon Sep 17 00:00:00 2001
From: Jason Tedor
Date: Mon, 14 May 2018 21:57:08 -0400
Subject: [PATCH 26/31] Remove the changelog (#30593)

We are starting over on the changelog with a different approach. This
commit removes the existing incarnation of the changelog to avoid
confusion over whether entries still need to be added to it.
---
 docs/CHANGELOG.asciidoc | 257 ----------------------------------------
 1 file changed, 257 deletions(-)
 delete mode 100644 docs/CHANGELOG.asciidoc

diff --git a/docs/CHANGELOG.asciidoc b/docs/CHANGELOG.asciidoc
deleted file mode 100644
index 6eb26fde8f9..00000000000
--- a/docs/CHANGELOG.asciidoc
+++ /dev/null
@@ -1,257 +0,0 @@
-[[es-release-notes]]
-= {es} Release Notes
-
-[partintro]
---
-// To add a release, copy and paste the template text
-// and add a link to the new section. Note that release subheads must
-// be floated and sections cannot be empty.
-
-// Use these for links to issue and pulls. Note issues and pulls redirect one to
-// each other on Github, so don't worry too much on using the right prefix.
-:issue: https://github.com/elastic/elasticsearch/issues/
-:pull: https://github.com/elastic/elasticsearch/pull/
-
-This section summarizes the changes in each release.
- -* <> -* <> -* <> - --- - -//// -// To add a release, copy and paste the following text, uncomment the relevant -// sections, and add a link to the new section in the list of releases at the -// top of the page. Note that release subheads must be floated and sections -// cannot be empty. -// TEMPLATE: - -// [[release-notes-n.n.n]] -// == {es} n.n.n - -//[float] -[[breaking-n.n.n]] -//=== Breaking Changes - -//[float] -//=== Breaking Java Changes - -//[float] -//=== Deprecations - -//[float] -//=== New Features - -//[float] -//=== Enhancements - -//[float] -//=== Bug Fixes - -//[float] -//=== Regressions - -//[float] -//=== Known Issues - -//// - -[[release-notes-7.0.0]] -== {es} 7.0.0 - -coming[7.0.0] - -[float] -[[breaking-7.0.0]] -=== Breaking Changes - -<> ({pull}29609[#29609]) - -<> ({pull}29004[#29004]) -<> ({pull}29635[#29635]) - -<> ({pull}30185[#30185]) - -Machine Learning:: -* The `max_running_jobs` node property is removed in this release. Use the -`xpack.ml.max_open_jobs` setting instead. For more information, see <>. - -* <> ({pull}29601[#29601]) - -//[float] -//=== Breaking Java Changes - -[float] -=== Deprecations -Monitoring:: -* The `xpack.monitoring.collection.interval` setting can no longer be set to `-1` -to disable monitoring data collection. Use `xpack.monitoring.collection.enabled` -and set it to `false` (its default), which was added in 6.3.0. - -Security:: -* The fields returned as part of the mappings section by get index, get -mappings, get field mappings, and field capabilities API are now only the -ones that the user is authorized to access in case field level security is enabled. - -//[float] -//=== New Features - -//[float] -//=== Enhancements - -[float] -=== Bug Fixes - -Use date format in `date_range` mapping before fallback to default ({pull}29310[#29310]) - -Fix NPE in 'more_like_this' when field has zero tokens ({pull}30365[#30365]) - -Fixed prerelease version of elasticsearch in the `deb` package to sort before GA versions -({pull}29000[#29000]) - -Rollup:: -* Validate timezone in range queries to ensure they match the selected job when -searching ({pull}30338[#30338]) - -SQL:: -* Fix parsing of Dates containing milliseconds ({pull}30419[#30419]) - -[float] -=== Regressions -Fail snapshot operations early when creating or deleting a snapshot on a repository that has been -written to by an older Elasticsearch after writing to it with a newer Elasticsearch version. ({pull}30140[#30140]) - -Fix NPE when CumulativeSum agg encounters null value/empty bucket ({pull}29641[#29641]) -Do not fail snapshot when deleting a missing snapshotted file ({pull}30332[#30332]) - -//[float] -//=== Regressions - -//[float] -//=== Known Issues - -[[release-notes-6.4.0]] -== {es} 6.4.0 - -coming[6.4.0] - -//[float] -[[breaking-6.4.0]] -//=== Breaking Changes - -//[float] -//=== Breaking Java Changes - -[float] -=== Deprecations - -Deprecated multi-argument versions of the request methods in the RestClient. -Prefer the "Request" object flavored methods. ({pull}30315[#30315]) - -[float] -=== New Features - -The new <> field allows to know which fields -got ignored at index time because of the <> -option. ({pull}30140[#29658]) - -A new analysis plugin called `analysis_nori` that exposes the Lucene Korean -analysis module. 
({pull}30397[#30397]) - -[float] -=== Enhancements - -{ref-64}/breaking_64_api_changes.html#copy-source-settings-on-resize[Allow -copying source settings on index resize operations] ({pull}30255[#30255], {pull}30404[#30404]) - -Added new "Request" object flavored request methods in the RestClient. Prefer -these instead of the multi-argument versions. ({pull}29623[#29623]) - -Added `setJsonEntity` to `Request` object so it is marginally easier to send JSON. ({pull}30447[#30447]) -Watcher HTTP client used in watches now allows more parallel connections to the -same endpoint and evicts long running connections. ({pull}30130[#30130]) - -The cluster state listener to decide if watcher should be -stopped/started/paused now runs far less code in an executor but is more -synchronous and predictable. Also the trigger engine thread is only started on -data nodes. And the Execute Watch API can be triggered regardless is watcher is -started or stopped. ({pull}30118[#30118]) - -Added put index template API to the high level rest client ({pull}30400[#30400]) - -Add ability to filter coordinating-only nodes when interacting with cluster -APIs. ({pull}30313[#30313]) - -[float] -=== Bug Fixes - -Use date format in `date_range` mapping before fallback to default ({pull}29310[#29310]) - -Fix NPE in 'more_like_this' when field has zero tokens ({pull}30365[#30365]) - -Do not ignore request analysis/similarity settings on index resize operations when the source index already contains such settings ({pull}30216[#30216]) - -Fix NPE when CumulativeSum agg encounters null value/empty bucket ({pull}29641[#29641]) - -Machine Learning:: - -* Account for gaps in data counts after job is reopened ({pull}30294[#30294]) - -Add validation that geohashes are not empty and don't contain unsupported characters ({pull}30376[#30376]) - -Rollup:: -* Validate timezone in range queries to ensure they match the selected job when -searching ({pull}30338[#30338]) - -SQL:: -* Fix parsing of Dates containing milliseconds ({pull}30419[#30419]) - -Allocation:: - -Auto-expand replicas when adding or removing nodes to prevent shard copies from -being dropped and resynced when a data node rejoins the cluster ({pull}30423[#30423]) - -//[float] -//=== Regressions - -//[float] -//=== Known Issues - -[[release-notes-6.3.1]] -== Elasticsearch version 6.3.1 - -coming[6.3.1] - -//[float] -[[breaking-6.3.1]] -//=== Breaking Changes - -//[float] -//=== Breaking Java Changes - -//[float] -//=== Deprecations - -//[float] -//=== New Features - -//[float] -//=== Enhancements - -[float] -=== Bug Fixes - -Reduce the number of object allocations made by {security} when resolving the indices and aliases for a request ({pull}30180[#30180]) - -Respect accept header on requests with no handler ({pull}30383[#30383]) - -SQL:: -* Fix parsing of Dates containing milliseconds ({pull}30419[#30419]) - -//[float] -//=== Regressions - -//[float] -//=== Known Issues From 7dd816e77c761afacb0de662241a410e8f7618db Mon Sep 17 00:00:00 2001 From: Tim Vernum Date: Tue, 15 May 2018 14:28:29 +1000 Subject: [PATCH 27/31] Update build file due to doc file rename A file with unconverted snippets was changed as part of 7f47ff9, but build.gradle was not updated to reflect the rename.
--- x-pack/docs/build.gradle | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/x-pack/docs/build.gradle b/x-pack/docs/build.gradle index ab9bc994599..ede446d6074 100644 --- a/x-pack/docs/build.gradle +++ b/x-pack/docs/build.gradle @@ -81,7 +81,7 @@ buildRestTests.expectedUnconvertedCandidates = [ 'en/rest-api/ml/validate-job.asciidoc', 'en/rest-api/security/authenticate.asciidoc', 'en/rest-api/watcher/stats.asciidoc', - 'en/security/authorization.asciidoc', + 'en/security/authorization/overview.asciidoc', 'en/watcher/example-watches/watching-time-series-data.asciidoc', ] From 50c34b2a9bbdca25d04b624cccd320a8428d2309 Mon Sep 17 00:00:00 2001 From: David Roberts Date: Tue, 15 May 2018 09:02:38 +0100 Subject: [PATCH 28/31] [ML] Reverse engineer Grok patterns from categorization results (#30125) This change adds a grok_pattern field to the GET categories API output in ML. It's calculated using the regex and examples in the categorization result, and applying a list of candidate Grok patterns to the bits in between the tokens that are considered to define the category. This can currently be considered a prototype, as the Grok patterns it produces are not optimal. However, enough people have said it would be useful for it to be worthwhile exposing it as experimental functionality for interested parties to try out. --- .../docs/en/rest-api/ml/get-category.asciidoc | 24 +- .../en/rest-api/ml/resultsresource.asciidoc | 7 + .../ml/job/results/CategoryDefinition.java | 26 +- x-pack/plugin/ml/build.gradle | 1 + .../action/TransportGetCategoriesAction.java | 2 +- .../categorization/GrokPatternCreator.java | 243 ++++++++++++++++++ .../xpack/ml/job/persistence/JobProvider.java | 20 +- .../AutodetectResultProcessorIT.java | 2 +- .../GrokPatternCreatorTests.java | 232 +++++++++++++++++ .../ml/job/persistence/JobProviderTests.java | 44 ++-- .../job/results/CategoryDefinitionTests.java | 3 + 11 files changed, 561 insertions(+), 43 deletions(-) create mode 100644 x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/job/categorization/GrokPatternCreator.java create mode 100644 x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/job/categorization/GrokPatternCreatorTests.java diff --git a/x-pack/docs/en/rest-api/ml/get-category.asciidoc b/x-pack/docs/en/rest-api/ml/get-category.asciidoc index 37d0a95c14c..9e69083355b 100644 --- a/x-pack/docs/en/rest-api/ml/get-category.asciidoc +++ b/x-pack/docs/en/rest-api/ml/get-category.asciidoc @@ -62,11 +62,11 @@ roles provide these privileges. 
For more information, see ==== Examples The following example gets information about one category for the -`it_ops_new_logs` job: +`esxi_log` job: [source,js] -------------------------------------------------- -GET _xpack/ml/anomaly_detectors/it_ops_new_logs/results/categories +GET _xpack/ml/anomaly_detectors/esxi_log/results/categories { "page":{ "size": 1 @@ -83,14 +83,18 @@ In this example, the API returns the following information: "count": 11, "categories": [ { - "job_id": "it_ops_new_logs", - "category_id": 1, - "terms": "Actual Transaction Already Voided Reversed hostname dbserver.acme.com physicalhost esxserver1.acme.com vmhost app1.acme.com", - "regex": ".*?Actual.+?Transaction.+?Already.+?Voided.+?Reversed.+?hostname.+?dbserver.acme.com.+?physicalhost.+?esxserver1.acme.com.+?vmhost.+?app1.acme.com.*", - "max_matching_length": 137, - "examples": [ - "Actual Transaction Already Voided / Reversed;hostname=dbserver.acme.com;physicalhost=esxserver1.acme.com;vmhost=app1.acme.com" - ] + "job_id" : "esxi_log", + "category_id" : 1, + "terms" : "Vpxa verbose vpxavpxaInvtVm opID VpxaInvtVmChangeListener Guest DiskInfo Changed", + "regex" : ".*?Vpxa.+?verbose.+?vpxavpxaInvtVm.+?opID.+?VpxaInvtVmChangeListener.+?Guest.+?DiskInfo.+?Changed.*", + "max_matching_length": 154, + "examples" : [ + "Oct 19 17:04:44 esxi1.acme.com Vpxa: [3CB3FB90 verbose 'vpxavpxaInvtVm' opID=WFU-33d82c31] [VpxaInvtVmChangeListener] Guest DiskInfo Changed", + "Oct 19 17:04:45 esxi2.acme.com Vpxa: [3CA66B90 verbose 'vpxavpxaInvtVm' opID=WFU-33927856] [VpxaInvtVmChangeListener] Guest DiskInfo Changed", + "Oct 19 17:04:51 esxi1.acme.com Vpxa: [FFDBAB90 verbose 'vpxavpxaInvtVm' opID=WFU-25e0d447] [VpxaInvtVmChangeListener] Guest DiskInfo Changed", + "Oct 19 17:04:58 esxi2.acme.com Vpxa: [FFDDBB90 verbose 'vpxavpxaInvtVm' opID=WFU-bbff0134] [VpxaInvtVmChangeListener] Guest DiskInfo Changed" + ], + "grok_pattern" : ".*?%{SYSLOGTIMESTAMP:timestamp}.+?Vpxa.+?%{BASE16NUM:field}.+?verbose.+?vpxavpxaInvtVm.+?opID.+?VpxaInvtVmChangeListener.+?Guest.+?DiskInfo.+?Changed.*" } ] } diff --git a/x-pack/docs/en/rest-api/ml/resultsresource.asciidoc b/x-pack/docs/en/rest-api/ml/resultsresource.asciidoc index fba6522141b..c28ed72aedb 100644 --- a/x-pack/docs/en/rest-api/ml/resultsresource.asciidoc +++ b/x-pack/docs/en/rest-api/ml/resultsresource.asciidoc @@ -405,6 +405,13 @@ A category resource has the following properties: `examples`:: (array) A list of examples of actual values that matched the category. +`grok_pattern`:: + experimental[] (string) A Grok pattern that could be used in Logstash or an + Ingest Pipeline to extract fields from messages that match the category. This + field is experimental and may be changed or removed in a future release. The + Grok patterns that are found are not optimal, but are often a good starting + point for manual tweaking. + `job_id`:: (string) The unique identifier for the job that these results belong to. 
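As a concrete illustration of the `grok_pattern` field documented above, the following minimal sketch feeds a returned pattern into an ingest pipeline through the low-level REST client's `Request` and `setJsonEntity` methods. It is not part of this patch series: the host, the pipeline id, and the idea of copying the pattern from the `esxi_log` example are assumptions made for the sketch, and patterns containing quotes or backslashes would need JSON escaping before being embedded like this.

[source,java]
--------------------------------------------------
import org.apache.http.HttpHost;
import org.elasticsearch.client.Request;
import org.elasticsearch.client.Response;
import org.elasticsearch.client.RestClient;

public class GrokPatternPipelineSketch {
    public static void main(String[] args) throws Exception {
        // Pattern copied from the esxi_log example above; in practice it would be read
        // from the get categories API response. (Assumed setup, not part of this patch.)
        String grokPattern = ".*?%{SYSLOGTIMESTAMP:timestamp}.+?Vpxa.+?%{BASE16NUM:field}.+?verbose.+?"
                + "vpxavpxaInvtVm.+?opID.+?VpxaInvtVmChangeListener.+?Guest.+?DiskInfo.+?Changed.*";

        try (RestClient client = RestClient.builder(new HttpHost("localhost", 9200, "http")).build()) {
            // The pipeline id "esxi-log-category-1" is made up for this sketch.
            Request request = new Request("PUT", "/_ingest/pipeline/esxi-log-category-1");
            request.setJsonEntity("{ \"processors\": [ { \"grok\": { \"field\": \"message\", "
                    + "\"patterns\": [ \"" + grokPattern + "\" ] } } ] }");
            Response response = client.performRequest(request);
            System.out.println(response.getStatusLine());
        }
    }
}
--------------------------------------------------

Indexing a document with `?pipeline=esxi-log-category-1` would then populate `timestamp` and `field` from any message that matches the category.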
diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/job/results/CategoryDefinition.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/job/results/CategoryDefinition.java index 98c38241856..90d01f66f63 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/job/results/CategoryDefinition.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/job/results/CategoryDefinition.java @@ -5,6 +5,7 @@ */ package org.elasticsearch.xpack.core.ml.job.results; +import org.elasticsearch.Version; import org.elasticsearch.common.ParseField; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; @@ -34,6 +35,7 @@ public class CategoryDefinition implements ToXContentObject, Writeable { public static final ParseField REGEX = new ParseField("regex"); public static final ParseField MAX_MATCHING_LENGTH = new ParseField("max_matching_length"); public static final ParseField EXAMPLES = new ParseField("examples"); + public static final ParseField GROK_PATTERN = new ParseField("grok_pattern"); // Used for QueryPage public static final ParseField RESULTS_FIELD = new ParseField("categories"); @@ -51,6 +53,7 @@ public class CategoryDefinition implements ToXContentObject, Writeable { parser.declareString(CategoryDefinition::setRegex, REGEX); parser.declareLong(CategoryDefinition::setMaxMatchingLength, MAX_MATCHING_LENGTH); parser.declareStringArray(CategoryDefinition::setExamples, EXAMPLES); + parser.declareString(CategoryDefinition::setGrokPattern, GROK_PATTERN); return parser; } @@ -61,6 +64,7 @@ public class CategoryDefinition implements ToXContentObject, Writeable { private String regex = ""; private long maxMatchingLength = 0L; private final Set examples; + private String grokPattern; public CategoryDefinition(String jobId) { this.jobId = jobId; @@ -74,6 +78,9 @@ public class CategoryDefinition implements ToXContentObject, Writeable { regex = in.readString(); maxMatchingLength = in.readLong(); examples = new TreeSet<>(in.readList(StreamInput::readString)); + if (in.getVersion().onOrAfter(Version.V_7_0_0_alpha1)) { + grokPattern = in.readOptionalString(); + } } @Override @@ -84,6 +91,9 @@ public class CategoryDefinition implements ToXContentObject, Writeable { out.writeString(regex); out.writeLong(maxMatchingLength); out.writeStringList(new ArrayList<>(examples)); + if (out.getVersion().onOrAfter(Version.V_7_0_0_alpha1)) { + out.writeOptionalString(grokPattern); + } } public String getJobId() { @@ -139,6 +149,14 @@ public class CategoryDefinition implements ToXContentObject, Writeable { examples.add(example); } + public String getGrokPattern() { + return grokPattern; + } + + public void setGrokPattern(String grokPattern) { + this.grokPattern = grokPattern; + } + @Override public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { builder.startObject(); @@ -148,6 +166,9 @@ public class CategoryDefinition implements ToXContentObject, Writeable { builder.field(REGEX.getPreferredName(), regex); builder.field(MAX_MATCHING_LENGTH.getPreferredName(), maxMatchingLength); builder.field(EXAMPLES.getPreferredName(), examples); + if (grokPattern != null) { + builder.field(GROK_PATTERN.getPreferredName(), grokPattern); + } builder.endObject(); return builder; } @@ -166,11 +187,12 @@ public class CategoryDefinition implements ToXContentObject, Writeable { && Objects.equals(this.terms, that.terms) && Objects.equals(this.regex, that.regex) && 
Objects.equals(this.maxMatchingLength, that.maxMatchingLength) - && Objects.equals(this.examples, that.examples); + && Objects.equals(this.examples, that.examples) + && Objects.equals(this.grokPattern, that.grokPattern); } @Override public int hashCode() { - return Objects.hash(jobId, categoryId, terms, regex, maxMatchingLength, examples); + return Objects.hash(jobId, categoryId, terms, regex, maxMatchingLength, examples, grokPattern); } } diff --git a/x-pack/plugin/ml/build.gradle b/x-pack/plugin/ml/build.gradle index d9d4882b00e..8b991555c06 100644 --- a/x-pack/plugin/ml/build.gradle +++ b/x-pack/plugin/ml/build.gradle @@ -46,6 +46,7 @@ dependencies { testCompile project(path: xpackModule('security'), configuration: 'testArtifacts') // ml deps + compile project(':libs:grok') compile 'net.sf.supercsv:super-csv:2.4.0' nativeBundle "org.elasticsearch.ml:ml-cpp:${project.version}@zip" testCompile 'org.ini4j:ini4j:0.5.2' diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportGetCategoriesAction.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportGetCategoriesAction.java index 25d0cc0cdf8..abf3a330529 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportGetCategoriesAction.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportGetCategoriesAction.java @@ -41,7 +41,7 @@ public class TransportGetCategoriesAction extends HandledTransportAction listener.onResponse(new GetCategoriesAction.Response(r)), listener::onFailure, client); } } diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/job/categorization/GrokPatternCreator.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/job/categorization/GrokPatternCreator.java new file mode 100644 index 00000000000..04280261b26 --- /dev/null +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/job/categorization/GrokPatternCreator.java @@ -0,0 +1,243 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.ml.job.categorization; + +import org.elasticsearch.common.logging.Loggers; +import org.elasticsearch.grok.Grok; + +import java.util.ArrayList; +import java.util.Arrays; +import java.util.Collection; +import java.util.HashMap; +import java.util.List; +import java.util.Map; +import java.util.regex.Matcher; +import java.util.regex.Pattern; + + +/** + * Creates Grok patterns that will match all the examples in a given category_definition. + * + * The choice of field names is quite primitive. The intention is that a human will edit these. + */ +public final class GrokPatternCreator { + + private static String PREFACE = "preface"; + private static String EPILOGUE = "epilogue"; + + /** + * The first match in this list will be chosen, so it needs to be ordered + * such that more generic patterns come after more specific patterns. 
+ */ + private static final List ORDERED_CANDIDATE_GROK_PATTERNS = Arrays.asList( + new GrokPatternCandidate("TIMESTAMP_ISO8601", "timestamp"), + new GrokPatternCandidate("DATESTAMP_RFC822", "timestamp"), + new GrokPatternCandidate("DATESTAMP_RFC2822", "timestamp"), + new GrokPatternCandidate("DATESTAMP_OTHER", "timestamp"), + new GrokPatternCandidate("DATESTAMP_EVENTLOG", "timestamp"), + new GrokPatternCandidate("SYSLOGTIMESTAMP", "timestamp"), + new GrokPatternCandidate("HTTPDATE", "timestamp"), + new GrokPatternCandidate("CATALINA_DATESTAMP", "timestamp"), + new GrokPatternCandidate("TOMCAT_DATESTAMP", "timestamp"), + new GrokPatternCandidate("CISCOTIMESTAMP", "timestamp"), + new GrokPatternCandidate("DATE", "date"), + new GrokPatternCandidate("TIME", "time"), + new GrokPatternCandidate("LOGLEVEL", "loglevel"), + new GrokPatternCandidate("URI", "uri"), + new GrokPatternCandidate("UUID", "uuid"), + new GrokPatternCandidate("MAC", "macaddress"), + // Can't use \b as the breaks, because slashes are not "word" characters + new GrokPatternCandidate("PATH", "path", "(? examples) { + + // The first string in this array will end up being the empty string, and it doesn't correspond + // to an "in between" bit. Although it could be removed for "neatness", it actually makes the + // loops below slightly neater if it's left in. + // + // E.g., ".*?cat.+?sat.+?mat.*" -> [ "", "cat", "sat", "mat" ] + String[] fixedRegexBits = regex.split("\\.[*+]\\??"); + + // Create a pattern that will capture the bits in between the fixed parts of the regex + // + // E.g., ".*?cat.+?sat.+?mat.*" -> Pattern (.*?)cat(.+?)sat(.+?)mat(.*) + Pattern exampleProcessor = Pattern.compile(regex.replaceAll("(\\.[*+]\\??)", "($1)"), Pattern.DOTALL); + + List> groupsMatchesFromExamples = new ArrayList<>(fixedRegexBits.length); + for (int i = 0; i < fixedRegexBits.length; ++i) { + groupsMatchesFromExamples.add(new ArrayList<>(examples.size())); + } + for (String example : examples) { + Matcher matcher = exampleProcessor.matcher(example); + if (matcher.matches()) { + assert matcher.groupCount() == fixedRegexBits.length; + // E.g., if the input regex was ".*?cat.+?sat.+?mat.*" then the example + // "the cat sat on the mat" will result in "the ", " ", " on the ", and "" + // being added to the 4 "in between" collections in that order + for (int groupNum = 1; groupNum <= matcher.groupCount(); ++groupNum) { + groupsMatchesFromExamples.get(groupNum - 1).add(matcher.group(groupNum)); + } + } else { + // We should never get here. If we do it implies a bug in the original categorization, + // as it's produced a regex that doesn't match the examples. 
+ assert matcher.matches() : exampleProcessor.pattern() + " did not match " + example; + Loggers.getLogger(GrokPatternCreator.class).error("[{}] Pattern [{}] did not match example [{}]", jobId, + exampleProcessor.pattern(), example); + } + } + + Map fieldNameCountStore = new HashMap<>(); + StringBuilder overallGrokPatternBuilder = new StringBuilder(); + // Finally, for each collection of "in between" bits we look for the best Grok pattern and incorporate + // it into the overall Grok pattern that will match the each example in its entirety + for (int inBetweenBitNum = 0; inBetweenBitNum < groupsMatchesFromExamples.size(); ++inBetweenBitNum) { + // Remember (from the first comment in this method) that the first element in this array is + // always the empty string + overallGrokPatternBuilder.append(fixedRegexBits[inBetweenBitNum]); + appendBestGrokMatchForStrings(fieldNameCountStore, overallGrokPatternBuilder, inBetweenBitNum == 0, + inBetweenBitNum == fixedRegexBits.length - 1, groupsMatchesFromExamples.get(inBetweenBitNum)); + } + return overallGrokPatternBuilder.toString(); + } + + /** + * Given a collection of strings, work out which (if any) of the grok patterns we're allowed + * to use matches it best. Then append the appropriate grok language to represent that finding + * onto the supplied string builder. + */ + static void appendBestGrokMatchForStrings(Map fieldNameCountStore, StringBuilder overallGrokPatternBuilder, + boolean isFirst, boolean isLast, Collection mustMatchStrings) { + + GrokPatternCandidate bestCandidate = null; + if (mustMatchStrings.isEmpty() == false) { + for (GrokPatternCandidate candidate : ORDERED_CANDIDATE_GROK_PATTERNS) { + if (mustMatchStrings.stream().allMatch(candidate.grok::match)) { + bestCandidate = candidate; + break; + } + } + } + + if (bestCandidate == null) { + if (isLast) { + overallGrokPatternBuilder.append(".*"); + } else if (isFirst || mustMatchStrings.stream().anyMatch(String::isEmpty)) { + overallGrokPatternBuilder.append(".*?"); + } else { + overallGrokPatternBuilder.append(".+?"); + } + } else { + Collection prefaces = new ArrayList<>(); + Collection epilogues = new ArrayList<>(); + populatePrefacesAndEpilogues(mustMatchStrings, bestCandidate.grok, prefaces, epilogues); + appendBestGrokMatchForStrings(fieldNameCountStore, overallGrokPatternBuilder, isFirst, false, prefaces); + overallGrokPatternBuilder.append("%{").append(bestCandidate.grokPatternName).append(':') + .append(buildFieldName(fieldNameCountStore, bestCandidate.fieldName)).append('}'); + appendBestGrokMatchForStrings(fieldNameCountStore, overallGrokPatternBuilder, false, isLast, epilogues); + } + } + + /** + * Given a collection of strings, and a grok pattern that matches some part of them all, + * return collections of the bits that come before (prefaces) and after (epilogues) the + * bit that matches. + */ + static void populatePrefacesAndEpilogues(Collection matchingStrings, Grok grok, Collection prefaces, + Collection epilogues) { + for (String s : matchingStrings) { + Map captures = grok.captures(s); + // If the pattern doesn't match then captures will be null. But we expect this + // method to only be called after validating that the pattern does match. + assert captures != null; + prefaces.add(captures.getOrDefault(PREFACE, "").toString()); + epilogues.add(captures.getOrDefault(EPILOGUE, "").toString()); + } + } + + /** + * The first time a particular field name is passed, simply return it. + * The second time return it with "2" appended. 
+ * The third time return it with "3" appended. + * Etc. + */ + static String buildFieldName(Map fieldNameCountStore, String fieldName) { + Integer numberSeen = fieldNameCountStore.compute(fieldName, (k, v) -> 1 + ((v == null) ? 0 : v)); + if (numberSeen > 1) { + return fieldName + numberSeen; + } else { + return fieldName; + } + } + + static class GrokPatternCandidate { + + final String grokPatternName; + final String fieldName; + final Grok grok; + + /** + * Pre/post breaks default to \b, but this may not be appropriate for Grok patterns that start or + * end with a non "word" character (i.e. letter, number or underscore). For such patterns use one + * of the other constructors. + * + * In cases where the Grok pattern defined by Logstash already includes conditions on what must + * come before and after the match, use one of the other constructors and specify an empty string + * for the pre and/or post breaks. + * @param grokPatternName Name of the Grok pattern to try to match - must match one defined in Logstash. + * @param fieldName Name of the field to extract from the match. + */ + GrokPatternCandidate(String grokPatternName, String fieldName) { + this(grokPatternName, fieldName, "\\b", "\\b"); + } + + GrokPatternCandidate(String grokPatternName, String fieldName, String preBreak) { + this(grokPatternName, fieldName, preBreak, "\\b"); + } + + /** + * @param grokPatternName Name of the Grok pattern to try to match - must match one defined in Logstash. + * @param fieldName Name of the field to extract from the match. + * @param preBreak Only consider the match if it's broken from the previous text by this. + * @param postBreak Only consider the match if it's broken from the following text by this. + */ + GrokPatternCandidate(String grokPatternName, String fieldName, String preBreak, String postBreak) { + this.grokPatternName = grokPatternName; + this.fieldName = fieldName; + this.grok = new Grok(Grok.getBuiltinPatterns(), "%{DATA:" + PREFACE + "}" + preBreak + "%{" + grokPatternName + ":this}" + + postBreak + "%{GREEDYDATA:" + EPILOGUE + "}"); + } + } +} diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/job/persistence/JobProvider.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/job/persistence/JobProvider.java index 4b15ef36e6a..d7b10fb622b 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/job/persistence/JobProvider.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/job/persistence/JobProvider.java @@ -98,6 +98,7 @@ import org.elasticsearch.xpack.core.ml.job.results.Result; import org.elasticsearch.xpack.core.ml.utils.ExceptionsHelper; import org.elasticsearch.xpack.core.ml.utils.MlIndicesUtils; import org.elasticsearch.xpack.core.security.support.Exceptions; +import org.elasticsearch.xpack.ml.job.categorization.GrokPatternCreator; import org.elasticsearch.xpack.ml.job.persistence.InfluencersQueryBuilder.InfluencersQuery; import org.elasticsearch.xpack.ml.job.process.autodetect.params.AutodetectParams; @@ -486,7 +487,7 @@ public class JobProvider { } } - private T parseGetHit(GetResponse getResponse, BiFunction objectParser, + private T parseGetHit(GetResponse getResponse, BiFunction objectParser, Consumer errorHandler) { BytesReference source = getResponse.getSourceAsBytesRef(); @@ -626,10 +627,11 @@ public class JobProvider { * Get a page of {@linkplain CategoryDefinition}s for the given jobId. 
* Uses a supplied client, so may run as the currently authenticated user * @param jobId the job id + * @param augment Should the category definition be augmented with a Grok pattern? * @param from Skip the first N categories. This parameter is for paging * @param size Take only this number of categories */ - public void categoryDefinitions(String jobId, Long categoryId, Integer from, Integer size, + public void categoryDefinitions(String jobId, Long categoryId, boolean augment, Integer from, Integer size, Consumer> handler, Consumer errorHandler, Client client) { if (categoryId != null && (from != null || size != null)) { @@ -663,6 +665,9 @@ public class JobProvider { XContentParser parser = XContentFactory.xContent(XContentHelper.xContentType(source)) .createParser(NamedXContentRegistry.EMPTY, LoggingDeprecationHandler.INSTANCE, stream)) { CategoryDefinition categoryDefinition = CategoryDefinition.LENIENT_PARSER.apply(parser, null); + if (augment) { + augmentWithGrokPattern(categoryDefinition); + } results.add(categoryDefinition); } catch (IOException e) { throw new ElasticsearchParseException("failed to parse category definition", e); @@ -674,6 +679,17 @@ public class JobProvider { }, e -> errorHandler.accept(mapAuthFailure(e, jobId, GetCategoriesAction.NAME))), client::search); } + void augmentWithGrokPattern(CategoryDefinition categoryDefinition) { + List examples = categoryDefinition.getExamples(); + String regex = categoryDefinition.getRegex(); + if (examples.isEmpty() || regex.isEmpty()) { + categoryDefinition.setGrokPattern(""); + } else { + categoryDefinition.setGrokPattern(GrokPatternCreator.findBestGrokMatchFromExamples(categoryDefinition.getJobId(), + regex, examples)); + } + } + /** * Search for anomaly records with the parameters in the * {@link RecordsQueryBuilder} diff --git a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/integration/AutodetectResultProcessorIT.java b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/integration/AutodetectResultProcessorIT.java index 484d1648fbb..09bb3f75916 100644 --- a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/integration/AutodetectResultProcessorIT.java +++ b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/integration/AutodetectResultProcessorIT.java @@ -461,7 +461,7 @@ public class AutodetectResultProcessorIT extends MlSingleNodeTestCase { AtomicReference errorHolder = new AtomicReference<>(); AtomicReference> resultHolder = new AtomicReference<>(); CountDownLatch latch = new CountDownLatch(1); - jobProvider.categoryDefinitions(JOB_ID, categoryId, null, null, r -> { + jobProvider.categoryDefinitions(JOB_ID, categoryId, false, null, null, r -> { resultHolder.set(r); latch.countDown(); }, e -> { diff --git a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/job/categorization/GrokPatternCreatorTests.java b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/job/categorization/GrokPatternCreatorTests.java new file mode 100644 index 00000000000..4189dc35f0c --- /dev/null +++ b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/job/categorization/GrokPatternCreatorTests.java @@ -0,0 +1,232 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ +package org.elasticsearch.xpack.ml.job.categorization; + +import org.elasticsearch.grok.Grok; +import org.elasticsearch.test.ESTestCase; + +import java.util.ArrayList; +import java.util.Arrays; +import java.util.Collection; +import java.util.HashMap; +import java.util.Map; + +import static org.hamcrest.Matchers.containsInAnyOrder; + +public class GrokPatternCreatorTests extends ESTestCase { + + public void testBuildFieldName() { + Map fieldNameCountStore = new HashMap<>(); + assertEquals("field", GrokPatternCreator.buildFieldName(fieldNameCountStore, "field")); + assertEquals("field2", GrokPatternCreator.buildFieldName(fieldNameCountStore, "field")); + assertEquals("field3", GrokPatternCreator.buildFieldName(fieldNameCountStore, "field")); + assertEquals("timestamp", GrokPatternCreator.buildFieldName(fieldNameCountStore, "timestamp")); + assertEquals("field4", GrokPatternCreator.buildFieldName(fieldNameCountStore, "field")); + assertEquals("uri", GrokPatternCreator.buildFieldName(fieldNameCountStore, "uri")); + assertEquals("timestamp2", GrokPatternCreator.buildFieldName(fieldNameCountStore, "timestamp")); + assertEquals("field5", GrokPatternCreator.buildFieldName(fieldNameCountStore, "field")); + } + + public void testPopulatePrefacesAndEpiloguesGivenTimestamp() { + + Collection matchingStrings = Arrays.asList("[2018-01-25T15:33:23] DEBUG ", + "[2018-01-24T12:33:23] ERROR ", + "junk [2018-01-22T07:33:23] INFO ", + "[2018-01-21T03:33:23] DEBUG "); + Grok grok = new GrokPatternCreator.GrokPatternCandidate("TIMESTAMP_ISO8601", "timestamp").grok; + Collection prefaces = new ArrayList<>(); + Collection epilogues = new ArrayList<>(); + + GrokPatternCreator.populatePrefacesAndEpilogues(matchingStrings, grok, prefaces, epilogues); + + assertThat(prefaces, containsInAnyOrder("[", "[", "junk [", "[")); + assertThat(epilogues, containsInAnyOrder("] DEBUG ", "] ERROR ", "] INFO ", "] DEBUG ")); + } + + public void testPopulatePrefacesAndEpiloguesGivenEmailAddress() { + + Collection matchingStrings = Arrays.asList("before alice@acme.com after", + "abc bob@acme.com xyz", + "carol@acme.com"); + Grok grok = new GrokPatternCreator.GrokPatternCandidate("EMAILADDRESS", "email").grok; + Collection prefaces = new ArrayList<>(); + Collection epilogues = new ArrayList<>(); + + GrokPatternCreator.populatePrefacesAndEpilogues(matchingStrings, grok, prefaces, epilogues); + + assertThat(prefaces, containsInAnyOrder("before ", "abc ", "")); + assertThat(epilogues, containsInAnyOrder(" after", " xyz", "")); + } + + public void testAppendBestGrokMatchForStringsGivenTimestampsAndLogLevels() { + + Collection mustMatchStrings = Arrays.asList("[2018-01-25T15:33:23] DEBUG ", + "[2018-01-24T12:33:23] ERROR ", + "junk [2018-01-22T07:33:23] INFO ", + "[2018-01-21T03:33:23] DEBUG "); + + Map fieldNameCountStore = new HashMap<>(); + StringBuilder overallGrokPatternBuilder = new StringBuilder(); + + GrokPatternCreator.appendBestGrokMatchForStrings(fieldNameCountStore, overallGrokPatternBuilder, false, false, mustMatchStrings); + + assertEquals(".+?%{TIMESTAMP_ISO8601:timestamp}.+?%{LOGLEVEL:loglevel}.+?", overallGrokPatternBuilder.toString()); + } + + public void testAppendBestGrokMatchForStringsGivenNumbersInBrackets() { + + Collection mustMatchStrings = Arrays.asList("(-2)", + " (-3)", + " (4)", + " (-5) "); + + Map fieldNameCountStore = new HashMap<>(); + StringBuilder overallGrokPatternBuilder = new StringBuilder(); + + GrokPatternCreator.appendBestGrokMatchForStrings(fieldNameCountStore, overallGrokPatternBuilder, 
false, false, mustMatchStrings); + + assertEquals(".+?%{NUMBER:field}.+?", overallGrokPatternBuilder.toString()); + } + + public void testAppendBestGrokMatchForStringsGivenNegativeNumbersWithoutBreak() { + + Collection mustMatchStrings = Arrays.asList("before-2 ", + "prior to-3", + "-4"); + + Map fieldNameCountStore = new HashMap<>(); + StringBuilder overallGrokPatternBuilder = new StringBuilder(); + + GrokPatternCreator.appendBestGrokMatchForStrings(fieldNameCountStore, overallGrokPatternBuilder, false, false, mustMatchStrings); + + // It seems sensible that we don't detect these suffices as either base 10 or base 16 numbers + assertEquals(".+?", overallGrokPatternBuilder.toString()); + } + + public void testAppendBestGrokMatchForStringsGivenHexNumbers() { + + Collection mustMatchStrings = Arrays.asList(" abc", + " 123", + " -123", + "1f is hex"); + + Map fieldNameCountStore = new HashMap<>(); + StringBuilder overallGrokPatternBuilder = new StringBuilder(); + + GrokPatternCreator.appendBestGrokMatchForStrings(fieldNameCountStore, overallGrokPatternBuilder, false, false, mustMatchStrings); + + assertEquals(".*?%{BASE16NUM:field}.*?", overallGrokPatternBuilder.toString()); + } + + public void testAppendBestGrokMatchForStringsGivenHostnamesWithNumbers() { + + Collection mustMatchStrings = Arrays.asList(" fieldNameCountStore = new HashMap<>(); + StringBuilder overallGrokPatternBuilder = new StringBuilder(); + + GrokPatternCreator.appendBestGrokMatchForStrings(fieldNameCountStore, overallGrokPatternBuilder, false, false, mustMatchStrings); + + // We don't want the .1. in the middle to get detected as a hex number + assertEquals(".+?", overallGrokPatternBuilder.toString()); + } + + public void testAppendBestGrokMatchForStringsGivenEmailAddresses() { + + Collection mustMatchStrings = Arrays.asList("before alice@acme.com after", + "abc bob@acme.com xyz", + "carol@acme.com"); + + Map fieldNameCountStore = new HashMap<>(); + StringBuilder overallGrokPatternBuilder = new StringBuilder(); + + GrokPatternCreator.appendBestGrokMatchForStrings(fieldNameCountStore, overallGrokPatternBuilder, false, false, mustMatchStrings); + + assertEquals(".*?%{EMAILADDRESS:email}.*?", overallGrokPatternBuilder.toString()); + } + + public void testAppendBestGrokMatchForStringsGivenUris() { + + Collection mustMatchStrings = Arrays.asList("main site https://www.elastic.co/ with trailing slash", + "https://www.elastic.co/guide/en/x-pack/current/ml-configuring-categories.html#ml-configuring-categories is a section", + "download today from https://www.elastic.co/downloads"); + + Map fieldNameCountStore = new HashMap<>(); + StringBuilder overallGrokPatternBuilder = new StringBuilder(); + + GrokPatternCreator.appendBestGrokMatchForStrings(fieldNameCountStore, overallGrokPatternBuilder, false, false, mustMatchStrings); + + assertEquals(".*?%{URI:uri}.*?", overallGrokPatternBuilder.toString()); + } + + public void testAppendBestGrokMatchForStringsGivenPaths() { + + Collection mustMatchStrings = Arrays.asList("on Mac /Users/dave", + "on Windows C:\\Users\\dave", + "on Linux /home/dave"); + + Map fieldNameCountStore = new HashMap<>(); + StringBuilder overallGrokPatternBuilder = new StringBuilder(); + + GrokPatternCreator.appendBestGrokMatchForStrings(fieldNameCountStore, overallGrokPatternBuilder, false, false, mustMatchStrings); + + assertEquals(".+?%{PATH:path}.*?", overallGrokPatternBuilder.toString()); + } + + public void testFindBestGrokMatchFromExamplesGivenNamedLogs() { + + String regex = 
".*?linux.+?named.+?error.+?unexpected.+?RCODE.+?REFUSED.+?resolving.*"; + Collection examples = Arrays.asList( + "Sep 8 11:55:06 linux named[22529]: error (unexpected RCODE REFUSED) resolving 'elastic.slack.com/A/IN': 95.110.64.205#53", + "Sep 8 11:55:08 linux named[22529]: error (unexpected RCODE REFUSED) resolving 'slack-imgs.com/A/IN': 95.110.64.205#53", + "Sep 8 11:55:35 linux named[22529]: error (unexpected RCODE REFUSED) resolving 'www.elastic.co/A/IN': 95.110.68.206#53", + "Sep 8 11:55:42 linux named[22529]: error (unexpected RCODE REFUSED) resolving 'b.akamaiedge.net/A/IN': 95.110.64.205#53"); + + assertEquals(".*?%{SYSLOGTIMESTAMP:timestamp}.+?linux.+?named.+?%{NUMBER:field}.+?error.+?" + + "unexpected.+?RCODE.+?REFUSED.+?resolving.+?%{QUOTEDSTRING:field2}.+?%{IP:ipaddress}.+?%{NUMBER:field3}.*", + GrokPatternCreator.findBestGrokMatchFromExamples("foo", regex, examples)); + } + + public void testFindBestGrokMatchFromExamplesGivenCatalinaLogs() { + + String regex = ".*?org\\.apache\\.tomcat\\.util\\.http\\.Parameters.+?processParameters.+?WARNING.+?Parameters.+?" + + "Invalid.+?chunk.+?ignored.*"; + // The embedded newline ensures the regular expressions we're using are compiled with Pattern.DOTALL + Collection examples = Arrays.asList( + "Aug 29, 2009 12:03:33 AM org.apache.tomcat.util.http.Parameters processParameters\nWARNING: Parameters: " + + "Invalid chunk ignored.", + "Aug 29, 2009 12:03:40 AM org.apache.tomcat.util.http.Parameters processParameters\nWARNING: Parameters: " + + "Invalid chunk ignored.", + "Aug 29, 2009 12:03:45 AM org.apache.tomcat.util.http.Parameters processParameters\nWARNING: Parameters: " + + "Invalid chunk ignored.", + "Aug 29, 2009 12:03:57 AM org.apache.tomcat.util.http.Parameters processParameters\nWARNING: Parameters: " + + "Invalid chunk ignored."); + + assertEquals(".*?%{CATALINA_DATESTAMP:timestamp}.+?org\\.apache\\.tomcat\\.util\\.http\\.Parameters.+?processParameters.+?" + + "WARNING.+?Parameters.+?Invalid.+?chunk.+?ignored.*", + GrokPatternCreator.findBestGrokMatchFromExamples("foo", regex, examples)); + } + + public void testFindBestGrokMatchFromExamplesGivenMultiTimestampLogs() { + + String regex = ".*?Authpriv.+?Info.+?sshd.+?subsystem.+?request.+?for.+?sftp.*"; + // Two timestamps: one local, one UTC + Collection examples = Arrays.asList( + "559550912540598297\t2016-04-20T14:06:53\t2016-04-20T21:06:53Z\t38545844\tserv02nw07\t192.168.114.28\tAuthpriv\t" + + "Info\tsshd\tsubsystem request for sftp", + "559550912548986880\t2016-04-20T14:06:53\t2016-04-20T21:06:53Z\t9049724\tserv02nw03\t10.120.48.147\tAuthpriv\t" + + "Info\tsshd\tsubsystem request for sftp", + "559550912548986887\t2016-04-20T14:06:53\t2016-04-20T21:06:53Z\t884343\tserv02tw03\t192.168.121.189\tAuthpriv\t" + + "Info\tsshd\tsubsystem request for sftp", + "559550912603512850\t2016-04-20T14:06:53\t2016-04-20T21:06:53Z\t8907014\tserv02nw01\t192.168.118.208\tAuthpriv\t" + + "Info\tsshd\tsubsystem request for sftp"); + + assertEquals(".*?%{NUMBER:field}.+?%{TIMESTAMP_ISO8601:timestamp}.+?%{TIMESTAMP_ISO8601:timestamp2}.+?%{NUMBER:field2}.+?" 
+ + "%{IP:ipaddress}.+?Authpriv.+?Info.+?sshd.+?subsystem.+?request.+?for.+?sftp.*", + GrokPatternCreator.findBestGrokMatchFromExamples("foo", regex, examples)); + } +} diff --git a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/job/persistence/JobProviderTests.java b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/job/persistence/JobProviderTests.java index 485fe44a95f..9fea904a99f 100644 --- a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/job/persistence/JobProviderTests.java +++ b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/job/persistence/JobProviderTests.java @@ -61,7 +61,6 @@ import java.util.Date; import java.util.HashMap; import java.util.List; import java.util.Map; -import java.util.concurrent.ExecutionException; import java.util.concurrent.atomic.AtomicReference; import java.util.function.Consumer; @@ -235,8 +234,7 @@ public class JobProviderTests extends ESTestCase { }); } - public void testBuckets_OneBucketNoInterim() - throws InterruptedException, ExecutionException, IOException { + public void testBuckets_OneBucketNoInterim() throws IOException { String jobId = "TestJobIdentification"; Date now = new Date(); List> source = new ArrayList<>(); @@ -268,8 +266,7 @@ public class JobProviderTests extends ESTestCase { ".*")); } - public void testBuckets_OneBucketInterim() - throws InterruptedException, ExecutionException, IOException { + public void testBuckets_OneBucketInterim() throws IOException { String jobId = "TestJobIdentification"; Date now = new Date(); List> source = new ArrayList<>(); @@ -302,8 +299,7 @@ public class JobProviderTests extends ESTestCase { assertFalse(queryString.matches("(?s).*is_interim.*")); } - public void testBuckets_UsingBuilder() - throws InterruptedException, ExecutionException, IOException { + public void testBuckets_UsingBuilder() throws IOException { String jobId = "TestJobIdentification"; Date now = new Date(); List> source = new ArrayList<>(); @@ -339,8 +335,7 @@ public class JobProviderTests extends ESTestCase { assertFalse(queryString.matches("(?s).*is_interim.*")); } - public void testBucket_NoBucketNoExpand() - throws InterruptedException, ExecutionException, IOException { + public void testBucket_NoBucketNoExpand() throws IOException { String jobId = "TestJobIdentification"; Long timestamp = 98765432123456789L; List> source = new ArrayList<>(); @@ -357,8 +352,7 @@ public class JobProviderTests extends ESTestCase { assertEquals(ResourceNotFoundException.class, holder[0].getClass()); } - public void testBucket_OneBucketNoExpand() - throws InterruptedException, ExecutionException, IOException { + public void testBucket_OneBucketNoExpand() throws IOException { String jobId = "TestJobIdentification"; Date now = new Date(); List> source = new ArrayList<>(); @@ -384,7 +378,7 @@ public class JobProviderTests extends ESTestCase { assertEquals(now, b.getTimestamp()); } - public void testRecords() throws InterruptedException, ExecutionException, IOException { + public void testRecords() throws IOException { String jobId = "TestJobIdentification"; Date now = new Date(); List> source = new ArrayList<>(); @@ -431,8 +425,7 @@ public class JobProviderTests extends ESTestCase { assertEquals("irrascible", records.get(1).getFunction()); } - public void testRecords_UsingBuilder() - throws InterruptedException, ExecutionException, IOException { + public void testRecords_UsingBuilder() throws IOException { String jobId = "TestJobIdentification"; Date now = new Date(); List> source = new ArrayList<>(); @@ -485,7 
+478,7 @@ public class JobProviderTests extends ESTestCase { assertEquals("irrascible", records.get(1).getFunction()); } - public void testBucketRecords() throws InterruptedException, ExecutionException, IOException { + public void testBucketRecords() throws IOException { String jobId = "TestJobIdentification"; Date now = new Date(); Bucket bucket = mock(Bucket.class); @@ -532,7 +525,7 @@ public class JobProviderTests extends ESTestCase { assertEquals("irrascible", records.get(1).getFunction()); } - public void testexpandBucket() throws InterruptedException, ExecutionException, IOException { + public void testexpandBucket() throws IOException { String jobId = "TestJobIdentification"; Date now = new Date(); Bucket bucket = new Bucket("foo", now, 22); @@ -559,8 +552,7 @@ public class JobProviderTests extends ESTestCase { assertEquals(400L, records); } - public void testCategoryDefinitions() - throws InterruptedException, ExecutionException, IOException { + public void testCategoryDefinitions() throws IOException { String jobId = "TestJobIdentification"; String terms = "the terms and conditions are not valid here"; List> source = new ArrayList<>(); @@ -580,15 +572,14 @@ public class JobProviderTests extends ESTestCase { JobProvider provider = createProvider(client); @SuppressWarnings({"unchecked", "rawtypes"}) QueryPage[] holder = new QueryPage[1]; - provider.categoryDefinitions(jobId, null, from, size, r -> holder[0] = r, + provider.categoryDefinitions(jobId, null, false, from, size, r -> holder[0] = r, e -> {throw new RuntimeException(e);}, client); QueryPage categoryDefinitions = holder[0]; assertEquals(1L, categoryDefinitions.count()); assertEquals(terms, categoryDefinitions.results().get(0).getTerms()); } - public void testCategoryDefinition() - throws InterruptedException, ExecutionException, IOException { + public void testCategoryDefinition() throws IOException { String jobId = "TestJobIdentification"; String terms = "the terms and conditions are not valid here"; @@ -603,14 +594,14 @@ public class JobProviderTests extends ESTestCase { JobProvider provider = createProvider(client); @SuppressWarnings({"unchecked", "rawtypes"}) QueryPage[] holder = new QueryPage[1]; - provider.categoryDefinitions(jobId, categoryId, null, null, + provider.categoryDefinitions(jobId, categoryId, false, null, null, r -> holder[0] = r, e -> {throw new RuntimeException(e);}, client); QueryPage categoryDefinitions = holder[0]; assertEquals(1L, categoryDefinitions.count()); assertEquals(terms, categoryDefinitions.results().get(0).getTerms()); } - public void testInfluencers_NoInterim() throws InterruptedException, ExecutionException, IOException { + public void testInfluencers_NoInterim() throws IOException { String jobId = "TestJobIdentificationForInfluencers"; Date now = new Date(); List> source = new ArrayList<>(); @@ -670,7 +661,7 @@ public class JobProviderTests extends ESTestCase { assertEquals(5.0, records.get(1).getInitialInfluencerScore(), 0.00001); } - public void testInfluencers_WithInterim() throws InterruptedException, ExecutionException, IOException { + public void testInfluencers_WithInterim() throws IOException { String jobId = "TestJobIdentificationForInfluencers"; Date now = new Date(); List> source = new ArrayList<>(); @@ -730,7 +721,7 @@ public class JobProviderTests extends ESTestCase { assertEquals(5.0, records.get(1).getInitialInfluencerScore(), 0.00001); } - public void testModelSnapshots() throws InterruptedException, ExecutionException, IOException { + public void testModelSnapshots() 
throws IOException { String jobId = "TestJobIdentificationForInfluencers"; Date now = new Date(); List> source = new ArrayList<>(); @@ -851,8 +842,7 @@ public class JobProviderTests extends ESTestCase { return getResponse; } - private static SearchResponse createSearchResponse(List> source) - throws IOException { + private static SearchResponse createSearchResponse(List> source) throws IOException { SearchResponse response = mock(SearchResponse.class); List list = new ArrayList<>(); diff --git a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/job/results/CategoryDefinitionTests.java b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/job/results/CategoryDefinitionTests.java index fdaa2850823..ee7d4ad4b7a 100644 --- a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/job/results/CategoryDefinitionTests.java +++ b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/job/results/CategoryDefinitionTests.java @@ -25,6 +25,9 @@ public class CategoryDefinitionTests extends AbstractSerializingTestCase Date: Tue, 15 May 2018 10:35:16 +0200 Subject: [PATCH 29/31] [Tests] Relax allowed delta in extended_stats aggregation (#30569) The order in which double values are added in java can give different results for the sum, so we need to allow a certain delta in the test assertions. The current value was still a bit too low, resulting in rare test failures. This change increases the allowed margin of error by a factor of ten. --- .../search/aggregations/metrics/InternalExtendedStatsTests.java | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/server/src/test/java/org/elasticsearch/search/aggregations/metrics/InternalExtendedStatsTests.java b/server/src/test/java/org/elasticsearch/search/aggregations/metrics/InternalExtendedStatsTests.java index 6178a72c83e..eb6a2e40a01 100644 --- a/server/src/test/java/org/elasticsearch/search/aggregations/metrics/InternalExtendedStatsTests.java +++ b/server/src/test/java/org/elasticsearch/search/aggregations/metrics/InternalExtendedStatsTests.java @@ -84,7 +84,7 @@ public class InternalExtendedStatsTests extends InternalAggregationTestCase Date: Tue, 15 May 2018 14:12:30 +0530 Subject: [PATCH 30/31] [Docs] Improve section detailing translog usage (#30573) --- docs/reference/index-modules/translog.asciidoc | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/docs/reference/index-modules/translog.asciidoc b/docs/reference/index-modules/translog.asciidoc index b1eb36e346d..bed19bd5be1 100644 --- a/docs/reference/index-modules/translog.asciidoc +++ b/docs/reference/index-modules/translog.asciidoc @@ -108,8 +108,8 @@ provide a command-line tool for this, `elasticsearch-translog`. [WARNING] The `elasticsearch-translog` tool should *not* be run while Elasticsearch is -running, and you will permanently lose the documents that were contained only in -the translog! +running. If you attempt to run this tool while Elasticsearch is running, you +will permanently lose the documents that were contained only in the translog! 
In order to run the `elasticsearch-translog` tool, specify the `truncate` subcommand as well as the directory for the corrupted translog with the `-d` From 1de5a3180a59e670b97820004d42a027409383fb Mon Sep 17 00:00:00 2001 From: David Roberts Date: Tue, 15 May 2018 09:45:37 +0100 Subject: [PATCH 31/31] [ML] Adjust BWC version following backport of #30125 --- .../xpack/core/ml/job/results/CategoryDefinition.java | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/job/results/CategoryDefinition.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/job/results/CategoryDefinition.java index 90d01f66f63..7d5fb0a1bae 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/job/results/CategoryDefinition.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/job/results/CategoryDefinition.java @@ -78,7 +78,7 @@ public class CategoryDefinition implements ToXContentObject, Writeable { regex = in.readString(); maxMatchingLength = in.readLong(); examples = new TreeSet<>(in.readList(StreamInput::readString)); - if (in.getVersion().onOrAfter(Version.V_7_0_0_alpha1)) { + if (in.getVersion().onOrAfter(Version.V_6_4_0)) { grokPattern = in.readOptionalString(); } } @@ -91,7 +91,7 @@ public class CategoryDefinition implements ToXContentObject, Writeable { out.writeString(regex); out.writeLong(maxMatchingLength); out.writeStringList(new ArrayList<>(examples)); - if (out.getVersion().onOrAfter(Version.V_7_0_0_alpha1)) { + if (out.getVersion().onOrAfter(Version.V_6_4_0)) { out.writeOptionalString(grokPattern); } }
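The hunk above shows the standard wire-compatibility idiom, and the sketch below restates it in isolation. It is an illustration distilled from the diff rather than additional project code; the class name is a placeholder.

[source,java]
--------------------------------------------------
import java.io.IOException;

import org.elasticsearch.Version;
import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.io.stream.StreamOutput;

// Placeholder class illustrating the version gate from the hunk above: an optional
// field introduced in 6.4.0 is only read or written when the node on the other end
// of the stream is 6.4.0 or later, so older nodes never see bytes they cannot parse.
final class VersionGatedField {

    private String grokPattern; // stays null when reading from a pre-6.4.0 node

    void readFrom(StreamInput in) throws IOException {
        if (in.getVersion().onOrAfter(Version.V_6_4_0)) {
            grokPattern = in.readOptionalString();
        }
    }

    void writeTo(StreamOutput out) throws IOException {
        if (out.getVersion().onOrAfter(Version.V_6_4_0)) {
            out.writeOptionalString(grokPattern);
        }
    }
}
--------------------------------------------------

Both sides must gate on the same version constant, which is why backporting #30125 to the 6.4 branch required lowering the constant from `V_7_0_0_alpha1` to `V_6_4_0` in this patch.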