diff --git a/buildSrc/version.properties b/buildSrc/version.properties index a547982e3b6..c98e265792b 100644 --- a/buildSrc/version.properties +++ b/buildSrc/version.properties @@ -1,5 +1,5 @@ elasticsearch = 7.0.0-alpha1 -lucene = 7.4.0-snapshot-59f2b7aec2 +lucene = 7.4.0-snapshot-cc2ee23050 # optional dependencies spatial4j = 0.7 diff --git a/client/rest-high-level/src/main/java/org/elasticsearch/client/RestHighLevelClient.java b/client/rest-high-level/src/main/java/org/elasticsearch/client/RestHighLevelClient.java index 5dbf2709d99..68e32abb69d 100644 --- a/client/rest-high-level/src/main/java/org/elasticsearch/client/RestHighLevelClient.java +++ b/client/rest-high-level/src/main/java/org/elasticsearch/client/RestHighLevelClient.java @@ -26,8 +26,6 @@ import org.elasticsearch.ElasticsearchStatusException; import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.ActionRequest; import org.elasticsearch.action.ActionRequestValidationException; -import org.elasticsearch.action.admin.cluster.repositories.get.GetRepositoriesRequest; -import org.elasticsearch.action.admin.cluster.repositories.get.GetRepositoriesResponse; import org.elasticsearch.action.bulk.BulkRequest; import org.elasticsearch.action.bulk.BulkResponse; import org.elasticsearch.action.delete.DeleteRequest; @@ -592,7 +590,7 @@ public class RestHighLevelClient implements Closeable { throw validationException; } Request req = requestConverter.apply(request); - req.setHeaders(headers); + addHeaders(req, headers); Response response; try { response = client.performRequest(req); @@ -642,12 +640,19 @@ public class RestHighLevelClient implements Closeable { listener.onFailure(e); return; } - req.setHeaders(headers); + addHeaders(req, headers); ResponseListener responseListener = wrapResponseListener(responseConverter, listener, ignores); client.performRequestAsync(req, responseListener); } + private static void addHeaders(Request request, Header... 
headers) { + Objects.requireNonNull(headers, "headers cannot be null"); + for (Header header : headers) { + request.addHeader(header.getName(), header.getValue()); + } + } + final ResponseListener wrapResponseListener(CheckedFunction responseConverter, ActionListener actionListener, Set ignores) { return new ResponseListener() { diff --git a/client/rest-high-level/src/test/java/org/elasticsearch/client/CustomRestHighLevelClientTests.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/CustomRestHighLevelClientTests.java index 617b35c4d40..0bd6ecef8fb 100644 --- a/client/rest-high-level/src/test/java/org/elasticsearch/client/CustomRestHighLevelClientTests.java +++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/CustomRestHighLevelClientTests.java @@ -73,12 +73,12 @@ public class CustomRestHighLevelClientTests extends ESTestCase { final RestClient restClient = mock(RestClient.class); restHighLevelClient = new CustomRestClient(restClient); - doAnswer(inv -> mockPerformRequest(((Request) inv.getArguments()[0]).getHeaders()[0])) + doAnswer(inv -> mockPerformRequest(((Request) inv.getArguments()[0]).getHeaders().iterator().next())) .when(restClient) .performRequest(any(Request.class)); doAnswer(inv -> mockPerformRequestAsync( - ((Request) inv.getArguments()[0]).getHeaders()[0], + ((Request) inv.getArguments()[0]).getHeaders().iterator().next(), (ResponseListener) inv.getArguments()[1])) .when(restClient) .performRequestAsync(any(Request.class), any(ResponseListener.class)); diff --git a/client/rest/src/main/java/org/elasticsearch/client/Request.java b/client/rest/src/main/java/org/elasticsearch/client/Request.java index 92610239cae..59b82e5bf96 100644 --- a/client/rest/src/main/java/org/elasticsearch/client/Request.java +++ b/client/rest/src/main/java/org/elasticsearch/client/Request.java @@ -19,14 +19,17 @@ package org.elasticsearch.client; -import org.apache.http.entity.ContentType; import org.apache.http.Header; import org.apache.http.HttpEntity; +import org.apache.http.entity.ContentType; +import org.apache.http.message.BasicHeader; import org.apache.http.nio.entity.NStringEntity; import org.apache.http.nio.protocol.HttpAsyncResponseConsumer; -import java.util.Arrays; +import java.util.ArrayList; +import java.util.Collections; import java.util.HashMap; +import java.util.List; import java.util.Map; import java.util.Objects; @@ -36,13 +39,12 @@ import static java.util.Collections.unmodifiableMap; * HTTP Request to Elasticsearch. */ public final class Request { - private static final Header[] NO_HEADERS = new Header[0]; private final String method; private final String endpoint; private final Map parameters = new HashMap<>(); + private final List
headers = new ArrayList<>(); private HttpEntity entity; - private Header[] headers = NO_HEADERS; private HttpAsyncResponseConsumerFactory httpAsyncResponseConsumerFactory = HttpAsyncResponseConsumerFactory.DEFAULT; @@ -125,21 +127,19 @@ public final class Request { } /** - * Set the headers to attach to the request. + * Add the provided header to the request. */ - public void setHeaders(Header... headers) { - Objects.requireNonNull(headers, "headers cannot be null"); - for (Header header : headers) { - Objects.requireNonNull(header, "header cannot be null"); - } - this.headers = headers; + public void addHeader(String name, String value) { + Objects.requireNonNull(name, "header name cannot be null"); + Objects.requireNonNull(value, "header value cannot be null"); + this.headers.add(new ReqHeader(name, value)); } /** * Headers to attach to the request. */ - public Header[] getHeaders() { - return headers; + List
getHeaders() { + return Collections.unmodifiableList(headers); } /** @@ -175,13 +175,13 @@ public final class Request { if (entity != null) { b.append(", entity=").append(entity); } - if (headers.length > 0) { + if (headers.size() > 0) { b.append(", headers="); - for (int h = 0; h < headers.length; h++) { + for (int h = 0; h < headers.size(); h++) { if (h != 0) { b.append(','); } - b.append(headers[h].toString()); + b.append(headers.get(h).toString()); } } if (httpAsyncResponseConsumerFactory != HttpAsyncResponseConsumerFactory.DEFAULT) { @@ -204,12 +204,40 @@ public final class Request { && endpoint.equals(other.endpoint) && parameters.equals(other.parameters) && Objects.equals(entity, other.entity) - && Arrays.equals(headers, other.headers) + && headers.equals(other.headers) && httpAsyncResponseConsumerFactory.equals(other.httpAsyncResponseConsumerFactory); } @Override public int hashCode() { - return Objects.hash(method, endpoint, parameters, entity, Arrays.hashCode(headers), httpAsyncResponseConsumerFactory); + return Objects.hash(method, endpoint, parameters, entity, headers.hashCode(), httpAsyncResponseConsumerFactory); + } + + /** + * Custom implementation of {@link BasicHeader} that overrides equals and hashCode. + */ + static final class ReqHeader extends BasicHeader { + + ReqHeader(String name, String value) { + super(name, value); + } + + @Override + public boolean equals(Object other) { + if (this == other) { + return true; + } + if (other instanceof ReqHeader) { + Header otherHeader = (Header) other; + return Objects.equals(getName(), otherHeader.getName()) && + Objects.equals(getValue(), otherHeader.getValue()); + } + return false; + } + + @Override + public int hashCode() { + return Objects.hash(getName(), getValue()); + } } } diff --git a/client/rest/src/main/java/org/elasticsearch/client/RestClient.java b/client/rest/src/main/java/org/elasticsearch/client/RestClient.java index 05fa4d536b3..33171e18e74 100644 --- a/client/rest/src/main/java/org/elasticsearch/client/RestClient.java +++ b/client/rest/src/main/java/org/elasticsearch/client/RestClient.java @@ -215,7 +215,7 @@ public class RestClient implements Closeable { @Deprecated public Response performRequest(String method, String endpoint, Header... headers) throws IOException { Request request = new Request(method, endpoint); - request.setHeaders(headers); + addHeaders(request, headers); return performRequest(request); } @@ -237,7 +237,7 @@ public class RestClient implements Closeable { public Response performRequest(String method, String endpoint, Map params, Header... 
headers) throws IOException { Request request = new Request(method, endpoint); addParameters(request, params); - request.setHeaders(headers); + addHeaders(request, headers); return performRequest(request); } @@ -264,7 +264,7 @@ public class RestClient implements Closeable { Request request = new Request(method, endpoint); addParameters(request, params); request.setEntity(entity); - request.setHeaders(headers); + addHeaders(request, headers); return performRequest(request); } @@ -305,7 +305,7 @@ public class RestClient implements Closeable { addParameters(request, params); request.setEntity(entity); request.setHttpAsyncResponseConsumerFactory(httpAsyncResponseConsumerFactory); - request.setHeaders(headers); + addHeaders(request, headers); return performRequest(request); } @@ -325,7 +325,7 @@ public class RestClient implements Closeable { Request request; try { request = new Request(method, endpoint); - request.setHeaders(headers); + addHeaders(request, headers); } catch (Exception e) { responseListener.onFailure(e); return; @@ -352,7 +352,7 @@ public class RestClient implements Closeable { try { request = new Request(method, endpoint); addParameters(request, params); - request.setHeaders(headers); + addHeaders(request, headers); } catch (Exception e) { responseListener.onFailure(e); return; @@ -383,7 +383,7 @@ public class RestClient implements Closeable { request = new Request(method, endpoint); addParameters(request, params); request.setEntity(entity); - request.setHeaders(headers); + addHeaders(request, headers); } catch (Exception e) { responseListener.onFailure(e); return; @@ -420,7 +420,7 @@ public class RestClient implements Closeable { addParameters(request, params); request.setEntity(entity); request.setHttpAsyncResponseConsumerFactory(httpAsyncResponseConsumerFactory); - request.setHeaders(headers); + addHeaders(request, headers); } catch (Exception e) { responseListener.onFailure(e); return; @@ -539,9 +539,9 @@ public class RestClient implements Closeable { }); } - private void setHeaders(HttpRequest httpRequest, Header[] requestHeaders) { + private void setHeaders(HttpRequest httpRequest, Collection
requestHeaders) { // request headers override default headers, so we don't add default headers if they exist as request headers - final Set requestNames = new HashSet<>(requestHeaders.length); + final Set requestNames = new HashSet<>(requestHeaders.size()); for (Header requestHeader : requestHeaders) { httpRequest.addHeader(requestHeader); requestNames.add(requestHeader.getName()); @@ -877,10 +877,24 @@ public class RestClient implements Closeable { } } + /** + * Add all headers from the provided varargs argument to a {@link Request}. This only exists + * to support methods that exist for backwards compatibility. + */ + @Deprecated + private static void addHeaders(Request request, Header... headers) { + Objects.requireNonNull(headers, "headers cannot be null"); + for (Header header : headers) { + Objects.requireNonNull(header, "header cannot be null"); + request.addHeader(header.getName(), header.getValue()); + } + } + /** * Add all parameters from a map to a {@link Request}. This only exists * to support methods that exist for backwards compatibility. */ + @Deprecated private static void addParameters(Request request, Map parameters) { Objects.requireNonNull(parameters, "parameters cannot be null"); for (Map.Entry entry : parameters.entrySet()) { diff --git a/client/rest/src/test/java/org/elasticsearch/client/RequestTests.java b/client/rest/src/test/java/org/elasticsearch/client/RequestTests.java index 6625c389c6b..29bbf23a1f2 100644 --- a/client/rest/src/test/java/org/elasticsearch/client/RequestTests.java +++ b/client/rest/src/test/java/org/elasticsearch/client/RequestTests.java @@ -19,21 +19,21 @@ package org.elasticsearch.client; -import java.io.ByteArrayOutputStream; -import java.io.IOException; -import java.util.HashMap; -import java.util.Map; - import org.apache.http.Header; import org.apache.http.HttpEntity; import org.apache.http.entity.ByteArrayEntity; import org.apache.http.entity.ContentType; import org.apache.http.entity.StringEntity; -import org.apache.http.message.BasicHeader; import org.apache.http.nio.entity.NStringEntity; import org.elasticsearch.client.HttpAsyncResponseConsumerFactory.HeapBufferedResponseConsumerFactory; -import static org.junit.Assert.assertArrayEquals; +import java.io.ByteArrayOutputStream; +import java.io.IOException; +import java.util.ArrayList; +import java.util.HashMap; +import java.util.List; +import java.util.Map; + import static org.junit.Assert.assertEquals; import static org.junit.Assert.assertNotEquals; import static org.junit.Assert.assertNull; @@ -127,31 +127,33 @@ public class RequestTests extends RestClientTestCase { assertEquals(json, new String(os.toByteArray(), ContentType.APPLICATION_JSON.getCharset())); } - public void testSetHeaders() { + public void testAddHeader() { final String method = randomFrom(new String[] {"GET", "PUT", "POST", "HEAD", "DELETE"}); final String endpoint = randomAsciiLettersOfLengthBetween(1, 10); Request request = new Request(method, endpoint); try { - request.setHeaders((Header[]) null); + request.addHeader(null, randomAsciiLettersOfLengthBetween(3, 10)); fail("expected failure"); } catch (NullPointerException e) { - assertEquals("headers cannot be null", e.getMessage()); + assertEquals("header name cannot be null", e.getMessage()); } try { - request.setHeaders(new Header [] {null}); + request.addHeader(randomAsciiLettersOfLengthBetween(3, 10), null); fail("expected failure"); } catch (NullPointerException e) { - assertEquals("header cannot be null", e.getMessage()); + assertEquals("header value cannot be 
null", e.getMessage()); } - Header[] headers = new Header[between(0, 5)]; - for (int i = 0; i < headers.length; i++) { - headers[i] = new BasicHeader(randomAsciiAlphanumOfLength(3), randomAsciiAlphanumOfLength(3)); + int numHeaders = between(0, 5); + List
headers = new ArrayList<>(); + for (int i = 0; i < numHeaders; i++) { + Header header = new Request.ReqHeader(randomAsciiAlphanumOfLengthBetween(5, 10), randomAsciiAlphanumOfLength(3)); + headers.add(header); + request.addHeader(header.getName(), header.getValue()); } - request.setHeaders(headers); - assertArrayEquals(headers, request.getHeaders()); + assertEquals(headers, new ArrayList<>(request.getHeaders())); } public void testEqualsAndHashCode() { @@ -168,7 +170,7 @@ public class RequestTests extends RestClientTestCase { assertNotEquals(mutant, request); } - private Request randomRequest() { + private static Request randomRequest() { Request request = new Request( randomFrom(new String[] {"GET", "PUT", "DELETE", "POST", "HEAD", "OPTIONS"}), randomAsciiAlphanumOfLength(5)); @@ -192,11 +194,9 @@ public class RequestTests extends RestClientTestCase { if (randomBoolean()) { int headerCount = between(1, 5); - Header[] headers = new Header[headerCount]; for (int i = 0; i < headerCount; i++) { - headers[i] = new BasicHeader(randomAsciiAlphanumOfLength(3), randomAsciiAlphanumOfLength(3)); + request.addHeader(randomAsciiAlphanumOfLength(3), randomAsciiAlphanumOfLength(3)); } - request.setHeaders(headers); } if (randomBoolean()) { @@ -206,13 +206,13 @@ public class RequestTests extends RestClientTestCase { return request; } - private Request copy(Request request) { + private static Request copy(Request request) { Request copy = new Request(request.getMethod(), request.getEndpoint()); copyMutables(request, copy); return copy; } - private Request mutate(Request request) { + private static Request mutate(Request request) { if (randomBoolean()) { // Mutate request or method but keep everything else constant Request mutant = randomBoolean() @@ -231,11 +231,7 @@ public class RequestTests extends RestClientTestCase { mutant.setJsonEntity("mutant"); // randomRequest can't produce this value return mutant; case 2: - if (mutant.getHeaders().length > 0) { - mutant.setHeaders(new Header[0]); - } else { - mutant.setHeaders(new BasicHeader("extra", "m")); - } + mutant.addHeader("extra", "m"); return mutant; case 3: mutant.setHttpAsyncResponseConsumerFactory(new HeapBufferedResponseConsumerFactory(5)); @@ -245,12 +241,14 @@ public class RequestTests extends RestClientTestCase { } } - private void copyMutables(Request from, Request to) { + private static void copyMutables(Request from, Request to) { for (Map.Entry param : from.getParameters().entrySet()) { to.addParameter(param.getKey(), param.getValue()); } to.setEntity(from.getEntity()); - to.setHeaders(from.getHeaders()); + for (Header header : from.getHeaders()) { + to.addHeader(header.getName(), header.getValue()); + } to.setHttpAsyncResponseConsumerFactory(from.getHttpAsyncResponseConsumerFactory()); } } diff --git a/client/rest/src/test/java/org/elasticsearch/client/RestClientSingleHostIntegTests.java b/client/rest/src/test/java/org/elasticsearch/client/RestClientSingleHostIntegTests.java index 35cac627bbe..a3d0196dab9 100644 --- a/client/rest/src/test/java/org/elasticsearch/client/RestClientSingleHostIntegTests.java +++ b/client/rest/src/test/java/org/elasticsearch/client/RestClientSingleHostIntegTests.java @@ -29,7 +29,6 @@ import org.apache.http.HttpHost; import org.apache.http.auth.AuthScope; import org.apache.http.auth.UsernamePasswordCredentials; import org.apache.http.entity.ContentType; -import org.apache.http.entity.StringEntity; import org.apache.http.impl.client.BasicCredentialsProvider; import 
org.apache.http.impl.client.TargetAuthenticationStrategy; import org.apache.http.impl.nio.client.HttpAsyncClientBuilder; @@ -379,7 +378,9 @@ public class RestClientSingleHostIntegTests extends RestClientTestCase { String requestBody = "{ \"field\": \"value\" }"; Request request = new Request(method, "/" + statusCode); request.setJsonEntity(requestBody); - request.setHeaders(headers); + for (Header header : headers) { + request.addHeader(header.getName(), header.getValue()); + } Response esResponse; try { esResponse = restClient.performRequest(request); diff --git a/client/rest/src/test/java/org/elasticsearch/client/RestClientSingleHostTests.java b/client/rest/src/test/java/org/elasticsearch/client/RestClientSingleHostTests.java index 714d2e57e6d..3811b60023b 100644 --- a/client/rest/src/test/java/org/elasticsearch/client/RestClientSingleHostTests.java +++ b/client/rest/src/test/java/org/elasticsearch/client/RestClientSingleHostTests.java @@ -312,7 +312,7 @@ public class RestClientSingleHostTests extends RestClientTestCase { } /** - * @deprecated will remove method in 7.0 but needs tests until then. Replaced by {@link RequestTests#testSetHeaders()}. + * @deprecated will remove method in 7.0 but needs tests until then. Replaced by {@link RequestTests#testAddHeader()}. */ @Deprecated public void tesPerformRequestOldStyleNullHeaders() throws IOException { @@ -333,7 +333,7 @@ } /** - * @deprecated will remove method in 7.0 but needs tests until then. Replaced by {@link RequestTests#testSetParameters()}. + * @deprecated will remove method in 7.0 but needs tests until then. Replaced by {@link RequestTests#testAddParameters()}. */ @Deprecated public void testPerformRequestOldStyleWithNullParams() throws IOException { @@ -362,7 +362,9 @@ final Header[] requestHeaders = RestClientTestUtil.randomHeaders(getRandom(), "Header"); final int statusCode = randomStatusCode(getRandom()); Request request = new Request(method, "/" + statusCode); - request.setHeaders(requestHeaders); + for (Header requestHeader : requestHeaders) { + request.addHeader(requestHeader.getName(), requestHeader.getValue()); + } Response esResponse; try { esResponse = restClient.performRequest(request); @@ -436,9 +438,9 @@ final Set uniqueNames = new HashSet<>(); if (randomBoolean()) { Header[] headers = RestClientTestUtil.randomHeaders(getRandom(), "Header"); - request.setHeaders(headers); for (Header header : headers) { - expectedRequest.addHeader(header); + request.addHeader(header.getName(), header.getValue()); + expectedRequest.addHeader(new Request.ReqHeader(header.getName(), header.getValue())); uniqueNames.add(header.getName()); } } diff --git a/client/rest/src/test/java/org/elasticsearch/client/RestClientTests.java b/client/rest/src/test/java/org/elasticsearch/client/RestClientTests.java index ea124828e45..15fa5c0f995 100644 --- a/client/rest/src/test/java/org/elasticsearch/client/RestClientTests.java +++ b/client/rest/src/test/java/org/elasticsearch/client/RestClientTests.java @@ -27,11 +27,13 @@ import java.io.IOException; import java.net.URI; import java.util.Collections; import java.util.concurrent.CountDownLatch; +import java.util.concurrent.TimeUnit; import static org.elasticsearch.client.RestClientTestUtil.getHttpMethods; import static org.hamcrest.Matchers.instanceOf; import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertThat; +import static org.junit.Assert.assertTrue; import static org.junit.Assert.fail; import static org.mockito.Mockito.mock; import static org.mockito.Mockito.times; @@ -57,17 +59,20 @@ public class RestClientTests extends RestClientTestCase { restClient.performRequestAsync(new Request("unsupported", randomAsciiLettersOfLength(5)), new ResponseListener() { @Override public void onSuccess(Response response) { - fail("should have failed because of unsupported method"); + throw new UnsupportedOperationException("onSuccess cannot be called when using a mocked http client"); } @Override public void onFailure(Exception exception) { - assertThat(exception, instanceOf(UnsupportedOperationException.class)); - assertEquals("http method not supported: unsupported", exception.getMessage()); - latch.countDown(); + try { + assertThat(exception, instanceOf(UnsupportedOperationException.class)); + assertEquals("http method not supported: unsupported", exception.getMessage()); + } finally { + latch.countDown(); + } } }); - latch.await(); + assertTrue("time out waiting for request to return", latch.await(1000, TimeUnit.MILLISECONDS)); } } @@ -81,17 +86,20 @@ public class RestClientTests extends RestClientTestCase { restClient.performRequestAsync("unsupported", randomAsciiLettersOfLength(5), new ResponseListener() { @Override public void onSuccess(Response response) { - fail("should have failed because of unsupported method"); + throw new UnsupportedOperationException("onSuccess cannot be called when using a mocked http client"); } @Override public void onFailure(Exception exception) { - assertThat(exception, instanceOf(UnsupportedOperationException.class)); - assertEquals("http method not supported: unsupported", exception.getMessage()); - latch.countDown(); + try { + assertThat(exception, instanceOf(UnsupportedOperationException.class)); + assertEquals("http method not supported: unsupported", exception.getMessage()); + } finally { + latch.countDown(); + } } }); - latch.await(); + assertTrue("time out waiting for request to return", latch.await(1000, TimeUnit.MILLISECONDS)); } } @@ -105,22 +113,25 @@ public class RestClientTests extends RestClientTestCase { restClient.performRequestAsync(randomAsciiLettersOfLength(5), randomAsciiLettersOfLength(5), null, new ResponseListener() { @Override public void onSuccess(Response response) { - fail("should have failed because of null parameters"); + throw new UnsupportedOperationException("onSuccess cannot be called when using a mocked http client"); } @Override public void onFailure(Exception exception) { - assertThat(exception, instanceOf(NullPointerException.class)); - assertEquals("parameters cannot be null", exception.getMessage()); - latch.countDown(); + try { + assertThat(exception, instanceOf(NullPointerException.class)); + assertEquals("parameters cannot be null", exception.getMessage()); + } finally { + latch.countDown(); + } } }); - latch.await(); + assertTrue("time out waiting for request to return", latch.await(1000, TimeUnit.MILLISECONDS)); } } /** - * @deprecated will remove method in 7.0 but needs tests until then. Replaced by {@link RequestTests#testSetHeaders()}. + * @deprecated will remove method in 7.0 but needs tests until then. Replaced by {@link RequestTests#testAddHeader()}. 
*/ @Deprecated public void testPerformOldStyleAsyncWithNullHeaders() throws Exception { @@ -129,18 +140,21 @@ public class RestClientTests extends RestClientTestCase { ResponseListener listener = new ResponseListener() { @Override public void onSuccess(Response response) { - fail("should have failed because of null headers"); + throw new UnsupportedOperationException("onSuccess cannot be called when using a mocked http client"); } @Override public void onFailure(Exception exception) { - assertThat(exception, instanceOf(NullPointerException.class)); - assertEquals("header cannot be null", exception.getMessage()); - latch.countDown(); + try { + assertThat(exception, instanceOf(NullPointerException.class)); + assertEquals("header cannot be null", exception.getMessage()); + } finally { + latch.countDown(); + } } }; restClient.performRequestAsync("GET", randomAsciiLettersOfLength(5), listener, (Header) null); - latch.await(); + assertTrue("time out waiting for request to return", latch.await(1000, TimeUnit.MILLISECONDS)); } } @@ -150,17 +164,20 @@ public class RestClientTests extends RestClientTestCase { restClient.performRequestAsync(new Request("GET", "::http:///"), new ResponseListener() { @Override public void onSuccess(Response response) { - fail("should have failed because of wrong endpoint"); + throw new UnsupportedOperationException("onSuccess cannot be called when using a mocked http client"); } @Override public void onFailure(Exception exception) { - assertThat(exception, instanceOf(IllegalArgumentException.class)); - assertEquals("Expected scheme name at index 0: ::http:///", exception.getMessage()); - latch.countDown(); + try { + assertThat(exception, instanceOf(IllegalArgumentException.class)); + assertEquals("Expected scheme name at index 0: ::http:///", exception.getMessage()); + } finally { + latch.countDown(); + } } }); - latch.await(); + assertTrue("time out waiting for request to return", latch.await(1000, TimeUnit.MILLISECONDS)); } } @@ -174,17 +191,20 @@ public class RestClientTests extends RestClientTestCase { restClient.performRequestAsync("GET", "::http:///", new ResponseListener() { @Override public void onSuccess(Response response) { - fail("should have failed because of wrong endpoint"); + throw new UnsupportedOperationException("onSuccess cannot be called when using a mocked http client"); } @Override public void onFailure(Exception exception) { - assertThat(exception, instanceOf(IllegalArgumentException.class)); - assertEquals("Expected scheme name at index 0: ::http:///", exception.getMessage()); - latch.countDown(); + try { + assertThat(exception, instanceOf(IllegalArgumentException.class)); + assertEquals("Expected scheme name at index 0: ::http:///", exception.getMessage()); + } finally { + latch.countDown(); + } } }); - latch.await(); + assertTrue("time out waiting for request to return", latch.await(1000, TimeUnit.MILLISECONDS)); } } diff --git a/client/rest/src/test/java/org/elasticsearch/client/documentation/RestClientDocumentation.java b/client/rest/src/test/java/org/elasticsearch/client/documentation/RestClientDocumentation.java index 5ee97399b34..f3ce112fea1 100644 --- a/client/rest/src/test/java/org/elasticsearch/client/documentation/RestClientDocumentation.java +++ b/client/rest/src/test/java/org/elasticsearch/client/documentation/RestClientDocumentation.java @@ -27,9 +27,7 @@ import org.apache.http.auth.AuthScope; import org.apache.http.auth.UsernamePasswordCredentials; import org.apache.http.client.CredentialsProvider; import 
org.apache.http.client.config.RequestConfig; -import org.apache.http.entity.BasicHttpEntity; import org.apache.http.entity.ContentType; -import org.apache.http.entity.StringEntity; import org.apache.http.impl.client.BasicCredentialsProvider; import org.apache.http.impl.nio.client.HttpAsyncClientBuilder; import org.apache.http.impl.nio.reactor.IOReactorConfig; @@ -52,8 +50,6 @@ import java.nio.file.Files; import java.nio.file.Path; import java.nio.file.Paths; import java.security.KeyStore; -import java.util.Collections; -import java.util.Map; import java.util.concurrent.CountDownLatch; /** @@ -176,9 +172,8 @@ public class RestClientDocumentation { request.setJsonEntity("{\"json\":\"text\"}"); //end::rest-client-body-shorter //tag::rest-client-headers - request.setHeaders( - new BasicHeader("Accept", "text/plain"), - new BasicHeader("Cache-Control", "no-cache")); + request.addHeader("Accept", "text/plain"); + request.addHeader("Cache-Control", "no-cache"); //end::rest-client-headers //tag::rest-client-response-consumer request.setHttpAsyncResponseConsumerFactory( diff --git a/client/test/src/main/java/org/elasticsearch/client/RestClientTestUtil.java b/client/test/src/main/java/org/elasticsearch/client/RestClientTestUtil.java index a0a6641abbc..07bae6c17fd 100644 --- a/client/test/src/main/java/org/elasticsearch/client/RestClientTestUtil.java +++ b/client/test/src/main/java/org/elasticsearch/client/RestClientTestUtil.java @@ -100,7 +100,7 @@ final class RestClientTestUtil { if (random.nextBoolean()) { headerName = headerName + i; } - headers[i] = new BasicHeader(headerName, RandomStrings.randomAsciiOfLengthBetween(random, 3, 10)); + headers[i] = new BasicHeader(headerName, RandomStrings.randomAsciiLettersOfLengthBetween(random, 3, 10)); } return headers; } diff --git a/distribution/src/bin/elasticsearch-cli.bat b/distribution/src/bin/elasticsearch-cli.bat new file mode 100644 index 00000000000..efda5f653ef Binary files /dev/null and b/distribution/src/bin/elasticsearch-cli.bat differ diff --git a/distribution/src/bin/elasticsearch-keystore.bat b/distribution/src/bin/elasticsearch-keystore.bat index 1d6616983d8..9bd72a65745 100644 Binary files a/distribution/src/bin/elasticsearch-keystore.bat and b/distribution/src/bin/elasticsearch-keystore.bat differ diff --git a/distribution/src/bin/elasticsearch-plugin.bat b/distribution/src/bin/elasticsearch-plugin.bat index b3b94a31863..d46ef295d08 100644 Binary files a/distribution/src/bin/elasticsearch-plugin.bat and b/distribution/src/bin/elasticsearch-plugin.bat differ diff --git a/distribution/src/bin/elasticsearch-translog.bat b/distribution/src/bin/elasticsearch-translog.bat index 492c1f08312..37d96bbed6c 100644 Binary files a/distribution/src/bin/elasticsearch-translog.bat and b/distribution/src/bin/elasticsearch-translog.bat differ diff --git a/docs/java-rest/low-level/usage.asciidoc b/docs/java-rest/low-level/usage.asciidoc index 68367b9a64f..012ce418226 100644 --- a/docs/java-rest/low-level/usage.asciidoc +++ b/docs/java-rest/low-level/usage.asciidoc @@ -271,7 +271,7 @@ a `ContentType` of `application/json`. 
include-tagged::{doc-tests}/RestClientDocumentation.java[rest-client-body-shorter] -------------------------------------------------- -And you can set a list of headers to send with the request: +And you can add one or more headers to send with the request: ["source","java",subs="attributes,callouts,macros"] -------------------------------------------------- diff --git a/docs/reference/analysis/tokenizers.asciidoc b/docs/reference/analysis/tokenizers.asciidoc index add0abdec01..d6f15ded05f 100644 --- a/docs/reference/analysis/tokenizers.asciidoc +++ b/docs/reference/analysis/tokenizers.asciidoc @@ -103,6 +103,11 @@ The `simple_pattern` tokenizer uses a regular expression to capture matching text as terms. It uses a restricted subset of regular expression features and is generally faster than the `pattern` tokenizer. +<<analysis-chargroup-tokenizer,Char Group Tokenizer>>:: + +The `char_group` tokenizer is configurable through sets of characters to split +on, which is usually less expensive than running regular expressions. + <<analysis-simplepatternsplit-tokenizer,Simple Pattern Split Tokenizer>>:: The `simple_pattern_split` tokenizer uses the same restricted regular expression @@ -143,6 +148,8 @@ include::tokenizers/keyword-tokenizer.asciidoc[] include::tokenizers/pattern-tokenizer.asciidoc[] +include::tokenizers/chargroup-tokenizer.asciidoc[] + include::tokenizers/simplepattern-tokenizer.asciidoc[] include::tokenizers/simplepatternsplit-tokenizer.asciidoc[] diff --git a/docs/reference/analysis/tokenizers/chargroup-tokenizer.asciidoc b/docs/reference/analysis/tokenizers/chargroup-tokenizer.asciidoc new file mode 100644 index 00000000000..e6bf79b0e96 --- /dev/null +++ b/docs/reference/analysis/tokenizers/chargroup-tokenizer.asciidoc @@ -0,0 +1,80 @@ +[[analysis-chargroup-tokenizer]] +=== Char Group Tokenizer + +The `char_group` tokenizer breaks text into terms whenever it encounters a +character which is in a defined set. It is mostly useful for cases where a simple +custom tokenization is desired, and the overhead of the <<analysis-pattern-tokenizer,`pattern` tokenizer>> +is not acceptable. + +[float] +=== Configuration + +The `char_group` tokenizer accepts one parameter: + +[horizontal] +`tokenize_on_chars`:: + A list of characters to tokenize the string on. Whenever a character + from this list is encountered, a new token is started. This accepts either single + characters, e.g. `-`, or character groups: `whitespace`, `letter`, `digit`, + `punctuation`, `symbol`.
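+ +The tokenizer can also be registered in the analysis settings of an index and +wired into a custom analyzer. The index, tokenizer and analyzer names below are +illustrative: + +[source,js] +--------------------------- +PUT my_index +{ + "settings": { + "analysis": { + "analyzer": { + "my_analyzer": { + "type": "custom", + "tokenizer": "my_tokenizer" + } + }, + "tokenizer": { + "my_tokenizer": { + "type": "char_group", + "tokenize_on_chars": [ + "whitespace", + "-" + ] + } + } + } + } +} +--------------------------- +// CONSOLE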
+ + +[float] +=== Example output + +[source,js] +--------------------------- +POST _analyze +{ + "tokenizer": { + "type": "char_group", + "tokenize_on_chars": [ + "whitespace", + "-", + "\n" + ] + }, + "text": "The QUICK brown-fox" +} +--------------------------- +// CONSOLE + +returns + +[source,js] +--------------------------- +{ + "tokens": [ + { + "token": "The", + "start_offset": 0, + "end_offset": 3, + "type": "word", + "position": 0 + }, + { + "token": "QUICK", + "start_offset": 4, + "end_offset": 9, + "type": "word", + "position": 1 + }, + { + "token": "brown", + "start_offset": 10, + "end_offset": 15, + "type": "word", + "position": 2 + }, + { + "token": "fox", + "start_offset": 16, + "end_offset": 19, + "type": "word", + "position": 3 + } + ] +} +--------------------------- +// TESTRESPONSE + diff --git a/docs/reference/mapping/types.asciidoc b/docs/reference/mapping/types.asciidoc index 2cbc3a5bc54..ecb2e8dace2 100644 --- a/docs/reference/mapping/types.asciidoc +++ b/docs/reference/mapping/types.asciidoc @@ -40,6 +40,8 @@ string:: <> and <> <>:: Defines parent/child relation for documents within the same index +<>:: Record numeric features to boost hits at query time. + [float] === Multi-fields @@ -86,6 +88,6 @@ include::types/percolator.asciidoc[] include::types/parent-join.asciidoc[] - +include::types/feature.asciidoc[] diff --git a/docs/reference/mapping/types/feature.asciidoc b/docs/reference/mapping/types/feature.asciidoc new file mode 100644 index 00000000000..3b5e78d5fb4 --- /dev/null +++ b/docs/reference/mapping/types/feature.asciidoc @@ -0,0 +1,59 @@ +[[feature]] +=== Feature datatype + +A `feature` field can index numbers so that they can later be used to boost +documents in queries with a <> query. + +[source,js] +-------------------------------------------------- +PUT my_index +{ + "mappings": { + "_doc": { + "properties": { + "pagerank": { + "type": "feature" <1> + }, + "url_length": { + "type": "feature", + "positive_score_impact": false <2> + } + } + } + } +} + +PUT my_index/_doc/1 +{ + "pagerank": 8, + "url_length": 22 +} + +GET my_index/_search +{ + "query": { + "feature": { + "field": "pagerank" + } + } +} +-------------------------------------------------- +// CONSOLE +<1> Feature fields must use the `feature` field type +<2> Features that correlate negatively with the score need to declare it + +NOTE: `feature` fields only support single-valued fields and strictly positive +values. Multi-valued fields and negative values will be rejected. + +NOTE: `feature` fields do not support querying, sorting or aggregating. They may +only be used within <> queries. + +NOTE: `feature` fields only preserve 9 significant bits for the precision, which +translates to a relative error of about 0.4%. + +Features that correlate negatively with the score should set +`positive_score_impact` to `false` (defaults to `true`). This will be used by +the <> query to modify the scoring formula +in such a way that the score decreases with the value of the feature instead of +increasing. For instance in web search, the url length is a commonly used +feature which correlates negatively with scores. diff --git a/docs/reference/migration/migrate_7_0/plugins.asciidoc b/docs/reference/migration/migrate_7_0/plugins.asciidoc index 365a2c5a39f..829a93573c9 100644 --- a/docs/reference/migration/migrate_7_0/plugins.asciidoc +++ b/docs/reference/migration/migrate_7_0/plugins.asciidoc @@ -10,7 +10,7 @@ You need to use settings which are starting with `azure.client.` prefix instead. 
* Global timeout setting `cloud.azure.storage.timeout` has been removed. You must set it per azure client instead. Like `azure.client.default.timeout: 10s` for example. -See {plugins}/repository-azure-usage.html#repository-azure-repository-settings[Azure Repository settings]. +See {plugins}/repository-azure-repository-settings.html#repository-azure-repository-settings[Azure Repository settings]. ==== Google Cloud Storage Repository plugin diff --git a/docs/reference/modules/indices/circuit_breaker.asciidoc b/docs/reference/modules/indices/circuit_breaker.asciidoc index 857f54132cc..3df187086bb 100644 --- a/docs/reference/modules/indices/circuit_breaker.asciidoc +++ b/docs/reference/modules/indices/circuit_breaker.asciidoc @@ -76,7 +76,7 @@ memory on a node. The memory usage is based on the content length of the request [float] ==== Accounting requests circuit breaker -The in flight requests circuit breaker allows Elasticsearch to limit the memory +The accounting circuit breaker allows Elasticsearch to limit the memory usage of things held in memory that are not released when a request is completed. This includes things like the Lucene segment memory. diff --git a/docs/reference/query-dsl/feature-query.asciidoc b/docs/reference/query-dsl/feature-query.asciidoc new file mode 100644 index 00000000000..19c29b1cf3a --- /dev/null +++ b/docs/reference/query-dsl/feature-query.asciidoc @@ -0,0 +1,181 @@ +[[query-dsl-feature-query]] +=== Feature Query + +The `feature` query is a specialized query that only works on +<> fields. Its goal is to boost the score of documents based +on the values of numeric features. It is typically put in a `should` clause of +a <> query so that its score is added to the score +of the query. + +Compared to using <> or other +ways to modify the score, this query has the benefit of being able to +efficiently skip non-competitive hits when +<> is set to `false`. Speedups may be +spectacular. + +Here is an example: + +[source,js] +-------------------------------------------------- +PUT test +{ + "mappings": { + "_doc": { + "properties": { + "pagerank": { + "type": "feature" + }, + "url_length": { + "type": "feature", + "positive_score_impact": false + } + } + } + } +} + +PUT test/_doc/1 +{ + "pagerank": 10, + "url_length": 50 +} + +PUT test/_doc/2 +{ + "pagerank": 100, + "url_length": 20 +} + +POST test/_refresh + +GET test/_search +{ + "query": { + "feature": { + "field": "pagerank" + } + } +} + +GET test/_search +{ + "query": { + "feature": { + "field": "url_length" + } + } +} +-------------------------------------------------- +// CONSOLE + +[float] +=== Supported functions + +The `feature` query supports 3 functions in order to boost scores using the +values of features. If you do not know where to start, we recommend that you +start with the `saturation` function, which is the default when no function is +provided. + +[float] +==== Saturation + +This function gives a score that is equal to `S / (S + pivot)` where `S` is the +value of the feature and `pivot` is a configurable pivot value so that the +result will be less than +0.5+ if `S` is less than pivot and greater than +0.5+ +otherwise. Scores are always in +(0, 1)+. + +If the feature has a negative score impact then the function will be computed as +`pivot / (S + pivot)`, which decreases when `S` increases.
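+ +For instance, with `"pivot": 8` as in the request below, document `1` above +(`pagerank: 10`) would get a score of `10 / (10 + 8) ≈ 0.56`, while document +`2` (`pagerank: 100`) would get `100 / (100 + 8) ≈ 0.93`.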
+ +[source,js] +-------------------------------------------------- +GET test/_search +{ + "query": { + "feature": { + "field": "pagerank", + "saturation": { + "pivot": 8 + } + } + } +} +-------------------------------------------------- +// CONSOLE +// TEST[continued] + +If +pivot+ is not supplied then Elasticsearch will compute a default value that +will be approximately equal to the geometric mean of all feature values that +exist in the index. We recommend this if you haven't had the opportunity to +train a good pivot value. + +[source,js] +-------------------------------------------------- +GET test/_search +{ + "query": { + "feature": { + "field": "pagerank", + "saturation": {} + } + } +} +-------------------------------------------------- +// CONSOLE +// TEST[continued] + +[float] +==== Logarithm + +This function gives a score that is equal to `log(scaling_factor + S)` where +`S` is the value of the feature and `scaling_factor` is a configurable scaling +factor. Scores are unbounded. + +This function only supports features that have a positive score impact. + +[source,js] +-------------------------------------------------- +GET test/_search +{ + "query": { + "feature": { + "field": "pagerank", + "log": { + "scaling_factor": 4 + } + } + } +} +-------------------------------------------------- +// CONSOLE +// TEST[continued] + +[float] +==== Sigmoid + +This function is an extension of `saturation` which adds a configurable +exponent. Scores are computed as `S^exp^ / (S^exp^ + pivot^exp^)`. As with the +`saturation` function, `pivot` is the value of `S` that gives a score of +0.5+ +and scores are in +(0, 1)+. + +`exponent` must be positive, but is typically in +[0.5, 1]+. A good value should +be computed via training. If you don't have the opportunity to do so, we recommend +that you stick to the `saturation` function instead. + +[source,js] +-------------------------------------------------- +GET test/_search +{ + "query": { + "feature": { + "field": "pagerank", + "sigmoid": { + "pivot": 7, + "exponent": 0.6 + } + } + } +} +-------------------------------------------------- +// CONSOLE +// TEST[continued] diff --git a/docs/reference/query-dsl/special-queries.asciidoc b/docs/reference/query-dsl/special-queries.asciidoc index a062fa7ddb1..4c69889040e 100644 --- a/docs/reference/query-dsl/special-queries.asciidoc +++ b/docs/reference/query-dsl/special-queries.asciidoc @@ -19,6 +19,11 @@ This query allows a script to act as a filter. Also see the This query finds queries that are stored as documents that match with the specified document. +<<query-dsl-feature-query,`feature` query>>:: + +A query that computes scores based on the values of numeric features and is +able to efficiently skip non-competitive hits. + <<query-dsl-wrapper-query,`wrapper` query>>:: A query that accepts other queries as json or yaml string.
@@ -29,4 +34,6 @@ include::script-query.asciidoc[] include::percolate-query.asciidoc[] +include::feature-query.asciidoc[] + include::wrapper-query.asciidoc[] diff --git a/docs/reference/search/request/docvalue-fields.asciidoc b/docs/reference/search/request/docvalue-fields.asciidoc index b4d2493d853..9d917c27ab0 100644 --- a/docs/reference/search/request/docvalue-fields.asciidoc +++ b/docs/reference/search/request/docvalue-fields.asciidoc @@ -11,13 +11,38 @@ GET /_search "query" : { "match_all": {} }, - "docvalue_fields" : ["test1", "test2"] + "docvalue_fields" : [ + { + "field": "my_ip_field", <1> + "format": "use_field_mapping" <2> + }, + { + "field": "my_date_field", + "format": "epoch_millis" <3> + } + ] } -------------------------------------------------- // CONSOLE +<1> the name of the field +<2> the special `use_field_mapping` format tells Elasticsearch to use the format from the mapping +<3> date fields may use a custom format Doc value fields can work on fields that are not stored. Note that if the fields parameter specifies fields without docvalues it will try to load the value from the fielddata cache causing the terms for that field to be loaded to memory (cached), which will result in more memory consumption. +[float] +==== Custom formats + +While most fields do not support custom formats, some of them do: + - <> fields can take any <>. + - <> fields accept a https://docs.oracle.com/javase/8/docs/api/java/text/DecimalFormat.html[DecimalFormat pattern]. + +All fields support the special `use_field_mapping` format, which tells +Elasticsearch to use the mappings to figure out a default format. + +NOTE: The default is currently to return the same output as +<>. However it will change in 7.0 +to behave as if the `use_field_mapping` format was provided. diff --git a/docs/reference/search/request/inner-hits.asciidoc b/docs/reference/search/request/inner-hits.asciidoc index dce6bb2a2d8..887ae2bdf14 100644 --- a/docs/reference/search/request/inner-hits.asciidoc +++ b/docs/reference/search/request/inner-hits.asciidoc @@ -242,7 +242,12 @@ POST test/_search }, "inner_hits": { "_source" : false, - "docvalue_fields" : ["comments.text.keyword"] + "docvalue_fields" : [ + { + "field": "comments.text.keyword", + "format": "use_field_mapping" + } + ] } } } diff --git a/docs/reference/search/request/script-fields.asciidoc b/docs/reference/search/request/script-fields.asciidoc index 55623faf268..da5868ea7d6 100644 --- a/docs/reference/search/request/script-fields.asciidoc +++ b/docs/reference/search/request/script-fields.asciidoc @@ -15,13 +15,13 @@ GET /_search "test1" : { "script" : { "lang": "painless", - "source": "doc['my_field_name'].value * 2" + "source": "doc['price'].value * 2" } }, "test2" : { "script" : { "lang": "painless", - "source": "doc['my_field_name'].value * params.factor", + "source": "doc['price'].value * params.factor", "params" : { "factor" : 2.0 } @@ -31,7 +31,7 @@ GET /_search } -------------------------------------------------- // CONSOLE - +// TEST[setup:sales] Script fields can work on fields that are not stored (`my_field_name` in the above case), and allow to return custom values to be returned (the diff --git a/docs/reference/search/request/search-type.asciidoc b/docs/reference/search/request/search-type.asciidoc index 622b01c453e..7cac034f29c 100644 --- a/docs/reference/search/request/search-type.asciidoc +++ b/docs/reference/search/request/search-type.asciidoc @@ -7,7 +7,7 @@ scattered to all the relevant shards and then all the results are gathered back. 
When doing scatter/gather type execution, there are several ways to do that, specifically with search engines. -One of the questions when executing a distributed search is how much +One of the questions when executing a distributed search is how many results to retrieve from each shard. For example, if we have 10 shards, the 1st shard might hold the most relevant results from 0 till 10, with other shards results ranking below it. For this reason, when executing a diff --git a/modules/analysis-common/src/main/java/org/elasticsearch/analysis/common/CharGroupTokenizerFactory.java b/modules/analysis-common/src/main/java/org/elasticsearch/analysis/common/CharGroupTokenizerFactory.java new file mode 100644 index 00000000000..d4e1e794a30 --- /dev/null +++ b/modules/analysis-common/src/main/java/org/elasticsearch/analysis/common/CharGroupTokenizerFactory.java @@ -0,0 +1,135 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.analysis.common; + +import org.apache.lucene.analysis.Tokenizer; +import org.apache.lucene.analysis.util.CharTokenizer; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.env.Environment; +import org.elasticsearch.index.IndexSettings; +import org.elasticsearch.index.analysis.AbstractTokenizerFactory; + +import java.util.HashSet; +import java.util.Set; + +public class CharGroupTokenizerFactory extends AbstractTokenizerFactory { + + private final Set tokenizeOnChars = new HashSet<>(); + private boolean tokenizeOnSpace = false; + private boolean tokenizeOnLetter = false; + private boolean tokenizeOnDigit = false; + private boolean tokenizeOnPunctuation = false; + private boolean tokenizeOnSymbol = false; + + public CharGroupTokenizerFactory(IndexSettings indexSettings, Environment environment, String name, Settings settings) { + super(indexSettings, name, settings); + + for (final String c : settings.getAsList("tokenize_on_chars")) { + if (c == null || c.length() == 0) { + throw new RuntimeException("[tokenize_on_chars] cannot contain empty characters"); + } + + if (c.length() == 1) { + tokenizeOnChars.add((int) c.charAt(0)); + } + else if (c.charAt(0) == '\\') { + tokenizeOnChars.add((int) parseEscapedChar(c)); + } else { + switch (c) { + case "letter": + tokenizeOnLetter = true; + break; + case "digit": + tokenizeOnDigit = true; + break; + case "whitespace": + tokenizeOnSpace = true; + break; + case "punctuation": + tokenizeOnPunctuation = true; + break; + case "symbol": + tokenizeOnSymbol = true; + break; + default: + throw new RuntimeException("Invalid character group [" + c + "]"); + } + } + } + } + + private char parseEscapedChar(final String s) { + int len = s.length(); + char c = s.charAt(0); + if (c == '\\') { + if (1 >= len) + throw new RuntimeException("Invalid escaped char 
in [" + s + "]"); + c = s.charAt(1); + switch (c) { + case '\\': + return '\\'; + case 'n': + return '\n'; + case 't': + return '\t'; + case 'r': + return '\r'; + case 'b': + return '\b'; + case 'f': + return '\f'; + case 'u': + if (len > 6) { + throw new RuntimeException("Invalid escaped char in [" + s + "]"); + } + return (char) Integer.parseInt(s.substring(2), 16); + default: + throw new RuntimeException("Invalid escaped char " + c + " in [" + s + "]"); + } + } else { + throw new RuntimeException("Invalid escaped char [" + s + "]"); + } + } + + @Override + public Tokenizer create() { + return new CharTokenizer() { + @Override + protected boolean isTokenChar(int c) { + if (tokenizeOnSpace && Character.isWhitespace(c)) { + return false; + } + if (tokenizeOnLetter && Character.isLetter(c)) { + return false; + } + if (tokenizeOnDigit && Character.isDigit(c)) { + return false; + } + if (tokenizeOnPunctuation && CharMatcher.Basic.PUNCTUATION.isTokenChar(c)) { + return false; + } + if (tokenizeOnSymbol && CharMatcher.Basic.SYMBOL.isTokenChar(c)) { + return false; + } + return !tokenizeOnChars.contains(c); + } + }; + } +} diff --git a/modules/analysis-common/src/main/java/org/elasticsearch/analysis/common/CommonAnalysisPlugin.java b/modules/analysis-common/src/main/java/org/elasticsearch/analysis/common/CommonAnalysisPlugin.java index 624194092a0..02a4197fba9 100644 --- a/modules/analysis-common/src/main/java/org/elasticsearch/analysis/common/CommonAnalysisPlugin.java +++ b/modules/analysis-common/src/main/java/org/elasticsearch/analysis/common/CommonAnalysisPlugin.java @@ -184,6 +184,7 @@ public class CommonAnalysisPlugin extends Plugin implements AnalysisPlugin { tokenizers.put("ngram", NGramTokenizerFactory::new); tokenizers.put("edgeNGram", EdgeNGramTokenizerFactory::new); tokenizers.put("edge_ngram", EdgeNGramTokenizerFactory::new); + tokenizers.put("char_group", CharGroupTokenizerFactory::new); tokenizers.put("classic", ClassicTokenizerFactory::new); tokenizers.put("letter", LetterTokenizerFactory::new); tokenizers.put("lowercase", LowerCaseTokenizerFactory::new); diff --git a/modules/analysis-common/src/test/java/org/elasticsearch/analysis/common/CharGroupTokenizerFactoryTests.java b/modules/analysis-common/src/test/java/org/elasticsearch/analysis/common/CharGroupTokenizerFactoryTests.java new file mode 100644 index 00000000000..1447531aa87 --- /dev/null +++ b/modules/analysis-common/src/test/java/org/elasticsearch/analysis/common/CharGroupTokenizerFactoryTests.java @@ -0,0 +1,74 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package org.elasticsearch.analysis.common; + +import org.apache.lucene.analysis.Tokenizer; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.index.Index; +import org.elasticsearch.index.IndexSettings; +import org.elasticsearch.test.ESTokenStreamTestCase; +import org.elasticsearch.test.IndexSettingsModule; + +import java.io.IOException; +import java.io.StringReader; +import java.util.Arrays; + + +public class CharGroupTokenizerFactoryTests extends ESTokenStreamTestCase { + public void testParseTokenChars() { + final Index index = new Index("test", "_na_"); + final Settings indexSettings = newAnalysisSettingsBuilder().build(); + IndexSettings indexProperties = IndexSettingsModule.newIndexSettings(index, indexSettings); + final String name = "cg"; + for (String[] conf : Arrays.asList( + new String[] { "\\v" }, + new String[] { "\\u00245" }, + new String[] { "commas" }, + new String[] { "a", "b", "c", "\\$" })) { + final Settings settings = newAnalysisSettingsBuilder().putList("tokenize_on_chars", conf).build(); + expectThrows(RuntimeException.class, () -> new CharGroupTokenizerFactory(indexProperties, null, name, settings).create()); + } + + for (String[] conf : Arrays.asList( + new String[0], + new String[] { "\\n" }, + new String[] { "\\u0024" }, + new String[] { "whitespace" }, + new String[] { "a", "b", "c" }, + new String[] { "a", "b", "c", "\\r" }, + new String[] { "\\r" }, + new String[] { "f", "o", "o", "symbol" })) { + final Settings settings = newAnalysisSettingsBuilder().putList("tokenize_on_chars", Arrays.asList(conf)).build(); + new CharGroupTokenizerFactory(indexProperties, null, name, settings).create(); + // no exception + } + } + + public void testTokenization() throws IOException { + final Index index = new Index("test", "_na_"); + final String name = "cg"; + final Settings indexSettings = newAnalysisSettingsBuilder().build(); + final Settings settings = newAnalysisSettingsBuilder().putList("tokenize_on_chars", "whitespace", ":", "\\u0024").build(); + Tokenizer tokenizer = new CharGroupTokenizerFactory(IndexSettingsModule.newIndexSettings(index, indexSettings), + null, name, settings).create(); + tokenizer.setReader(new StringReader("foo bar $34 test:test2")); + assertTokenStreamContents(tokenizer, new String[] {"foo", "bar", "34", "test", "test2"}); + } +} diff --git a/modules/lang-expression/licenses/lucene-expressions-7.4.0-snapshot-59f2b7aec2.jar.sha1 b/modules/lang-expression/licenses/lucene-expressions-7.4.0-snapshot-59f2b7aec2.jar.sha1 deleted file mode 100644 index 702782e1c5e..00000000000 --- a/modules/lang-expression/licenses/lucene-expressions-7.4.0-snapshot-59f2b7aec2.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -a3dba337d06e1f5930cb7ae638c1655b99ce0cb7 \ No newline at end of file diff --git a/modules/lang-expression/licenses/lucene-expressions-7.4.0-snapshot-cc2ee23050.jar.sha1 b/modules/lang-expression/licenses/lucene-expressions-7.4.0-snapshot-cc2ee23050.jar.sha1 new file mode 100644 index 00000000000..8222106897b --- /dev/null +++ b/modules/lang-expression/licenses/lucene-expressions-7.4.0-snapshot-cc2ee23050.jar.sha1 @@ -0,0 +1 @@ +1e28b448387ec05d655f8c81ee54e13ff2975a4d \ No newline at end of file diff --git a/modules/mapper-extras/src/main/java/org/elasticsearch/index/mapper/FeatureFieldMapper.java b/modules/mapper-extras/src/main/java/org/elasticsearch/index/mapper/FeatureFieldMapper.java new file mode 100644 index 00000000000..5b0158ff55b --- /dev/null +++ 
b/modules/mapper-extras/src/main/java/org/elasticsearch/index/mapper/FeatureFieldMapper.java @@ -0,0 +1,248 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.index.mapper; + +import org.apache.lucene.document.FeatureField; +import org.apache.lucene.index.IndexOptions; +import org.apache.lucene.index.IndexableField; +import org.apache.lucene.index.Term; +import org.apache.lucene.search.Query; +import org.apache.lucene.search.TermQuery; +import org.elasticsearch.common.lucene.Lucene; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.xcontent.XContentBuilder; +import org.elasticsearch.common.xcontent.XContentParser.Token; +import org.elasticsearch.common.xcontent.support.XContentMapValues; +import org.elasticsearch.index.fielddata.IndexFieldData; +import org.elasticsearch.index.fielddata.plain.DocValuesIndexFieldData; +import org.elasticsearch.index.query.QueryShardContext; + +import java.io.IOException; +import java.util.Iterator; +import java.util.List; +import java.util.Map; +import java.util.Objects; + +/** + * A {@link FieldMapper} that exposes Lucene's {@link FeatureField}. 
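+ *
+ * A minimal sketch of how such a field is declared (illustrative only; the
+ * field name {@code pagerank} is hypothetical, mirroring the tests in this
+ * change):
+ * <pre>
+ * String mapping = Strings.toString(XContentFactory.jsonBuilder()
+ *     .startObject().startObject("type").startObject("properties")
+ *         .startObject("pagerank").field("type", "feature").endObject()
+ *     .endObject().endObject().endObject());
+ * // Each document then supplies a single numeric value for the field; the
+ * // value is indexed into the shared "_feature" field as a term frequency.
+ * </pre>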
+ */ +public class FeatureFieldMapper extends FieldMapper { + + public static final String CONTENT_TYPE = "feature"; + + public static class Defaults { + public static final MappedFieldType FIELD_TYPE = new FeatureFieldType(); + + static { + FIELD_TYPE.setTokenized(false); + FIELD_TYPE.setIndexOptions(IndexOptions.NONE); + FIELD_TYPE.setHasDocValues(false); + FIELD_TYPE.setOmitNorms(true); + FIELD_TYPE.freeze(); + } + }
+ + public static class Builder extends FieldMapper.Builder<Builder, FeatureFieldMapper> { + + public Builder(String name) { + super(name, Defaults.FIELD_TYPE, Defaults.FIELD_TYPE); + builder = this; + } + + @Override + public FeatureFieldType fieldType() { + return (FeatureFieldType) super.fieldType(); + } + + public Builder positiveScoreImpact(boolean v) { + fieldType().setPositiveScoreImpact(v); + return builder; + } + + @Override + public FeatureFieldMapper build(BuilderContext context) { + setupFieldType(context); + return new FeatureFieldMapper( + name, fieldType, defaultFieldType, + context.indexSettings(), multiFieldsBuilder.build(this, context), copyTo); + } + }
+ + public static class TypeParser implements Mapper.TypeParser { + @Override + public Mapper.Builder<?, ?> parse(String name, Map<String, Object> node, ParserContext parserContext) throws MapperParsingException { + FeatureFieldMapper.Builder builder = new FeatureFieldMapper.Builder(name); + for (Iterator<Map.Entry<String, Object>> iterator = node.entrySet().iterator(); iterator.hasNext();) { + Map.Entry<String, Object> entry = iterator.next(); + String propName = entry.getKey(); + Object propNode = entry.getValue(); + if (propName.equals("positive_score_impact")) { + builder.positiveScoreImpact(XContentMapValues.nodeBooleanValue(propNode)); + iterator.remove(); + } + } + return builder; + } + }
+ + public static final class FeatureFieldType extends MappedFieldType { + + private boolean positiveScoreImpact = true; + + public FeatureFieldType() { + setIndexAnalyzer(Lucene.KEYWORD_ANALYZER); + setSearchAnalyzer(Lucene.KEYWORD_ANALYZER); + } + + protected FeatureFieldType(FeatureFieldType ref) { + super(ref); + this.positiveScoreImpact = ref.positiveScoreImpact; + } + + public FeatureFieldType clone() { + return new FeatureFieldType(this); + } + + @Override + public boolean equals(Object o) { + if (super.equals(o) == false) { + return false; + } + FeatureFieldType other = (FeatureFieldType) o; + return Objects.equals(positiveScoreImpact, other.positiveScoreImpact); + } + + @Override + public int hashCode() { + int h = super.hashCode(); + h = 31 * h + Objects.hashCode(positiveScoreImpact); + return h; + }
+ + @Override + public void checkCompatibility(MappedFieldType other, List<String> conflicts) { + super.checkCompatibility(other, conflicts); + if (positiveScoreImpact != ((FeatureFieldType) other).positiveScoreImpact()) { + conflicts.add("mapper [" + name() + "] has different [positive_score_impact] values"); + } + } + + @Override + public String typeName() { + return CONTENT_TYPE; + } + + public boolean positiveScoreImpact() { + return positiveScoreImpact; + } + + public void setPositiveScoreImpact(boolean positiveScoreImpact) { + checkIfFrozen(); + this.positiveScoreImpact = positiveScoreImpact; + } + + @Override + public Query existsQuery(QueryShardContext context) { + return new TermQuery(new Term("_feature", name())); + } + + @Override + public Query nullValueQuery() { + if (nullValue() == null) { + return null; + } + return termQuery(nullValue(), null); + } + + @Override + public IndexFieldData.Builder fielddataBuilder(String fullyQualifiedIndexName) { + failIfNoDocValues(); + return new
DocValuesIndexFieldData.Builder(); + } + + @Override + public Query termQuery(Object value, QueryShardContext context) { + throw new UnsupportedOperationException("Queries on [feature] fields are not supported"); + } + } + + private FeatureFieldMapper(String simpleName, MappedFieldType fieldType, MappedFieldType defaultFieldType, + Settings indexSettings, MultiFields multiFields, CopyTo copyTo) { + super(simpleName, fieldType, defaultFieldType, indexSettings, multiFields, copyTo); + assert fieldType.indexOptions().compareTo(IndexOptions.DOCS_AND_FREQS) <= 0; + } + + @Override + protected FeatureFieldMapper clone() { + return (FeatureFieldMapper) super.clone(); + } + + @Override + public FeatureFieldType fieldType() { + return (FeatureFieldType) super.fieldType(); + } + + @Override + protected void parseCreateField(ParseContext context, List fields) throws IOException { + float value; + if (context.externalValueSet()) { + Object v = context.externalValue(); + if (v instanceof Number) { + value = ((Number) v).floatValue(); + } else { + value = Float.parseFloat(v.toString()); + } + } else if (context.parser().currentToken() == Token.VALUE_NULL) { + // skip + return; + } else { + value = context.parser().floatValue(); + } + + if (context.doc().getByKey(name()) != null) { + throw new IllegalArgumentException("[feature] fields do not support indexing multiple values for the same field [" + name() + + "] in the same document"); + } + + if (fieldType().positiveScoreImpact() == false) { + value = 1 / value; + } + + context.doc().addWithKey(name(), new FeatureField("_feature", name(), value)); + } + + @Override + protected String contentType() { + return CONTENT_TYPE; + } + + @Override + protected void doXContentBody(XContentBuilder builder, boolean includeDefaults, Params params) throws IOException { + super.doXContentBody(builder, includeDefaults, params); + + if (includeDefaults || fieldType().nullValue() != null) { + builder.field("null_value", fieldType().nullValue()); + } + + if (includeDefaults || fieldType().positiveScoreImpact() == false) { + builder.field("positive_score_impact", fieldType().positiveScoreImpact()); + } + } +} diff --git a/modules/mapper-extras/src/main/java/org/elasticsearch/index/mapper/FeatureMetaFieldMapper.java b/modules/mapper-extras/src/main/java/org/elasticsearch/index/mapper/FeatureMetaFieldMapper.java new file mode 100644 index 00000000000..2102a029a6a --- /dev/null +++ b/modules/mapper-extras/src/main/java/org/elasticsearch/index/mapper/FeatureMetaFieldMapper.java @@ -0,0 +1,151 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package org.elasticsearch.index.mapper; + +import org.apache.lucene.index.IndexOptions; +import org.apache.lucene.index.IndexableField; +import org.apache.lucene.search.Query; +import org.elasticsearch.common.lucene.Lucene; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.xcontent.XContentBuilder; +import org.elasticsearch.index.query.QueryShardContext; + +import java.io.IOException; +import java.util.Collections; +import java.util.List; +import java.util.Map; + +/** + * This meta field only exists because feature fields index everything into a + * common _feature field and Elasticsearch has a custom codec that complains + * when fields exist in the index and not in mappings. + */ +public class FeatureMetaFieldMapper extends MetadataFieldMapper { + + public static final String NAME = "_feature"; + + public static final String CONTENT_TYPE = "_feature"; + + public static class Defaults { + public static final MappedFieldType FIELD_TYPE = new FeatureMetaFieldType(); + + static { + FIELD_TYPE.setIndexOptions(IndexOptions.DOCS_AND_FREQS); + FIELD_TYPE.setTokenized(true); + FIELD_TYPE.setStored(false); + FIELD_TYPE.setOmitNorms(true); + FIELD_TYPE.setIndexAnalyzer(Lucene.KEYWORD_ANALYZER); + FIELD_TYPE.setSearchAnalyzer(Lucene.KEYWORD_ANALYZER); + FIELD_TYPE.setName(NAME); + FIELD_TYPE.freeze(); + } + } + + public static class Builder extends MetadataFieldMapper.Builder { + + public Builder(MappedFieldType existing) { + super(NAME, existing == null ? Defaults.FIELD_TYPE : existing, Defaults.FIELD_TYPE); + } + + @Override + public FeatureMetaFieldMapper build(BuilderContext context) { + setupFieldType(context); + return new FeatureMetaFieldMapper(fieldType, context.indexSettings()); + } + } + + public static class TypeParser implements MetadataFieldMapper.TypeParser { + @Override + public MetadataFieldMapper.Builder parse(String name, + Map node, ParserContext parserContext) throws MapperParsingException { + return new Builder(parserContext.mapperService().fullName(NAME)); + } + + @Override + public MetadataFieldMapper getDefault(MappedFieldType fieldType, ParserContext context) { + final Settings indexSettings = context.mapperService().getIndexSettings().getSettings(); + if (fieldType != null) { + return new FeatureMetaFieldMapper(indexSettings, fieldType); + } else { + return parse(NAME, Collections.emptyMap(), context) + .build(new BuilderContext(indexSettings, new ContentPath(1))); + } + } + } + + public static final class FeatureMetaFieldType extends MappedFieldType { + + public FeatureMetaFieldType() { + } + + protected FeatureMetaFieldType(FeatureMetaFieldType ref) { + super(ref); + } + + @Override + public FeatureMetaFieldType clone() { + return new FeatureMetaFieldType(this); + } + + @Override + public String typeName() { + return CONTENT_TYPE; + } + + @Override + public Query existsQuery(QueryShardContext context) { + throw new UnsupportedOperationException("Cannot run exists query on [_feature]"); + } + + @Override + public Query termQuery(Object value, QueryShardContext context) { + throw new UnsupportedOperationException("The [_feature] field may not be queried directly"); + } + } + + private FeatureMetaFieldMapper(Settings indexSettings, MappedFieldType existing) { + this(existing.clone(), indexSettings); + } + + private FeatureMetaFieldMapper(MappedFieldType fieldType, Settings indexSettings) { + super(NAME, fieldType, Defaults.FIELD_TYPE, indexSettings); + } + + @Override + public void preParse(ParseContext context) throws IOException 
{} + + @Override + protected void parseCreateField(ParseContext context, List fields) throws IOException { + throw new AssertionError("Should never be called"); + } + + @Override + public void postParse(ParseContext context) throws IOException {} + + @Override + protected String contentType() { + return CONTENT_TYPE; + } + + @Override + public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { + return builder; + } +} diff --git a/modules/mapper-extras/src/main/java/org/elasticsearch/index/mapper/MapperExtrasPlugin.java b/modules/mapper-extras/src/main/java/org/elasticsearch/index/mapper/MapperExtrasPlugin.java index 2b249a5fe6e..4a9aea21a8a 100644 --- a/modules/mapper-extras/src/main/java/org/elasticsearch/index/mapper/MapperExtrasPlugin.java +++ b/modules/mapper-extras/src/main/java/org/elasticsearch/index/mapper/MapperExtrasPlugin.java @@ -19,21 +19,37 @@ package org.elasticsearch.index.mapper; +import org.elasticsearch.index.mapper.MetadataFieldMapper.TypeParser; +import org.elasticsearch.index.query.FeatureQueryBuilder; import org.elasticsearch.plugins.MapperPlugin; import org.elasticsearch.plugins.Plugin; +import org.elasticsearch.plugins.SearchPlugin; import java.util.Collections; import java.util.LinkedHashMap; +import java.util.List; import java.util.Map; -public class MapperExtrasPlugin extends Plugin implements MapperPlugin { +public class MapperExtrasPlugin extends Plugin implements MapperPlugin, SearchPlugin { @Override public Map getMappers() { Map mappers = new LinkedHashMap<>(); mappers.put(ScaledFloatFieldMapper.CONTENT_TYPE, new ScaledFloatFieldMapper.TypeParser()); mappers.put(TokenCountFieldMapper.CONTENT_TYPE, new TokenCountFieldMapper.TypeParser()); + mappers.put(FeatureFieldMapper.CONTENT_TYPE, new FeatureFieldMapper.TypeParser()); return Collections.unmodifiableMap(mappers); } + @Override + public Map getMetadataMappers() { + return Collections.singletonMap(FeatureMetaFieldMapper.CONTENT_TYPE, new FeatureMetaFieldMapper.TypeParser()); + } + + @Override + public List> getQueries() { + return Collections.singletonList( + new QuerySpec<>(FeatureQueryBuilder.NAME, FeatureQueryBuilder::new, p -> FeatureQueryBuilder.PARSER.parse(p, null))); + } + } diff --git a/modules/mapper-extras/src/main/java/org/elasticsearch/index/query/FeatureQueryBuilder.java b/modules/mapper-extras/src/main/java/org/elasticsearch/index/query/FeatureQueryBuilder.java new file mode 100644 index 00000000000..761de46731d --- /dev/null +++ b/modules/mapper-extras/src/main/java/org/elasticsearch/index/query/FeatureQueryBuilder.java @@ -0,0 +1,354 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package org.elasticsearch.index.query; + +import org.apache.lucene.document.FeatureField; +import org.apache.lucene.search.MatchNoDocsQuery; +import org.apache.lucene.search.Query; +import org.elasticsearch.common.ParseField; +import org.elasticsearch.common.io.stream.StreamInput; +import org.elasticsearch.common.io.stream.StreamOutput; +import org.elasticsearch.common.xcontent.ConstructingObjectParser; +import org.elasticsearch.common.xcontent.XContentBuilder; +import org.elasticsearch.index.mapper.FeatureFieldMapper.FeatureFieldType; +import org.elasticsearch.index.mapper.MappedFieldType; + +import java.io.IOException; +import java.util.Arrays; +import java.util.Objects; + +/** + * Query to run on a [feature] field. + */ +public final class FeatureQueryBuilder extends AbstractQueryBuilder { + + /** + * Scoring function for a [feature] field. + */ + public abstract static class ScoreFunction { + + private ScoreFunction() {} // prevent extensions by users + + abstract void writeTo(StreamOutput out) throws IOException; + + abstract Query toQuery(String feature, boolean positiveScoreImpact) throws IOException; + + abstract void doXContent(XContentBuilder builder) throws IOException; + + /** + * A scoring function that scores documents as {@code Math.log(scalingFactor + S)} + * where S is the value of the static feature. + */ + public static class Log extends ScoreFunction { + + private static final ConstructingObjectParser PARSER = new ConstructingObjectParser<>( + "log", a -> new Log((Float) a[0])); + static { + PARSER.declareFloat(ConstructingObjectParser.constructorArg(), new ParseField("scaling_factor")); + } + + private final float scalingFactor; + + public Log(float scalingFactor) { + this.scalingFactor = scalingFactor; + } + + private Log(StreamInput in) throws IOException { + this(in.readFloat()); + } + + @Override + public boolean equals(Object obj) { + if (obj == null || obj.getClass() != getClass()) { + return false; + } + Log that = (Log) obj; + return scalingFactor == that.scalingFactor; + } + + @Override + public int hashCode() { + return Float.hashCode(scalingFactor); + } + + @Override + void writeTo(StreamOutput out) throws IOException { + out.writeByte((byte) 0); + out.writeFloat(scalingFactor); + } + + @Override + void doXContent(XContentBuilder builder) throws IOException { + builder.startObject("log"); + builder.field("scaling_factor", scalingFactor); + builder.endObject(); + } + + @Override + Query toQuery(String feature, boolean positiveScoreImpact) throws IOException { + if (positiveScoreImpact == false) { + throw new IllegalArgumentException("Cannot use the [log] function with a field that has a negative score impact as " + + "it would trigger negative scores"); + } + return FeatureField.newLogQuery("_feature", feature, DEFAULT_BOOST, scalingFactor); + } + } + + /** + * A scoring function that scores documents as {@code S / (S + pivot)} where S is + * the value of the static feature. + */ + public static class Saturation extends ScoreFunction { + + private static final ConstructingObjectParser PARSER = new ConstructingObjectParser<>( + "saturation", a -> new Saturation((Float) a[0])); + static { + PARSER.declareFloat(ConstructingObjectParser.optionalConstructorArg(), new ParseField("pivot")); + } + + private final Float pivot; + + /** Constructor with a default pivot, computed as the geometric average of + * all feature values in the index. 
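+ * As an illustrative computation (not part of the original docs): with an
+ * explicit pivot of 20, a feature value of 10 scores 10 / (10 + 20) ≈ 0.33
+ * and a value of 100 scores 100 / (100 + 20) ≈ 0.83, so scores approach 1
+ * as the feature value grows.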
*/ + public Saturation() { + this((Float) null); + } + + public Saturation(float pivot) { + this(Float.valueOf(pivot)); + } + + private Saturation(Float pivot) { + this.pivot = pivot; + } + + private Saturation(StreamInput in) throws IOException { + this(in.readOptionalFloat()); + } + + @Override + public boolean equals(Object obj) { + if (obj == null || obj.getClass() != getClass()) { + return false; + } + Saturation that = (Saturation) obj; + return Objects.equals(pivot, that.pivot); + } + + @Override + public int hashCode() { + return Objects.hashCode(pivot); + } + + @Override + void writeTo(StreamOutput out) throws IOException { + out.writeByte((byte) 1); + out.writeOptionalFloat(pivot); + } + + @Override + void doXContent(XContentBuilder builder) throws IOException { + builder.startObject("saturation"); + if (pivot != null) { + builder.field("pivot", pivot); + } + builder.endObject(); + } + + @Override + Query toQuery(String feature, boolean positiveScoreImpact) throws IOException { + if (pivot == null) { + return FeatureField.newSaturationQuery("_feature", feature); + } else { + return FeatureField.newSaturationQuery("_feature", feature, DEFAULT_BOOST, pivot); + } + } + } + + /** + * A scoring function that scores documents as {@code S^exp / (S^exp + pivot^exp)} + * where S is the value of the static feature. + */ + public static class Sigmoid extends ScoreFunction { + + private static final ConstructingObjectParser PARSER = new ConstructingObjectParser<>( + "sigmoid", a -> new Sigmoid((Float) a[0], ((Float) a[1]).floatValue())); + static { + PARSER.declareFloat(ConstructingObjectParser.constructorArg(), new ParseField("pivot")); + PARSER.declareFloat(ConstructingObjectParser.constructorArg(), new ParseField("exponent")); + } + + private final float pivot; + private final float exp; + + public Sigmoid(float pivot, float exp) { + this.pivot = pivot; + this.exp = exp; + } + + private Sigmoid(StreamInput in) throws IOException { + this(in.readFloat(), in.readFloat()); + } + + @Override + public boolean equals(Object obj) { + if (obj == null || obj.getClass() != getClass()) { + return false; + } + Sigmoid that = (Sigmoid) obj; + return pivot == that.pivot + && exp == that.exp; + } + + @Override + public int hashCode() { + return Objects.hash(pivot, exp); + } + + @Override + void writeTo(StreamOutput out) throws IOException { + out.writeByte((byte) 2); + out.writeFloat(pivot); + out.writeFloat(exp); + } + + @Override + void doXContent(XContentBuilder builder) throws IOException { + builder.startObject("sigmoid"); + builder.field("pivot", pivot); + builder.field("exponent", exp); + builder.endObject(); + } + + @Override + Query toQuery(String feature, boolean positiveScoreImpact) throws IOException { + return FeatureField.newSigmoidQuery("_feature", feature, DEFAULT_BOOST, pivot, exp); + } + } + } + + private static ScoreFunction readScoreFunction(StreamInput in) throws IOException { + byte b = in.readByte(); + switch (b) { + case 0: + return new ScoreFunction.Log(in); + case 1: + return new ScoreFunction.Saturation(in); + case 2: + return new ScoreFunction.Sigmoid(in); + default: + throw new IOException("Illegal score function id: " + b); + } + } + + public static ConstructingObjectParser PARSER = new ConstructingObjectParser<>( + "feature", args -> { + final String field = (String) args[0]; + final float boost = args[1] == null ? 
DEFAULT_BOOST : (Float) args[1]; + final String queryName = (String) args[2]; + long numNonNulls = Arrays.stream(args, 3, args.length).filter(Objects::nonNull).count(); + final FeatureQueryBuilder query; + if (numNonNulls > 1) { + throw new IllegalArgumentException("Can only specify one of [log], [saturation] and [sigmoid]"); + } else if (numNonNulls == 0) { + query = new FeatureQueryBuilder(field, new ScoreFunction.Saturation()); + } else { + ScoreFunction scoreFunction = (ScoreFunction) Arrays.stream(args, 3, args.length) + .filter(Objects::nonNull) + .findAny() + .get(); + query = new FeatureQueryBuilder(field, scoreFunction); + } + query.boost(boost); + query.queryName(queryName); + return query; + }); + static { + PARSER.declareString(ConstructingObjectParser.constructorArg(), new ParseField("field")); + PARSER.declareFloat(ConstructingObjectParser.optionalConstructorArg(), BOOST_FIELD); + PARSER.declareString(ConstructingObjectParser.optionalConstructorArg(), NAME_FIELD); + PARSER.declareObject(ConstructingObjectParser.optionalConstructorArg(), + ScoreFunction.Log.PARSER, new ParseField("log")); + PARSER.declareObject(ConstructingObjectParser.optionalConstructorArg(), + ScoreFunction.Saturation.PARSER, new ParseField("saturation")); + PARSER.declareObject(ConstructingObjectParser.optionalConstructorArg(), + ScoreFunction.Sigmoid.PARSER, new ParseField("sigmoid")); + } + + public static final String NAME = "feature"; + + private final String field; + private final ScoreFunction scoreFunction; + + public FeatureQueryBuilder(String field, ScoreFunction scoreFunction) { + this.field = Objects.requireNonNull(field); + this.scoreFunction = Objects.requireNonNull(scoreFunction); + } + + public FeatureQueryBuilder(StreamInput in) throws IOException { + super(in); + this.field = in.readString(); + this.scoreFunction = readScoreFunction(in); + } + + @Override + public String getWriteableName() { + return NAME; + } + + @Override + protected void doWriteTo(StreamOutput out) throws IOException { + out.writeString(field); + scoreFunction.writeTo(out); + } + + @Override + protected void doXContent(XContentBuilder builder, Params params) throws IOException { + builder.startObject(getName()); + builder.field("field", field); + scoreFunction.doXContent(builder); + printBoostAndQueryName(builder); + builder.endObject(); + } + + @Override + protected Query doToQuery(QueryShardContext context) throws IOException { + final MappedFieldType ft = context.fieldMapper(field); + if (ft == null) { + return new MatchNoDocsQuery(); + } + if (ft instanceof FeatureFieldType == false) { + throw new IllegalArgumentException("[feature] query only works on [feature] fields, not [" + ft.typeName() + "]"); + } + final FeatureFieldType fft = (FeatureFieldType) ft; + return scoreFunction.toQuery(field, fft.positiveScoreImpact()); + } + + @Override + protected boolean doEquals(FeatureQueryBuilder other) { + return Objects.equals(field, other.field) && Objects.equals(scoreFunction, other.scoreFunction); + } + + @Override + protected int doHashCode() { + return Objects.hash(field, scoreFunction); + } + +} diff --git a/modules/mapper-extras/src/test/java/org/elasticsearch/index/mapper/FeatureFieldMapperTests.java b/modules/mapper-extras/src/test/java/org/elasticsearch/index/mapper/FeatureFieldMapperTests.java new file mode 100644 index 00000000000..2e9fa98cbbe --- /dev/null +++ b/modules/mapper-extras/src/test/java/org/elasticsearch/index/mapper/FeatureFieldMapperTests.java @@ -0,0 +1,173 @@ +/* + * Licensed to Elasticsearch 
under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.index.mapper; + +import org.apache.lucene.analysis.TokenStream; +import org.apache.lucene.analysis.tokenattributes.TermFrequencyAttribute; +import org.apache.lucene.document.FeatureField; +import org.apache.lucene.index.IndexableField; +import org.elasticsearch.common.Strings; +import org.elasticsearch.common.bytes.BytesReference; +import org.elasticsearch.common.compress.CompressedXContent; +import org.elasticsearch.common.xcontent.XContentFactory; +import org.elasticsearch.common.xcontent.XContentType; +import org.elasticsearch.index.IndexService; +import org.elasticsearch.plugins.Plugin; +import org.elasticsearch.test.ESSingleNodeTestCase; +import org.hamcrest.Matchers; +import org.junit.Before; + +import java.io.IOException; +import java.util.Collection; + +public class FeatureFieldMapperTests extends ESSingleNodeTestCase { + + IndexService indexService; + DocumentMapperParser parser; + + @Before + public void setup() { + indexService = createIndex("test"); + parser = indexService.mapperService().documentMapperParser(); + } + + @Override + protected Collection> getPlugins() { + return pluginList(MapperExtrasPlugin.class); + } + + private static int getFrequency(TokenStream tk) throws IOException { + TermFrequencyAttribute freqAttribute = tk.addAttribute(TermFrequencyAttribute.class); + tk.reset(); + assertTrue(tk.incrementToken()); + int freq = freqAttribute.getTermFrequency(); + assertFalse(tk.incrementToken()); + return freq; + } + + public void testDefaults() throws Exception { + String mapping = Strings.toString(XContentFactory.jsonBuilder().startObject().startObject("type") + .startObject("properties").startObject("field").field("type", "feature").endObject().endObject() + .endObject().endObject()); + + DocumentMapper mapper = parser.parse("type", new CompressedXContent(mapping)); + + assertEquals(mapping, mapper.mappingSource().toString()); + + ParsedDocument doc1 = mapper.parse(SourceToParse.source("test", "type", "1", BytesReference + .bytes(XContentFactory.jsonBuilder() + .startObject() + .field("field", 10) + .endObject()), + XContentType.JSON)); + + IndexableField[] fields = doc1.rootDoc().getFields("_feature"); + assertEquals(1, fields.length); + assertThat(fields[0], Matchers.instanceOf(FeatureField.class)); + FeatureField featureField1 = (FeatureField) fields[0]; + + ParsedDocument doc2 = mapper.parse(SourceToParse.source("test", "type", "1", BytesReference + .bytes(XContentFactory.jsonBuilder() + .startObject() + .field("field", 12) + .endObject()), + XContentType.JSON)); + + FeatureField featureField2 = (FeatureField) doc2.rootDoc().getFields("_feature")[0]; + + int freq1 = getFrequency(featureField1.tokenStream(null, null)); + int freq2 = getFrequency(featureField2.tokenStream(null, 
null)); + assertTrue(freq1 < freq2); + } + + public void testNegativeScoreImpact() throws Exception { + String mapping = Strings.toString(XContentFactory.jsonBuilder().startObject().startObject("type") + .startObject("properties").startObject("field").field("type", "feature") + .field("positive_score_impact", false).endObject().endObject() + .endObject().endObject()); + + DocumentMapper mapper = parser.parse("type", new CompressedXContent(mapping)); + + assertEquals(mapping, mapper.mappingSource().toString()); + + ParsedDocument doc1 = mapper.parse(SourceToParse.source("test", "type", "1", BytesReference + .bytes(XContentFactory.jsonBuilder() + .startObject() + .field("field", 10) + .endObject()), + XContentType.JSON)); + + IndexableField[] fields = doc1.rootDoc().getFields("_feature"); + assertEquals(1, fields.length); + assertThat(fields[0], Matchers.instanceOf(FeatureField.class)); + FeatureField featureField1 = (FeatureField) fields[0]; + + ParsedDocument doc2 = mapper.parse(SourceToParse.source("test", "type", "1", BytesReference + .bytes(XContentFactory.jsonBuilder() + .startObject() + .field("field", 12) + .endObject()), + XContentType.JSON)); + + FeatureField featureField2 = (FeatureField) doc2.rootDoc().getFields("_feature")[0]; + + int freq1 = getFrequency(featureField1.tokenStream(null, null)); + int freq2 = getFrequency(featureField2.tokenStream(null, null)); + assertTrue(freq1 > freq2); + }
+ + public void testRejectMultiValuedFields() throws MapperParsingException, IOException { + String mapping = Strings.toString(XContentFactory.jsonBuilder().startObject().startObject("type") + .startObject("properties").startObject("field").field("type", "feature").endObject().startObject("foo") + .startObject("properties").startObject("field").field("type", "feature").endObject().endObject() + .endObject().endObject().endObject().endObject()); + + DocumentMapper mapper = parser.parse("type", new CompressedXContent(mapping)); + + assertEquals(mapping, mapper.mappingSource().toString()); + + // The single-field array case is left disabled in this change: + /*MapperParsingException e = expectThrows(MapperParsingException.class, + () -> mapper.parse(SourceToParse.source("test", "type", "1", BytesReference + .bytes(XContentFactory.jsonBuilder() + .startObject() + .field("field", Arrays.asList(10, 20)) + .endObject()), + XContentType.JSON))); + assertEquals("[feature] fields do not support indexing multiple values for the same field [field] in the same document", + e.getCause().getMessage());*/ + + MapperParsingException e = expectThrows(MapperParsingException.class, + () -> mapper.parse(SourceToParse.source("test", "type", "1", BytesReference + .bytes(XContentFactory.jsonBuilder() + .startObject() + .startArray("foo") + .startObject() + .field("field", 10) + .endObject() + .startObject() + .field("field", 20) + .endObject() + .endArray() + .endObject()), + XContentType.JSON))); + assertEquals("[feature] fields do not support indexing multiple values for the same field [foo.field] in the same document", + e.getCause().getMessage()); + } +} diff --git a/modules/mapper-extras/src/test/java/org/elasticsearch/index/mapper/FeatureFieldTypeTests.java b/modules/mapper-extras/src/test/java/org/elasticsearch/index/mapper/FeatureFieldTypeTests.java new file mode 100644 index 00000000000..9debd073660 --- /dev/null +++ b/modules/mapper-extras/src/test/java/org/elasticsearch/index/mapper/FeatureFieldTypeTests.java @@ -0,0 +1,46 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements.
See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.index.mapper; + +import org.junit.Before; + +public class FeatureFieldTypeTests extends FieldTypeTestCase { + + @Override + protected MappedFieldType createDefaultFieldType() { + return new FeatureFieldMapper.FeatureFieldType(); + } + + @Before + public void setupProperties() { + addModifier(new Modifier("positive_score_impact", false) { + @Override + public void modify(MappedFieldType ft) { + FeatureFieldMapper.FeatureFieldType tft = (FeatureFieldMapper.FeatureFieldType)ft; + tft.setPositiveScoreImpact(tft.positiveScoreImpact() == false); + } + @Override + public void normalizeOther(MappedFieldType other) { + super.normalizeOther(other); + ((FeatureFieldMapper.FeatureFieldType) other).setPositiveScoreImpact(true); + } + }); + } +} diff --git a/modules/mapper-extras/src/test/java/org/elasticsearch/index/mapper/FeatureMetaFieldMapperTests.java b/modules/mapper-extras/src/test/java/org/elasticsearch/index/mapper/FeatureMetaFieldMapperTests.java new file mode 100644 index 00000000000..99697b1abaf --- /dev/null +++ b/modules/mapper-extras/src/test/java/org/elasticsearch/index/mapper/FeatureMetaFieldMapperTests.java @@ -0,0 +1,58 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package org.elasticsearch.index.mapper; + +import org.elasticsearch.common.Strings; +import org.elasticsearch.common.compress.CompressedXContent; +import org.elasticsearch.common.xcontent.XContentFactory; +import org.elasticsearch.index.IndexService; +import org.elasticsearch.plugins.Plugin; +import org.elasticsearch.test.ESSingleNodeTestCase; +import org.junit.Before; + +import java.util.Collection; + +public class FeatureMetaFieldMapperTests extends ESSingleNodeTestCase { + + IndexService indexService; + DocumentMapperParser parser; + + @Before + public void setup() { + indexService = createIndex("test"); + parser = indexService.mapperService().documentMapperParser(); + } + + @Override + protected Collection> getPlugins() { + return pluginList(MapperExtrasPlugin.class); + } + + public void testBasics() throws Exception { + String mapping = Strings.toString(XContentFactory.jsonBuilder().startObject().startObject("type") + .startObject("properties").startObject("field").field("type", "feature").endObject().endObject() + .endObject().endObject()); + + DocumentMapper mapper = parser.parse("type", new CompressedXContent(mapping)); + + assertEquals(mapping, mapper.mappingSource().toString()); + assertNotNull(mapper.metadataMapper(FeatureMetaFieldMapper.class)); + } +} diff --git a/modules/mapper-extras/src/test/java/org/elasticsearch/index/mapper/FeatureMetaFieldTypeTests.java b/modules/mapper-extras/src/test/java/org/elasticsearch/index/mapper/FeatureMetaFieldTypeTests.java new file mode 100644 index 00000000000..ef261573c96 --- /dev/null +++ b/modules/mapper-extras/src/test/java/org/elasticsearch/index/mapper/FeatureMetaFieldTypeTests.java @@ -0,0 +1,29 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.index.mapper; + +public class FeatureMetaFieldTypeTests extends FieldTypeTestCase { + + @Override + protected MappedFieldType createDefaultFieldType() { + return new FeatureMetaFieldMapper.FeatureMetaFieldType(); + } + +} diff --git a/modules/mapper-extras/src/test/java/org/elasticsearch/index/query/FeatureQueryBuilderTests.java b/modules/mapper-extras/src/test/java/org/elasticsearch/index/query/FeatureQueryBuilderTests.java new file mode 100644 index 00000000000..883dce5f385 --- /dev/null +++ b/modules/mapper-extras/src/test/java/org/elasticsearch/index/query/FeatureQueryBuilderTests.java @@ -0,0 +1,130 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.index.query; + +import org.apache.lucene.document.FeatureField; +import org.apache.lucene.search.MatchNoDocsQuery; +import org.apache.lucene.search.Query; +import org.elasticsearch.action.admin.indices.mapping.put.PutMappingRequest; +import org.elasticsearch.common.Strings; +import org.elasticsearch.common.compress.CompressedXContent; +import org.elasticsearch.index.mapper.MapperExtrasPlugin; +import org.elasticsearch.index.mapper.MapperService; +import org.elasticsearch.index.query.FeatureQueryBuilder.ScoreFunction; +import org.elasticsearch.plugins.Plugin; +import org.elasticsearch.search.internal.SearchContext; +import org.elasticsearch.test.AbstractQueryTestCase; + +import java.io.IOException; +import java.util.Collection; +import java.util.Collections; + +import static org.hamcrest.CoreMatchers.instanceOf; +import static org.hamcrest.Matchers.either; + +public class FeatureQueryBuilderTests extends AbstractQueryTestCase { + + @Override + protected void initializeAdditionalMappings(MapperService mapperService) throws IOException { + for (String type : getCurrentTypes()) { + mapperService.merge(type, new CompressedXContent(Strings.toString(PutMappingRequest.buildFromSimplifiedDef(type, + "my_feature_field", "type=feature", + "my_negative_feature_field", "type=feature,positive_score_impact=false"))), MapperService.MergeReason.MAPPING_UPDATE); + } + } + + @Override + protected Collection> getPlugins() { + return Collections.singleton(MapperExtrasPlugin.class); + } + + @Override + protected FeatureQueryBuilder doCreateTestQueryBuilder() { + ScoreFunction function; + switch (random().nextInt(3)) { + case 0: + function = new ScoreFunction.Log(1 + randomFloat()); + break; + case 1: + if (randomBoolean()) { + function = new ScoreFunction.Saturation(); + } else { + function = new ScoreFunction.Saturation(randomFloat()); + } + break; + case 2: + function = new ScoreFunction.Sigmoid(randomFloat(), randomFloat()); + break; + default: + throw new AssertionError(); + } + return new FeatureQueryBuilder("my_feature_field", function); + } + + @Override + protected void doAssertLuceneQuery(FeatureQueryBuilder queryBuilder, Query query, SearchContext context) throws IOException { + Class expectedClass = FeatureField.newSaturationQuery("", "", 1, 1).getClass(); + assertThat(query, either(instanceOf(MatchNoDocsQuery.class)).or(instanceOf(expectedClass))); + } + + @Override + @AwaitsFix(bugUrl="https://github.com/elastic/elasticsearch/issues/30605") + public void testUnknownField() { + super.testUnknownField(); + } + + public void testDefaultScoreFunction() throws IOException { + assumeTrue("test runs only when at least a type is registered", getCurrentTypes().length > 0); + String query = "{\n" + + " \"feature\" : {\n" + + " \"field\": \"my_feature_field\"\n" + + " }\n" + + "}"; + Query parsedQuery = parseQuery(query).toQuery(createShardContext()); + assertEquals(FeatureField.newSaturationQuery("_feature", "my_feature_field"), parsedQuery); + } + + public void testIllegalField() throws IOException { + assumeTrue("test runs only when at least a type is 
registered", getCurrentTypes().length > 0); + String query = "{\n" + + " \"feature\" : {\n" + + " \"field\": \"" + STRING_FIELD_NAME + "\"\n" + + " }\n" + + "}"; + IllegalArgumentException e = expectThrows(IllegalArgumentException.class, () -> parseQuery(query).toQuery(createShardContext())); + assertEquals("[feature] query only works on [feature] fields, not [text]", e.getMessage()); + } + + public void testIllegalCombination() throws IOException { + assumeTrue("test runs only when at least a type is registered", getCurrentTypes().length > 0); + String query = "{\n" + + " \"feature\" : {\n" + + " \"field\": \"my_negative_feature_field\",\n" + + " \"log\" : {\n" + + " \"scaling_factor\": 4.5\n" + + " }\n" + + " }\n" + + "}"; + IllegalArgumentException e = expectThrows(IllegalArgumentException.class, () -> parseQuery(query).toQuery(createShardContext())); + assertEquals( + "Cannot use the [log] function with a field that has a negative score impact as it would trigger negative scores", + e.getMessage()); + } +} diff --git a/modules/mapper-extras/src/test/resources/rest-api-spec/test/feature/10_basic.yml b/modules/mapper-extras/src/test/resources/rest-api-spec/test/feature/10_basic.yml new file mode 100644 index 00000000000..83185508765 --- /dev/null +++ b/modules/mapper-extras/src/test/resources/rest-api-spec/test/feature/10_basic.yml @@ -0,0 +1,160 @@ +setup: + - skip: + version: " - 6.99.99" + reason: "The feature field/query was introduced in 7.0.0" + + - do: + indices.create: + index: test + body: + settings: + number_of_replicas: 0 + mappings: + _doc: + properties: + pagerank: + type: feature + url_length: + type: feature + positive_score_impact: false + + - do: + index: + index: test + type: _doc + id: 1 + body: + pagerank: 10 + url_length: 50 + + - do: + index: + index: test + type: _doc + id: 2 + body: + pagerank: 100 + url_length: 20 + + - do: + indices.refresh: {} + +--- +"Positive log": + + - do: + search: + body: + query: + feature: + field: pagerank + log: + scaling_factor: 3 + + - match: + hits.total: 2 + + - match: + hits.hits.0._id: "2" + + - match: + hits.hits.1._id: "1" + +--- +"Positive saturation": + + - do: + search: + body: + query: + feature: + field: pagerank + saturation: + pivot: 20 + + - match: + hits.total: 2 + + - match: + hits.hits.0._id: "2" + + - match: + hits.hits.1._id: "1" + +--- +"Positive sigmoid": + + - do: + search: + body: + query: + feature: + field: pagerank + sigmoid: + pivot: 20 + exponent: 0.6 + + - match: + hits.total: 2 + + - match: + hits.hits.0._id: "2" + + - match: + hits.hits.1._id: "1" + +--- +"Negative log": + + - do: + catch: bad_request + search: + body: + query: + feature: + field: url_length + log: + scaling_factor: 3 + +--- +"Negative saturation": + + - do: + search: + body: + query: + feature: + field: url_length + saturation: + pivot: 20 + + - match: + hits.total: 2 + + - match: + hits.hits.0._id: "2" + + - match: + hits.hits.1._id: "1" + +--- +"Negative sigmoid": + + - do: + search: + body: + query: + feature: + field: url_length + sigmoid: + pivot: 20 + exponent: 0.6 + + - match: + hits.total: 2 + + - match: + hits.hits.0._id: "2" + + - match: + hits.hits.1._id: "1" diff --git a/plugins/analysis-icu/licenses/lucene-analyzers-icu-7.4.0-snapshot-59f2b7aec2.jar.sha1 b/plugins/analysis-icu/licenses/lucene-analyzers-icu-7.4.0-snapshot-59f2b7aec2.jar.sha1 deleted file mode 100644 index f99b0177de5..00000000000 --- a/plugins/analysis-icu/licenses/lucene-analyzers-icu-7.4.0-snapshot-59f2b7aec2.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ 
-473a7f4d955f132bb498482648266653f8da85bd \ No newline at end of file diff --git a/plugins/analysis-icu/licenses/lucene-analyzers-icu-7.4.0-snapshot-cc2ee23050.jar.sha1 b/plugins/analysis-icu/licenses/lucene-analyzers-icu-7.4.0-snapshot-cc2ee23050.jar.sha1 new file mode 100644 index 00000000000..781b814c99e --- /dev/null +++ b/plugins/analysis-icu/licenses/lucene-analyzers-icu-7.4.0-snapshot-cc2ee23050.jar.sha1 @@ -0,0 +1 @@ +452c9a9f86b79b9b3eaa7d6aa782e189d5bcfe8f \ No newline at end of file diff --git a/plugins/analysis-kuromoji/licenses/lucene-analyzers-kuromoji-7.4.0-snapshot-59f2b7aec2.jar.sha1 b/plugins/analysis-kuromoji/licenses/lucene-analyzers-kuromoji-7.4.0-snapshot-59f2b7aec2.jar.sha1 deleted file mode 100644 index 08269eed636..00000000000 --- a/plugins/analysis-kuromoji/licenses/lucene-analyzers-kuromoji-7.4.0-snapshot-59f2b7aec2.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -c5a72b9a790e2552248c8bbb36af47c4c399ba27 \ No newline at end of file diff --git a/plugins/analysis-kuromoji/licenses/lucene-analyzers-kuromoji-7.4.0-snapshot-cc2ee23050.jar.sha1 b/plugins/analysis-kuromoji/licenses/lucene-analyzers-kuromoji-7.4.0-snapshot-cc2ee23050.jar.sha1 new file mode 100644 index 00000000000..baba0897858 --- /dev/null +++ b/plugins/analysis-kuromoji/licenses/lucene-analyzers-kuromoji-7.4.0-snapshot-cc2ee23050.jar.sha1 @@ -0,0 +1 @@ +48c76a922bdfc7f50b1b6fe22e9456c555f3f990 \ No newline at end of file diff --git a/plugins/analysis-nori/licenses/lucene-analyzers-nori-7.4.0-snapshot-59f2b7aec2.jar.sha1 b/plugins/analysis-nori/licenses/lucene-analyzers-nori-7.4.0-snapshot-59f2b7aec2.jar.sha1 deleted file mode 100644 index 325fe161204..00000000000 --- a/plugins/analysis-nori/licenses/lucene-analyzers-nori-7.4.0-snapshot-59f2b7aec2.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -14f680ab9b886c7c5224ff682a7fa70b6df44a05 \ No newline at end of file diff --git a/plugins/analysis-nori/licenses/lucene-analyzers-nori-7.4.0-snapshot-cc2ee23050.jar.sha1 b/plugins/analysis-nori/licenses/lucene-analyzers-nori-7.4.0-snapshot-cc2ee23050.jar.sha1 new file mode 100644 index 00000000000..da19e1c3857 --- /dev/null +++ b/plugins/analysis-nori/licenses/lucene-analyzers-nori-7.4.0-snapshot-cc2ee23050.jar.sha1 @@ -0,0 +1 @@ +4db5777df468b0867ff6539c9ab687e0ed6cab41 \ No newline at end of file diff --git a/plugins/analysis-phonetic/licenses/lucene-analyzers-phonetic-7.4.0-snapshot-59f2b7aec2.jar.sha1 b/plugins/analysis-phonetic/licenses/lucene-analyzers-phonetic-7.4.0-snapshot-59f2b7aec2.jar.sha1 deleted file mode 100644 index 9e88119ed1d..00000000000 --- a/plugins/analysis-phonetic/licenses/lucene-analyzers-phonetic-7.4.0-snapshot-59f2b7aec2.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -e033c68c9ec1ba9cd8439758adf7eb5fee22acef \ No newline at end of file diff --git a/plugins/analysis-phonetic/licenses/lucene-analyzers-phonetic-7.4.0-snapshot-cc2ee23050.jar.sha1 b/plugins/analysis-phonetic/licenses/lucene-analyzers-phonetic-7.4.0-snapshot-cc2ee23050.jar.sha1 new file mode 100644 index 00000000000..148b5425d64 --- /dev/null +++ b/plugins/analysis-phonetic/licenses/lucene-analyzers-phonetic-7.4.0-snapshot-cc2ee23050.jar.sha1 @@ -0,0 +1 @@ +0e09e6b011ab2b1a0e3e0e1df2ab2a91dca8ba23 \ No newline at end of file diff --git a/plugins/analysis-smartcn/licenses/lucene-analyzers-smartcn-7.4.0-snapshot-59f2b7aec2.jar.sha1 b/plugins/analysis-smartcn/licenses/lucene-analyzers-smartcn-7.4.0-snapshot-59f2b7aec2.jar.sha1 deleted file mode 100644 index 74721c85757..00000000000 --- 
a/plugins/analysis-smartcn/licenses/lucene-analyzers-smartcn-7.4.0-snapshot-59f2b7aec2.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -08df0a5029f11c109b22064dec78c05dfa25f9e3 \ No newline at end of file diff --git a/plugins/analysis-smartcn/licenses/lucene-analyzers-smartcn-7.4.0-snapshot-cc2ee23050.jar.sha1 b/plugins/analysis-smartcn/licenses/lucene-analyzers-smartcn-7.4.0-snapshot-cc2ee23050.jar.sha1 new file mode 100644 index 00000000000..bce84d16a9a --- /dev/null +++ b/plugins/analysis-smartcn/licenses/lucene-analyzers-smartcn-7.4.0-snapshot-cc2ee23050.jar.sha1 @@ -0,0 +1 @@ +ceefa0f9789ab9ea5c8ab9f67ed7a601a3ae6aa9 \ No newline at end of file diff --git a/plugins/analysis-stempel/licenses/lucene-analyzers-stempel-7.4.0-snapshot-59f2b7aec2.jar.sha1 b/plugins/analysis-stempel/licenses/lucene-analyzers-stempel-7.4.0-snapshot-59f2b7aec2.jar.sha1 deleted file mode 100644 index 1c257797c08..00000000000 --- a/plugins/analysis-stempel/licenses/lucene-analyzers-stempel-7.4.0-snapshot-59f2b7aec2.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -a9d1819b2b13f134f6a605ab5a59ce3c602c0460 \ No newline at end of file diff --git a/plugins/analysis-stempel/licenses/lucene-analyzers-stempel-7.4.0-snapshot-cc2ee23050.jar.sha1 b/plugins/analysis-stempel/licenses/lucene-analyzers-stempel-7.4.0-snapshot-cc2ee23050.jar.sha1 new file mode 100644 index 00000000000..762c56f7700 --- /dev/null +++ b/plugins/analysis-stempel/licenses/lucene-analyzers-stempel-7.4.0-snapshot-cc2ee23050.jar.sha1 @@ -0,0 +1 @@ +b013adc183e52a74795ad3d3032f4d0f9db30b73 \ No newline at end of file diff --git a/plugins/analysis-ukrainian/licenses/lucene-analyzers-morfologik-7.4.0-snapshot-59f2b7aec2.jar.sha1 b/plugins/analysis-ukrainian/licenses/lucene-analyzers-morfologik-7.4.0-snapshot-59f2b7aec2.jar.sha1 deleted file mode 100644 index 117ac05c91f..00000000000 --- a/plugins/analysis-ukrainian/licenses/lucene-analyzers-morfologik-7.4.0-snapshot-59f2b7aec2.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -47bc91ccb0cdf0c1c404646ffe0d5fd6b020a4ab \ No newline at end of file diff --git a/plugins/analysis-ukrainian/licenses/lucene-analyzers-morfologik-7.4.0-snapshot-cc2ee23050.jar.sha1 b/plugins/analysis-ukrainian/licenses/lucene-analyzers-morfologik-7.4.0-snapshot-cc2ee23050.jar.sha1 new file mode 100644 index 00000000000..7631bea2569 --- /dev/null +++ b/plugins/analysis-ukrainian/licenses/lucene-analyzers-morfologik-7.4.0-snapshot-cc2ee23050.jar.sha1 @@ -0,0 +1 @@ +95300f29418f60e57e022d934d3462be9e1e2225 \ No newline at end of file diff --git a/qa/rolling-upgrade/src/test/java/org/elasticsearch/upgrades/AbstractRollingTestCase.java b/qa/rolling-upgrade/src/test/java/org/elasticsearch/upgrades/AbstractRollingTestCase.java index 6f4453aa06c..eb5517b7acb 100644 --- a/qa/rolling-upgrade/src/test/java/org/elasticsearch/upgrades/AbstractRollingTestCase.java +++ b/qa/rolling-upgrade/src/test/java/org/elasticsearch/upgrades/AbstractRollingTestCase.java @@ -18,33 +18,8 @@ */ package org.elasticsearch.upgrades; -import org.apache.http.entity.ContentType; -import org.apache.http.entity.StringEntity; -import org.elasticsearch.Version; -import org.elasticsearch.action.support.PlainActionFuture; -import org.elasticsearch.client.Response; -import org.elasticsearch.cluster.metadata.IndexMetaData; import org.elasticsearch.common.settings.Settings; -import org.elasticsearch.common.util.concurrent.AbstractRunnable; import org.elasticsearch.test.rest.ESRestTestCase; -import org.elasticsearch.test.rest.yaml.ObjectPath; - -import java.io.IOException; -import java.util.ArrayList; -import 
java.util.Collections; -import java.util.List; -import java.util.Map; -import java.util.concurrent.Future; -import java.util.function.Predicate; - -import static com.carrotsearch.randomizedtesting.RandomizedTest.randomAsciiOfLength; -import static java.util.Collections.emptyMap; -import static org.elasticsearch.cluster.routing.UnassignedInfo.INDEX_DELAYED_NODE_LEFT_TIMEOUT_SETTING; -import static org.elasticsearch.cluster.routing.allocation.decider.EnableAllocationDecider.INDEX_ROUTING_ALLOCATION_ENABLE_SETTING; -import static org.elasticsearch.cluster.routing.allocation.decider.MaxRetryAllocationDecider.SETTING_ALLOCATION_MAX_RETRY; -import static org.hamcrest.Matchers.equalTo; -import static org.hamcrest.Matchers.hasSize; -import static org.hamcrest.Matchers.notNullValue; public abstract class AbstractRollingTestCase extends ESRestTestCase { protected enum ClusterType { diff --git a/qa/rolling-upgrade/src/test/java/org/elasticsearch/upgrades/RecoveryIT.java b/qa/rolling-upgrade/src/test/java/org/elasticsearch/upgrades/RecoveryIT.java index 350636551d9..1351de16cf7 100644 --- a/qa/rolling-upgrade/src/test/java/org/elasticsearch/upgrades/RecoveryIT.java +++ b/qa/rolling-upgrade/src/test/java/org/elasticsearch/upgrades/RecoveryIT.java @@ -26,7 +26,6 @@ import org.elasticsearch.client.Response; import org.elasticsearch.cluster.metadata.IndexMetaData; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.util.concurrent.AbstractRunnable; -import org.elasticsearch.test.rest.ESRestTestCase; import org.elasticsearch.test.rest.yaml.ObjectPath; import java.io.IOException; diff --git a/qa/rolling-upgrade/src/test/java/org/elasticsearch/upgrades/XPackIT.java b/qa/rolling-upgrade/src/test/java/org/elasticsearch/upgrades/XPackIT.java new file mode 100644 index 00000000000..3ed98a5d1f7 --- /dev/null +++ b/qa/rolling-upgrade/src/test/java/org/elasticsearch/upgrades/XPackIT.java @@ -0,0 +1,111 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ +package org.elasticsearch.upgrades; + +import org.apache.http.util.EntityUtils; +import org.elasticsearch.common.Booleans; +import org.junit.Before; +import org.elasticsearch.client.Request; +import org.elasticsearch.client.Response; + +import java.io.IOException; +import java.nio.charset.StandardCharsets; + +import static org.hamcrest.Matchers.equalTo; +import static org.junit.Assume.assumeThat; + +/** + * Basic tests for simple xpack functionality that are only run if the + * cluster is on the "zip" distribution.
+ */ +public class XPackIT extends AbstractRollingTestCase { + @Before + public void skipIfNotXPack() { + assumeThat("test is only supported if the distribution contains xpack", + System.getProperty("tests.distribution"), equalTo("zip")); + assumeThat("running this on the unupgraded cluster would change its state and it wouldn't work prior to 6.3 anyway", + CLUSTER_TYPE, equalTo(ClusterType.UPGRADED)); + /* + * *Mostly* we want this for when we're upgrading from pre-6.3's + * zip distribution which doesn't contain xpack to post-6.3's zip + * distribution which *does* contain xpack. But we'll also run it + * on all upgrades for completeness's sake. + */ + }
+ + /** + * Test a basic feature (SQL) which doesn't require any trial license. + * Note that the test methods on this class can run in any order so we + * might have already installed a trial license. + */ + public void testBasicFeature() throws IOException { + Request bulk = new Request("POST", "/sql_test/doc/_bulk"); + bulk.setJsonEntity( + "{\"index\":{}}\n" + + "{\"f\": \"1\"}\n" + + "{\"index\":{}}\n" + + "{\"f\": \"2\"}\n"); + bulk.addParameter("refresh", "true"); + client().performRequest(bulk); + + Request sql = new Request("POST", "/_xpack/sql"); + sql.setJsonEntity("{\"query\": \"SELECT * FROM sql_test WHERE f > 1 ORDER BY f ASC\"}"); + String response = EntityUtils.toString(client().performRequest(sql).getEntity()); + assertEquals("{\"columns\":[{\"name\":\"f\",\"type\":\"text\"}],\"rows\":[[\"2\"]]}", response); + }
+ + /** + * Test creating a trial license and using it. This is interesting because + * our other tests cover starting a new cluster with the default + * distribution and enabling the trial license, but this test is the only + * one that can upgrade from the oss distribution to the default + * distribution with xpack and then create a trial license. We don't + * do a lot with the trial license because for the most + * part those things are tested elsewhere, off in xpack. But we do use the + * trial license a little bit to make sure that it works.
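+ *
+ * A hypothetical follow-up (not part of this test) could read the license
+ * back through the same low-level client API to confirm the trial is
+ * active:
+ * <pre>
+ * Request getLicense = new Request("GET", "/_xpack/license");
+ * String license = EntityUtils.toString(
+ *     client().performRequest(getLicense).getEntity());
+ * // the response body is expected to report a "trial" license type
+ * </pre>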
+ */ + public void testTrialLicense() throws IOException { + Request startTrial = new Request("POST", "/_xpack/license/start_trial"); + startTrial.addParameter("acknowledge", "true"); + client().performRequest(startTrial); + + String noJobs = EntityUtils.toString( + client().performRequest(new Request("GET", "/_xpack/ml/anomaly_detectors")).getEntity()); + assertEquals("{\"count\":0,\"jobs\":[]}", noJobs); + + Request createJob = new Request("PUT", "/_xpack/ml/anomaly_detectors/test_job"); + createJob.setJsonEntity( + "{\n" + + " \"analysis_config\" : {\n" + + " \"bucket_span\": \"10m\",\n" + + " \"detectors\": [\n" + + " {\n" + + " \"function\": \"sum\",\n" + + " \"field_name\": \"total\"\n" + + " }\n" + + " ]\n" + + " },\n" + + " \"data_description\": {\n" + + " \"time_field\": \"timestamp\",\n" + + " \"time_format\": \"epoch_ms\"\n" + + " }\n" + + "}\n"); + client().performRequest(createJob); + } +} diff --git a/qa/smoke-test-http/src/test/java/org/elasticsearch/http/ContextAndHeaderTransportIT.java b/qa/smoke-test-http/src/test/java/org/elasticsearch/http/ContextAndHeaderTransportIT.java index bfa856e381b..99132f0c89d 100644 --- a/qa/smoke-test-http/src/test/java/org/elasticsearch/http/ContextAndHeaderTransportIT.java +++ b/qa/smoke-test-http/src/test/java/org/elasticsearch/http/ContextAndHeaderTransportIT.java @@ -19,7 +19,6 @@ package org.elasticsearch.http; -import org.apache.http.message.BasicHeader; import org.apache.lucene.util.SetOnce; import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.ActionRequest; @@ -222,8 +221,8 @@ public class ContextAndHeaderTransportIT extends HttpSmokeTestCase { public void testThatRelevantHttpHeadersBecomeRequestHeaders() throws IOException { final String IRRELEVANT_HEADER = "SomeIrrelevantHeader"; Request request = new Request("GET", "/" + queryIndex + "/_search"); - request.setHeaders(new BasicHeader(CUSTOM_HEADER, randomHeaderValue), - new BasicHeader(IRRELEVANT_HEADER, randomHeaderValue)); + request.addHeader(CUSTOM_HEADER, randomHeaderValue); + request.addHeader(IRRELEVANT_HEADER, randomHeaderValue); Response response = getRestClient().performRequest(request); assertThat(response.getStatusLine().getStatusCode(), equalTo(200)); List searchRequests = getRequests(SearchRequest.class); diff --git a/qa/smoke-test-http/src/test/java/org/elasticsearch/http/CorsNotSetIT.java b/qa/smoke-test-http/src/test/java/org/elasticsearch/http/CorsNotSetIT.java index 4ab64abda45..2d139e7955e 100644 --- a/qa/smoke-test-http/src/test/java/org/elasticsearch/http/CorsNotSetIT.java +++ b/qa/smoke-test-http/src/test/java/org/elasticsearch/http/CorsNotSetIT.java @@ -19,7 +19,6 @@ package org.elasticsearch.http; -import org.apache.http.message.BasicHeader; import org.elasticsearch.client.Request; import org.elasticsearch.client.Response; @@ -33,7 +32,8 @@ public class CorsNotSetIT extends HttpSmokeTestCase { public void testCorsSettingDefaultBehaviourDoesNotReturnAnything() throws IOException { String corsValue = "http://localhost:9200"; Request request = new Request("GET", "/"); - request.setHeaders(new BasicHeader("User-Agent", "Mozilla Bar"), new BasicHeader("Origin", corsValue)); + request.addHeader("User-Agent", "Mozilla Bar"); + request.addHeader("Origin", corsValue); Response response = getRestClient().performRequest(request); assertThat(response.getStatusLine().getStatusCode(), is(200)); assertThat(response.getHeader("Access-Control-Allow-Origin"), nullValue()); diff --git 
a/qa/smoke-test-http/src/test/java/org/elasticsearch/http/CorsRegexIT.java b/qa/smoke-test-http/src/test/java/org/elasticsearch/http/CorsRegexIT.java index da48e51b63b..e79e8031550 100644 --- a/qa/smoke-test-http/src/test/java/org/elasticsearch/http/CorsRegexIT.java +++ b/qa/smoke-test-http/src/test/java/org/elasticsearch/http/CorsRegexIT.java @@ -18,7 +18,6 @@ */ package org.elasticsearch.http; -import org.apache.http.message.BasicHeader; import org.elasticsearch.client.Request; import org.elasticsearch.client.Response; import org.elasticsearch.client.ResponseException; @@ -53,25 +52,29 @@ public class CorsRegexIT extends HttpSmokeTestCase { } public void testThatRegularExpressionWorksOnMatch() throws IOException { - String corsValue = "http://localhost:9200"; - Request request = new Request("GET", "/"); - request.setHeaders(new BasicHeader("User-Agent", "Mozilla Bar"), - new BasicHeader("Origin", corsValue)); - Response response = getRestClient().performRequest(request); - assertResponseWithOriginheader(response, corsValue); - - corsValue = "https://localhost:9201"; - request.setHeaders(new BasicHeader("User-Agent", "Mozilla Bar"), - new BasicHeader("Origin", corsValue)); - response = getRestClient().performRequest(request); - assertResponseWithOriginheader(response, corsValue); - assertThat(response.getHeader("Access-Control-Allow-Credentials"), is("true")); + { + String corsValue = "http://localhost:9200"; + Request request = new Request("GET", "/"); + request.addHeader("User-Agent", "Mozilla Bar"); + request.addHeader("Origin", corsValue); + Response response = getRestClient().performRequest(request); + assertResponseWithOriginHeader(response, corsValue); + } + { + String corsValue = "https://localhost:9201"; + Request request = new Request("GET", "/"); + request.addHeader("User-Agent", "Mozilla Bar"); + request.addHeader("Origin", corsValue); + Response response = getRestClient().performRequest(request); + assertResponseWithOriginHeader(response, corsValue); + assertThat(response.getHeader("Access-Control-Allow-Credentials"), is("true")); + } } public void testThatRegularExpressionReturnsForbiddenOnNonMatch() throws IOException { Request request = new Request("GET", "/"); - request.setHeaders(new BasicHeader("User-Agent", "Mozilla Bar"), - new BasicHeader("Origin", "http://evil-host:9200")); + request.addHeader("User-Agent", "Mozilla Bar"); + request.addHeader("Origin", "http://evil-host:9200"); try { getRestClient().performRequest(request); fail("request should have failed"); @@ -85,7 +88,7 @@ public class CorsRegexIT extends HttpSmokeTestCase { public void testThatSendingNoOriginHeaderReturnsNoAccessControlHeader() throws IOException { Request request = new Request("GET", "/"); - request.setHeaders(new BasicHeader("User-Agent", "Mozilla Bar")); + request.addHeader("User-Agent", "Mozilla Bar"); Response response = getRestClient().performRequest(request); assertThat(response.getStatusLine().getStatusCode(), is(200)); assertThat(response.getHeader("Access-Control-Allow-Origin"), nullValue()); @@ -100,20 +103,20 @@ public class CorsRegexIT extends HttpSmokeTestCase { public void testThatPreFlightRequestWorksOnMatch() throws IOException { String corsValue = "http://localhost:9200"; Request request = new Request("OPTIONS", "/"); - request.setHeaders(new BasicHeader("User-Agent", "Mozilla Bar"), - new BasicHeader("Origin", corsValue), - new BasicHeader("Access-Control-Request-Method", "GET")); + request.addHeader("User-Agent", "Mozilla Bar"); + request.addHeader("Origin", corsValue); + 
request.addHeader("Access-Control-Request-Method", "GET"); Response response = getRestClient().performRequest(request); - assertResponseWithOriginheader(response, corsValue); + assertResponseWithOriginHeader(response, corsValue); assertNotNull(response.getHeader("Access-Control-Allow-Methods")); } public void testThatPreFlightRequestReturnsNullOnNonMatch() throws IOException { String corsValue = "http://evil-host:9200"; Request request = new Request("OPTIONS", "/"); - request.setHeaders(new BasicHeader("User-Agent", "Mozilla Bar"), - new BasicHeader("Origin", corsValue), - new BasicHeader("Access-Control-Request-Method", "GET")); + request.addHeader("User-Agent", "Mozilla Bar"); + request.addHeader("Origin", corsValue); + request.addHeader("Access-Control-Request-Method", "GET"); try { getRestClient().performRequest(request); fail("request should have failed"); @@ -126,7 +129,7 @@ public class CorsRegexIT extends HttpSmokeTestCase { } } - protected static void assertResponseWithOriginheader(Response response, String expectedCorsHeader) { + private static void assertResponseWithOriginHeader(Response response, String expectedCorsHeader) { assertThat(response.getStatusLine().getStatusCode(), is(200)); assertThat(response.getHeader("Access-Control-Allow-Origin"), is(expectedCorsHeader)); } diff --git a/qa/smoke-test-http/src/test/java/org/elasticsearch/http/HttpCompressionIT.java b/qa/smoke-test-http/src/test/java/org/elasticsearch/http/HttpCompressionIT.java index 6af08577393..a9a0a0c7ed9 100644 --- a/qa/smoke-test-http/src/test/java/org/elasticsearch/http/HttpCompressionIT.java +++ b/qa/smoke-test-http/src/test/java/org/elasticsearch/http/HttpCompressionIT.java @@ -19,7 +19,6 @@ package org.elasticsearch.http; import org.apache.http.HttpHeaders; -import org.apache.http.message.BasicHeader; import org.elasticsearch.client.Request; import org.elasticsearch.client.Response; import org.elasticsearch.test.rest.ESRestTestCase; @@ -39,7 +38,7 @@ public class HttpCompressionIT extends ESRestTestCase { public void testCompressesResponseIfRequested() throws IOException { Request request = new Request("GET", "/"); - request.setHeaders(new BasicHeader(HttpHeaders.ACCEPT_ENCODING, GZIP_ENCODING)); + request.addHeader(HttpHeaders.ACCEPT_ENCODING, GZIP_ENCODING); Response response = client().performRequest(request); assertEquals(200, response.getStatusLine().getStatusCode()); assertEquals(GZIP_ENCODING, response.getHeader(HttpHeaders.CONTENT_ENCODING)); diff --git a/qa/smoke-test-http/src/test/java/org/elasticsearch/http/NoHandlerIT.java b/qa/smoke-test-http/src/test/java/org/elasticsearch/http/NoHandlerIT.java index e1d55afea1b..976ba313115 100644 --- a/qa/smoke-test-http/src/test/java/org/elasticsearch/http/NoHandlerIT.java +++ b/qa/smoke-test-http/src/test/java/org/elasticsearch/http/NoHandlerIT.java @@ -19,7 +19,6 @@ package org.elasticsearch.http; -import org.apache.http.message.BasicHeader; import org.apache.http.util.EntityUtils; import org.elasticsearch.client.Request; import org.elasticsearch.client.Response; @@ -47,7 +46,7 @@ public class NoHandlerIT extends HttpSmokeTestCase { private void runTestNoHandlerRespectsAcceptHeader( final String accept, final String contentType, final String expect) throws IOException { Request request = new Request("GET", "/foo/bar/baz/qux/quux"); - request.setHeaders(new BasicHeader("Accept", accept)); + request.addHeader("Accept", accept); final ResponseException e = expectThrows(ResponseException.class, () -> getRestClient().performRequest(request)); diff --git 
a/qa/smoke-test-http/src/test/java/org/elasticsearch/http/ResponseHeaderPluginIT.java b/qa/smoke-test-http/src/test/java/org/elasticsearch/http/ResponseHeaderPluginIT.java index b4dbc50d52d..ac2503f2c52 100644 --- a/qa/smoke-test-http/src/test/java/org/elasticsearch/http/ResponseHeaderPluginIT.java +++ b/qa/smoke-test-http/src/test/java/org/elasticsearch/http/ResponseHeaderPluginIT.java @@ -18,7 +18,6 @@ */ package org.elasticsearch.http; -import org.apache.http.message.BasicHeader; import org.elasticsearch.client.Request; import org.elasticsearch.client.Response; import org.elasticsearch.client.ResponseException; @@ -26,8 +25,8 @@ import org.elasticsearch.plugins.Plugin; import org.elasticsearch.test.ESIntegTestCase.ClusterScope; import org.elasticsearch.test.ESIntegTestCase.Scope; -import java.util.ArrayList; import java.io.IOException; +import java.util.ArrayList; import java.util.Collection; import static org.hamcrest.Matchers.equalTo; @@ -62,7 +61,7 @@ public class ResponseHeaderPluginIT extends HttpSmokeTestCase { } Request request = new Request("GET", "/_protected"); - request.setHeaders(new BasicHeader("Secret", "password")); + request.addHeader("Secret", "password"); Response authResponse = getRestClient().performRequest(request); assertThat(authResponse.getStatusLine().getStatusCode(), equalTo(200)); assertThat(authResponse.getHeader("Secret"), equalTo("granted")); diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/indices.get_alias/10_basic.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/indices.get_alias/10_basic.yml index 2c5419589ec..6338598de05 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/indices.get_alias/10_basic.yml +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/indices.get_alias/10_basic.yml @@ -20,6 +20,10 @@ setup: --- "Get all aliases via /_alias": + - do: + indices.create: + index: test_index_3 + - do: indices.get_alias: {} @@ -27,7 +31,41 @@ setup: - match: {test_index.aliases.test_blias: {}} - match: {test_index_2.aliases.test_alias: {}} - match: {test_index_2.aliases.test_blias: {}} + - match: {test_index_3.aliases: {}} +--- +"Get aliases via /_alias/_all": + + - do: + indices.create: + index: test_index_3 + + - do: + indices.get_alias: + name: _all + + - match: {test_index.aliases.test_alias: {}} + - match: {test_index.aliases.test_blias: {}} + - match: {test_index_2.aliases.test_alias: {}} + - match: {test_index_2.aliases.test_blias: {}} + - is_false: test_index_3 + +--- +"Get aliases via /_alias/*": + + - do: + indices.create: + index: test_index_3 + + - do: + indices.get_alias: + name: "*" + + - match: {test_index.aliases.test_alias: {}} + - match: {test_index.aliases.test_blias: {}} + - match: {test_index_2.aliases.test_alias: {}} + - match: {test_index_2.aliases.test_blias: {}} + - is_false: test_index_3 --- "Get all aliases via /{index}/_alias/": diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/search.inner_hits/10_basic.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/search.inner_hits/10_basic.yml index 40e9d705ea4..884a50507c7 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/search.inner_hits/10_basic.yml +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/search.inner_hits/10_basic.yml @@ -45,9 +45,8 @@ setup: "Nested doc version and seqIDs": - skip: - # fixed in 6.0.1 - version: " - 6.0.0" - reason: "version and seq IDs where not accurate in previous versions" + version: " - 6.3.99" + reason: "object notation for docvalue_fields was introduced in 6.4"
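# From 6.4 on a docvalue_fields entry may be either the plain string form or
# an object carrying an explicit format, as the tests below exercise, e.g.:
#   docvalue_fields:
#     - "some_field"                                             # pre-6.4 string form
#     - { "field": "some_field", "format": "use_field_mapping" } # 6.4+ object form ("some_field" is a placeholder name)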
- do: index: @@ -61,7 +60,7 @@ setup: - do: search: - body: { "query" : { "nested" : { "path" : "nested_field", "query" : { "match_all" : {} }, "inner_hits" : { version: true, "docvalue_fields": ["_seq_no"]} }}, "version": true, "docvalue_fields" : ["_seq_no"] } + body: { "query" : { "nested" : { "path" : "nested_field", "query" : { "match_all" : {} }, "inner_hits" : { version: true, "docvalue_fields": [ { "field": "_seq_no", "format": "use_field_mapping" } ]} }}, "version": true, "docvalue_fields" : [ { "field": "_seq_no", "format": "use_field_mapping" } ] } - match: { hits.total: 1 } - match: { hits.hits.0._index: "test" } @@ -84,7 +83,7 @@ setup: - do: search: - body: { "query" : { "nested" : { "path" : "nested_field", "query" : { "match_all" : {} }, "inner_hits" : { version: true, "docvalue_fields": ["_seq_no"]} }}, "version": true, "docvalue_fields" : ["_seq_no"] } + body: { "query" : { "nested" : { "path" : "nested_field", "query" : { "match_all" : {} }, "inner_hits" : { version: true, "docvalue_fields": [ { "field": "_seq_no", "format": "use_field_mapping" } ]} }}, "version": true, "docvalue_fields" : [ { "field": "_seq_no", "format": "use_field_mapping" } ] } - match: { hits.total: 1 } - match: { hits.hits.0._index: "test" } diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/search/10_source_filtering.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/search/10_source_filtering.yml index 3830a68b28f..59692873cc4 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/search/10_source_filtering.yml +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/search/10_source_filtering.yml @@ -133,7 +133,53 @@ setup: --- "docvalue_fields": + - skip: + version: " - 6.3.99" + reason: format option was added in 6.4 + features: warnings - do: + warnings: + - 'Doc-value field [count] is not using a format. The output will change in 7.0 when doc value fields get formatted based on mappings by default. It is recommended to pass [format=use_field_mapping] with the doc value field in order to opt in for the future behaviour and ease the migration to 7.0.' + search: + body: + docvalue_fields: [ "count" ] + - match: { hits.hits.0.fields.count: [1] } + +--- +"docvalue_fields as url param": + - skip: + version: " - 6.3.99" + reason: format option was added in 6.4 + features: warnings + - do: + warnings: + - 'Doc-value field [count] is not using a format. The output will change in 7.0 when doc value fields get formatted based on mappings by default. It is recommended to pass [format=use_field_mapping] with the doc value field in order to opt in for the future behaviour and ease the migration to 7.0.' 
search: docvalue_fields: [ "count" ] - match: { hits.hits.0.fields.count: [1] } + +--- +"docvalue_fields with default format": + - skip: + version: " - 6.3.99" + reason: format option was added in 6.4 + - do: + search: + body: + docvalue_fields: + - field: "count" + format: "use_field_mapping" + - match: { hits.hits.0.fields.count: [1] } + +--- +"docvalue_fields with explicit format": + - skip: + version: " - 6.3.99" + reason: format option was added in 6.4 + - do: + search: + body: + docvalue_fields: + - field: "count" + format: "#.0" + - match: { hits.hits.0.fields.count: ["1.0"] } diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/search/30_limits.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/search/30_limits.yml index da9c739ed66..905635e1d10 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/search/30_limits.yml +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/search/30_limits.yml @@ -62,6 +62,9 @@ setup: --- "Docvalues_fields size limit": + - skip: + version: " - 6.3.99" + reason: "The object notation for docvalue_fields is only supported on 6.4+" - do: catch: /Trying to retrieve too many docvalue_fields\. Must be less than or equal to[:] \[2\] but was \[3\]\. This limit can be set by changing the \[index.max_docvalue_fields_search\] index level setting\./ search: @@ -69,7 +72,13 @@ setup: body: query: match_all: {} - docvalue_fields: ["one", "two", "three"] + docvalue_fields: + - field: "one" + format: "use_field_mapping" + - field: "two" + format: "use_field_mapping" + - field: "three" + format: "use_field_mapping" --- "Script_fields size limit": diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/snapshot.get_repository/10_basic.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/snapshot.get_repository/10_basic.yml index b944fe43791..47f5ac0934c 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/snapshot.get_repository/10_basic.yml +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/snapshot.get_repository/10_basic.yml @@ -51,6 +51,9 @@ setup: --- "Verify created repository": + - skip: + version: " - 6.99.99" + reason: AwaitsFix for https://github.com/elastic/elasticsearch/issues/30807 - do: snapshot.verify_repository: repository: test_repo_get_2 diff --git a/server/licenses/lucene-analyzers-common-7.4.0-snapshot-59f2b7aec2.jar.sha1 b/server/licenses/lucene-analyzers-common-7.4.0-snapshot-59f2b7aec2.jar.sha1 deleted file mode 100644 index 14f5fcb381f..00000000000 --- a/server/licenses/lucene-analyzers-common-7.4.0-snapshot-59f2b7aec2.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -b70d03784d06a643e096fae4d959200aa246ba16 \ No newline at end of file diff --git a/server/licenses/lucene-analyzers-common-7.4.0-snapshot-cc2ee23050.jar.sha1 b/server/licenses/lucene-analyzers-common-7.4.0-snapshot-cc2ee23050.jar.sha1 new file mode 100644 index 00000000000..1c471a77d80 --- /dev/null +++ b/server/licenses/lucene-analyzers-common-7.4.0-snapshot-cc2ee23050.jar.sha1 @@ -0,0 +1 @@ +96ab108569c77932ecb17c45421affece207df5c \ No newline at end of file diff --git a/server/licenses/lucene-backward-codecs-7.4.0-snapshot-59f2b7aec2.jar.sha1 b/server/licenses/lucene-backward-codecs-7.4.0-snapshot-59f2b7aec2.jar.sha1 deleted file mode 100644 index 47afb59e45e..00000000000 --- a/server/licenses/lucene-backward-codecs-7.4.0-snapshot-59f2b7aec2.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -d660a63ac0f7ab2772a45ae518518472bf620620 \ No newline at end of file diff --git 
a/server/licenses/lucene-backward-codecs-7.4.0-snapshot-cc2ee23050.jar.sha1 b/server/licenses/lucene-backward-codecs-7.4.0-snapshot-cc2ee23050.jar.sha1 new file mode 100644 index 00000000000..4c8842872ab --- /dev/null +++ b/server/licenses/lucene-backward-codecs-7.4.0-snapshot-cc2ee23050.jar.sha1 @@ -0,0 +1 @@ +72d09ca50979f716a57f53f2de33d55023a166ec \ No newline at end of file diff --git a/server/licenses/lucene-core-7.4.0-snapshot-59f2b7aec2.jar.sha1 b/server/licenses/lucene-core-7.4.0-snapshot-59f2b7aec2.jar.sha1 deleted file mode 100644 index 50392f59374..00000000000 --- a/server/licenses/lucene-core-7.4.0-snapshot-59f2b7aec2.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -bf8f9e8284a54af18545574cb4a530da0deb968a \ No newline at end of file diff --git a/server/licenses/lucene-core-7.4.0-snapshot-cc2ee23050.jar.sha1 b/server/licenses/lucene-core-7.4.0-snapshot-cc2ee23050.jar.sha1 new file mode 100644 index 00000000000..4aecfc6a550 --- /dev/null +++ b/server/licenses/lucene-core-7.4.0-snapshot-cc2ee23050.jar.sha1 @@ -0,0 +1 @@ +e118e4d05070378516b9055184b74498ba528dee \ No newline at end of file diff --git a/server/licenses/lucene-grouping-7.4.0-snapshot-59f2b7aec2.jar.sha1 b/server/licenses/lucene-grouping-7.4.0-snapshot-59f2b7aec2.jar.sha1 deleted file mode 100644 index 27d7aaab2f5..00000000000 --- a/server/licenses/lucene-grouping-7.4.0-snapshot-59f2b7aec2.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -9eaae9dcd4ec88227475cb81d3be9afa767f1b22 \ No newline at end of file diff --git a/server/licenses/lucene-grouping-7.4.0-snapshot-cc2ee23050.jar.sha1 b/server/licenses/lucene-grouping-7.4.0-snapshot-cc2ee23050.jar.sha1 new file mode 100644 index 00000000000..948aacf662f --- /dev/null +++ b/server/licenses/lucene-grouping-7.4.0-snapshot-cc2ee23050.jar.sha1 @@ -0,0 +1 @@ +2b2ea6bfe6fa159bbf205bf7f7fa2ed2c22bbffc \ No newline at end of file diff --git a/server/licenses/lucene-highlighter-7.4.0-snapshot-59f2b7aec2.jar.sha1 b/server/licenses/lucene-highlighter-7.4.0-snapshot-59f2b7aec2.jar.sha1 deleted file mode 100644 index 176c3a86afe..00000000000 --- a/server/licenses/lucene-highlighter-7.4.0-snapshot-59f2b7aec2.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -cd15f0008742c84899d678cb0cecda06d0a6d63e \ No newline at end of file diff --git a/server/licenses/lucene-highlighter-7.4.0-snapshot-cc2ee23050.jar.sha1 b/server/licenses/lucene-highlighter-7.4.0-snapshot-cc2ee23050.jar.sha1 new file mode 100644 index 00000000000..30a960c5a80 --- /dev/null +++ b/server/licenses/lucene-highlighter-7.4.0-snapshot-cc2ee23050.jar.sha1 @@ -0,0 +1 @@ +423e4fff9276101d845d6073dc6cd27504def207 \ No newline at end of file diff --git a/server/licenses/lucene-join-7.4.0-snapshot-59f2b7aec2.jar.sha1 b/server/licenses/lucene-join-7.4.0-snapshot-59f2b7aec2.jar.sha1 deleted file mode 100644 index 0bfe9cfb79a..00000000000 --- a/server/licenses/lucene-join-7.4.0-snapshot-59f2b7aec2.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -5ce38b8610a7f402f2da3b0e408e508151d979c5 \ No newline at end of file diff --git a/server/licenses/lucene-join-7.4.0-snapshot-cc2ee23050.jar.sha1 b/server/licenses/lucene-join-7.4.0-snapshot-cc2ee23050.jar.sha1 new file mode 100644 index 00000000000..fb3cd72c755 --- /dev/null +++ b/server/licenses/lucene-join-7.4.0-snapshot-cc2ee23050.jar.sha1 @@ -0,0 +1 @@ +27561038da2edcae3ecc3a08b0a52824966af87a \ No newline at end of file diff --git a/server/licenses/lucene-memory-7.4.0-snapshot-59f2b7aec2.jar.sha1 b/server/licenses/lucene-memory-7.4.0-snapshot-59f2b7aec2.jar.sha1 deleted file mode 100644 index c1a0127e2ce..00000000000 --- 
a/server/licenses/lucene-memory-7.4.0-snapshot-59f2b7aec2.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -53819f03a07050a4af28361d64395c86f2cea008 \ No newline at end of file diff --git a/server/licenses/lucene-memory-7.4.0-snapshot-cc2ee23050.jar.sha1 b/server/licenses/lucene-memory-7.4.0-snapshot-cc2ee23050.jar.sha1 new file mode 100644 index 00000000000..cd989836ab2 --- /dev/null +++ b/server/licenses/lucene-memory-7.4.0-snapshot-cc2ee23050.jar.sha1 @@ -0,0 +1 @@ +d7d422159f705261784d121e24877119d9c95083 \ No newline at end of file diff --git a/server/licenses/lucene-misc-7.4.0-snapshot-59f2b7aec2.jar.sha1 b/server/licenses/lucene-misc-7.4.0-snapshot-59f2b7aec2.jar.sha1 deleted file mode 100644 index 615a0dec0c0..00000000000 --- a/server/licenses/lucene-misc-7.4.0-snapshot-59f2b7aec2.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -8cdc0e2b65d146ed11f4d2507109e530d59ff33d \ No newline at end of file diff --git a/server/licenses/lucene-misc-7.4.0-snapshot-cc2ee23050.jar.sha1 b/server/licenses/lucene-misc-7.4.0-snapshot-cc2ee23050.jar.sha1 new file mode 100644 index 00000000000..c4d8ad61c73 --- /dev/null +++ b/server/licenses/lucene-misc-7.4.0-snapshot-cc2ee23050.jar.sha1 @@ -0,0 +1 @@ +fc09508fde6ba87f241d7e3148d9e310c0db9cb9 \ No newline at end of file diff --git a/server/licenses/lucene-queries-7.4.0-snapshot-59f2b7aec2.jar.sha1 b/server/licenses/lucene-queries-7.4.0-snapshot-59f2b7aec2.jar.sha1 deleted file mode 100644 index 12f5eff262e..00000000000 --- a/server/licenses/lucene-queries-7.4.0-snapshot-59f2b7aec2.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -e56090463703112ad64ad457d18bae9a5b2966b8 \ No newline at end of file diff --git a/server/licenses/lucene-queries-7.4.0-snapshot-cc2ee23050.jar.sha1 b/server/licenses/lucene-queries-7.4.0-snapshot-cc2ee23050.jar.sha1 new file mode 100644 index 00000000000..0cb51736803 --- /dev/null +++ b/server/licenses/lucene-queries-7.4.0-snapshot-cc2ee23050.jar.sha1 @@ -0,0 +1 @@ +201fdf3432ff3fef0f48c38c2c0f482c144f6868 \ No newline at end of file diff --git a/server/licenses/lucene-queryparser-7.4.0-snapshot-59f2b7aec2.jar.sha1 b/server/licenses/lucene-queryparser-7.4.0-snapshot-59f2b7aec2.jar.sha1 deleted file mode 100644 index a787a00541a..00000000000 --- a/server/licenses/lucene-queryparser-7.4.0-snapshot-59f2b7aec2.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -9faf974b77058e44a6d35e956db4f5fb67389dfa \ No newline at end of file diff --git a/server/licenses/lucene-queryparser-7.4.0-snapshot-cc2ee23050.jar.sha1 b/server/licenses/lucene-queryparser-7.4.0-snapshot-cc2ee23050.jar.sha1 new file mode 100644 index 00000000000..ecd6440ba64 --- /dev/null +++ b/server/licenses/lucene-queryparser-7.4.0-snapshot-cc2ee23050.jar.sha1 @@ -0,0 +1 @@ +917df8c8d08952a012a34050b183b6204ae7081b \ No newline at end of file diff --git a/server/licenses/lucene-sandbox-7.4.0-snapshot-59f2b7aec2.jar.sha1 b/server/licenses/lucene-sandbox-7.4.0-snapshot-59f2b7aec2.jar.sha1 deleted file mode 100644 index 7d95cd6b3b6..00000000000 --- a/server/licenses/lucene-sandbox-7.4.0-snapshot-59f2b7aec2.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -b852b1fe70ef70736b2b1a9ad57eb93cbaed0423 \ No newline at end of file diff --git a/server/licenses/lucene-sandbox-7.4.0-snapshot-cc2ee23050.jar.sha1 b/server/licenses/lucene-sandbox-7.4.0-snapshot-cc2ee23050.jar.sha1 new file mode 100644 index 00000000000..3e65eaeef91 --- /dev/null +++ b/server/licenses/lucene-sandbox-7.4.0-snapshot-cc2ee23050.jar.sha1 @@ -0,0 +1 @@ +caff84fa66cb0376835c39f3d4ca7dfd2177d8f4 \ No newline at end of file diff --git 
a/server/licenses/lucene-spatial-7.4.0-snapshot-59f2b7aec2.jar.sha1 b/server/licenses/lucene-spatial-7.4.0-snapshot-59f2b7aec2.jar.sha1 deleted file mode 100644 index ac0598b3f0c..00000000000 --- a/server/licenses/lucene-spatial-7.4.0-snapshot-59f2b7aec2.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -d2fa99ec7140fcf35db16ac1feb78ef142750d39 \ No newline at end of file diff --git a/server/licenses/lucene-spatial-7.4.0-snapshot-cc2ee23050.jar.sha1 b/server/licenses/lucene-spatial-7.4.0-snapshot-cc2ee23050.jar.sha1 new file mode 100644 index 00000000000..c86854b16c3 --- /dev/null +++ b/server/licenses/lucene-spatial-7.4.0-snapshot-cc2ee23050.jar.sha1 @@ -0,0 +1 @@ +e1bce61a9d9129a8d0fdd3127a84665d29f53eb0 \ No newline at end of file diff --git a/server/licenses/lucene-spatial-extras-7.4.0-snapshot-59f2b7aec2.jar.sha1 b/server/licenses/lucene-spatial-extras-7.4.0-snapshot-59f2b7aec2.jar.sha1 deleted file mode 100644 index a2537dbdde5..00000000000 --- a/server/licenses/lucene-spatial-extras-7.4.0-snapshot-59f2b7aec2.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -c9963f60d3a0924b877a6f910650c5f2384822a0 \ No newline at end of file diff --git a/server/licenses/lucene-spatial-extras-7.4.0-snapshot-cc2ee23050.jar.sha1 b/server/licenses/lucene-spatial-extras-7.4.0-snapshot-cc2ee23050.jar.sha1 new file mode 100644 index 00000000000..144984a3869 --- /dev/null +++ b/server/licenses/lucene-spatial-extras-7.4.0-snapshot-cc2ee23050.jar.sha1 @@ -0,0 +1 @@ +3a2e4373d79fda968a078971efa2cb8ec9ff65b0 \ No newline at end of file diff --git a/server/licenses/lucene-spatial3d-7.4.0-snapshot-59f2b7aec2.jar.sha1 b/server/licenses/lucene-spatial3d-7.4.0-snapshot-59f2b7aec2.jar.sha1 deleted file mode 100644 index 6844bcd13b2..00000000000 --- a/server/licenses/lucene-spatial3d-7.4.0-snapshot-59f2b7aec2.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -3f33ba54da5e0e125f4c5ef7dd800dd6185e4f61 \ No newline at end of file diff --git a/server/licenses/lucene-spatial3d-7.4.0-snapshot-cc2ee23050.jar.sha1 b/server/licenses/lucene-spatial3d-7.4.0-snapshot-cc2ee23050.jar.sha1 new file mode 100644 index 00000000000..fd19f4ad811 --- /dev/null +++ b/server/licenses/lucene-spatial3d-7.4.0-snapshot-cc2ee23050.jar.sha1 @@ -0,0 +1 @@ +7f14927e5c3c1c85c4c5b3681c28c5e36f241dda \ No newline at end of file diff --git a/server/licenses/lucene-suggest-7.4.0-snapshot-59f2b7aec2.jar.sha1 b/server/licenses/lucene-suggest-7.4.0-snapshot-59f2b7aec2.jar.sha1 deleted file mode 100644 index 0343db2d944..00000000000 --- a/server/licenses/lucene-suggest-7.4.0-snapshot-59f2b7aec2.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -bb3c18c987395dae6fe63744f5a50fd367ea5a74 \ No newline at end of file diff --git a/server/licenses/lucene-suggest-7.4.0-snapshot-cc2ee23050.jar.sha1 b/server/licenses/lucene-suggest-7.4.0-snapshot-cc2ee23050.jar.sha1 new file mode 100644 index 00000000000..ba405960dbe --- /dev/null +++ b/server/licenses/lucene-suggest-7.4.0-snapshot-cc2ee23050.jar.sha1 @@ -0,0 +1 @@ +6e708a38c957a655e0cfedb06a1b9aa892929db0 \ No newline at end of file diff --git a/server/src/main/java/org/elasticsearch/action/admin/cluster/repositories/verify/VerifyRepositoryResponse.java b/server/src/main/java/org/elasticsearch/action/admin/cluster/repositories/verify/VerifyRepositoryResponse.java index 27612a3dab2..c3fb2d58beb 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/cluster/repositories/verify/VerifyRepositoryResponse.java +++ b/server/src/main/java/org/elasticsearch/action/admin/cluster/repositories/verify/VerifyRepositoryResponse.java @@ -19,23 +19,112 @@ package 
org.elasticsearch.action.admin.cluster.repositories.verify; +import org.elasticsearch.Version; import org.elasticsearch.action.ActionResponse; import org.elasticsearch.cluster.ClusterName; import org.elasticsearch.cluster.node.DiscoveryNode; +import org.elasticsearch.common.ParseField; import org.elasticsearch.common.Strings; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; +import org.elasticsearch.common.io.stream.Writeable; +import org.elasticsearch.common.transport.TransportAddress; +import org.elasticsearch.common.xcontent.ObjectParser; import org.elasticsearch.common.xcontent.ToXContentObject; import org.elasticsearch.common.xcontent.XContentBuilder; import java.io.IOException; +import java.util.Arrays; +import java.util.Collections; +import java.util.List; +import java.util.Objects; +import java.util.stream.Collectors; /** - * Unregister repository response + * Verify repository response */ public class VerifyRepositoryResponse extends ActionResponse implements ToXContentObject { - private DiscoveryNode[] nodes; + static final String NODES = "nodes"; + static final String NAME = "name"; + + public static class NodeView implements Writeable, ToXContentObject { + private static final ObjectParser.NamedObjectParser<NodeView, Void> PARSER; + static { + ObjectParser<NodeView, Void> internalParser = new ObjectParser<>(NODES); + internalParser.declareString(NodeView::setName, new ParseField(NAME)); + PARSER = (p, v, name) -> internalParser.parse(p, new NodeView(name), null); + } + + final String nodeId; + String name; + + public NodeView(String nodeId) { this.nodeId = nodeId; } + + public NodeView(String nodeId, String name) { + this(nodeId); + this.name = name; + } + + public NodeView(StreamInput in) throws IOException { + this(in.readString(), in.readString()); + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + out.writeString(nodeId); + out.writeString(name); + } + + void setName(String name) { this.name = name; } + + public String getName() { return name; } + + public String getNodeId() { return nodeId; } + + public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { + builder.startObject(nodeId); + { + builder.field(NAME, name); + } + builder.endObject(); + return builder; + } + + /** + * Temporary method that allows turning a {@link NodeView} into a {@link DiscoveryNode}. This representation will never be used in + * practice, because in >= 6.4 a consumer of the response will only be able to retrieve a representation of {@link NodeView} + * objects. + * + * Effectively this will be used to hold the state of the object in 6.x so there is no need to have 2 backing objects that + * represent the state of the Response. In practice these will always be read by a consumer as a NodeView, but it eases the + * transition to master which will not contain any representation of a {@link DiscoveryNode}.
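+ * + * Only the node id and name survive the conversion below; the address is + * stubbed with {@code TransportAddress.META_ADDRESS} and the attributes and + * roles are left empty.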
+ */ + DiscoveryNode convertToDiscoveryNode() { + return new DiscoveryNode(name, nodeId, "", "", "", new TransportAddress(TransportAddress.META_ADDRESS, 0), + Collections.emptyMap(), Collections.emptySet(), Version.CURRENT); + } + + @Override + public boolean equals(Object obj) { + if (obj == null) { + return false; + } + if (getClass() != obj.getClass()) { + return false; + } + NodeView other = (NodeView) obj; + return Objects.equals(nodeId, other.nodeId) && + Objects.equals(name, other.name); + } + + @Override + public int hashCode() { + return Objects.hash(nodeId, name); + } + } + + private List<DiscoveryNode> nodes; private ClusterName clusterName; @@ -45,53 +134,56 @@ public class VerifyRepositoryResponse extends ActionResponse implements ToXConte public VerifyRepositoryResponse(ClusterName clusterName, DiscoveryNode[] nodes) { this.clusterName = clusterName; - this.nodes = nodes; + this.nodes = Arrays.asList(nodes); } @Override public void readFrom(StreamInput in) throws IOException { super.readFrom(in); - clusterName = new ClusterName(in); - nodes = new DiscoveryNode[in.readVInt()]; - for (int i = 0; i < nodes.length; i++) { - nodes[i] = new DiscoveryNode(in); - } + if (in.getVersion().onOrAfter(Version.V_7_0_0_alpha1)) { + this.nodes = in.readList(NodeView::new).stream().map(n -> n.convertToDiscoveryNode()).collect(Collectors.toList()); + } else { + clusterName = new ClusterName(in); + this.nodes = in.readList(DiscoveryNode::new); + } } @Override public void writeTo(StreamOutput out) throws IOException { super.writeTo(out); - clusterName.writeTo(out); - out.writeVInt(nodes.length); - for (DiscoveryNode node : nodes) { - node.writeTo(out); + if (Version.CURRENT.onOrAfter(Version.V_7_0_0_alpha1)) { + out.writeList(getNodes()); + } else { + clusterName.writeTo(out); + out.writeList(nodes); } } - public DiscoveryNode[] getNodes() { - return nodes; + public List<NodeView> getNodes() { + return nodes.stream().map(dn -> new NodeView(dn.getId(), dn.getName())).collect(Collectors.toList()); } public ClusterName getClusterName() { return clusterName; } - static final class Fields { - static final String NODES = "nodes"; - static final String NAME = "name"; - } @Override public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { builder.startObject(); - builder.startObject(Fields.NODES); - for (DiscoveryNode node : nodes) { - builder.startObject(node.getId()); - builder.field(Fields.NAME, node.getName()); + { + builder.startObject(NODES); + { + for (DiscoveryNode node : nodes) { + builder.startObject(node.getId()); + { + builder.field(NAME, node.getName()); + } + builder.endObject(); + } + } builder.endObject(); } builder.endObject(); - builder.endObject(); return builder; } diff --git a/server/src/main/java/org/elasticsearch/action/search/ExpandSearchPhase.java b/server/src/main/java/org/elasticsearch/action/search/ExpandSearchPhase.java index 1e11f126bb6..d9ed6e6792f 100644 --- a/server/src/main/java/org/elasticsearch/action/search/ExpandSearchPhase.java +++ b/server/src/main/java/org/elasticsearch/action/search/ExpandSearchPhase.java @@ -153,7 +153,7 @@ final class ExpandSearchPhase extends SearchPhase { } } if (options.getDocValueFields() != null) { - options.getDocValueFields().forEach(groupSource::docValueField); + options.getDocValueFields().forEach(ff -> groupSource.docValueField(ff.field, ff.format)); } if (options.getStoredFieldsContext() != null && options.getStoredFieldsContext().fieldNames() != null) { options.getStoredFieldsContext().fieldNames().forEach(groupSource::storedField); } diff --git a/server/src/main/java/org/elasticsearch/action/search/SearchRequestBuilder.java
b/server/src/main/java/org/elasticsearch/action/search/SearchRequestBuilder.java index 91ac46c1d62..424db04ce39 100644 --- a/server/src/main/java/org/elasticsearch/action/search/SearchRequestBuilder.java +++ b/server/src/main/java/org/elasticsearch/action/search/SearchRequestBuilder.java @@ -290,11 +290,21 @@ public class SearchRequestBuilder extends ActionRequestBuilder extends ClusterState * @param e optional error that might have been thrown */ public void onAllNodesAcked(@Nullable Exception e) { - listener.onResponse(newResponse(true)); + listener.onResponse(newResponse(e == null)); } protected abstract Response newResponse(boolean acknowledged); diff --git a/server/src/main/java/org/elasticsearch/cluster/metadata/IndexMetaData.java b/server/src/main/java/org/elasticsearch/cluster/metadata/IndexMetaData.java index 7af2ec2d237..db45ce6c9e3 100644 --- a/server/src/main/java/org/elasticsearch/cluster/metadata/IndexMetaData.java +++ b/server/src/main/java/org/elasticsearch/cluster/metadata/IndexMetaData.java @@ -181,8 +181,7 @@ public class IndexMetaData implements Diffable, ToXContentFragmen if (maxNumShards < 1) { throw new IllegalArgumentException("es.index.max_number_of_shards must be > 0"); } - return Setting.intSetting(SETTING_NUMBER_OF_SHARDS, Math.min(5, maxNumShards), 1, maxNumShards, - Property.IndexScope, Property.Final); + return Setting.intSetting(SETTING_NUMBER_OF_SHARDS, 1, 1, maxNumShards, Property.IndexScope, Property.Final); } public static final String INDEX_SETTING_PREFIX = "index."; diff --git a/server/src/main/java/org/elasticsearch/cluster/metadata/MetaDataMappingService.java b/server/src/main/java/org/elasticsearch/cluster/metadata/MetaDataMappingService.java index b8e898cf6f5..82d947b4158 100644 --- a/server/src/main/java/org/elasticsearch/cluster/metadata/MetaDataMappingService.java +++ b/server/src/main/java/org/elasticsearch/cluster/metadata/MetaDataMappingService.java @@ -363,7 +363,7 @@ public class MetaDataMappingService extends AbstractComponent { @Override public void onAllNodesAcked(@Nullable Exception e) { - listener.onResponse(new ClusterStateUpdateResponse(true)); + listener.onResponse(new ClusterStateUpdateResponse(e == null)); } @Override diff --git a/server/src/main/java/org/elasticsearch/cluster/service/MasterService.java b/server/src/main/java/org/elasticsearch/cluster/service/MasterService.java index 54a6568af3f..1757548c28b 100644 --- a/server/src/main/java/org/elasticsearch/cluster/service/MasterService.java +++ b/server/src/main/java/org/elasticsearch/cluster/service/MasterService.java @@ -563,7 +563,7 @@ public class MasterService extends AbstractLifecycleComponent { private final AckedClusterStateTaskListener ackedTaskListener; private final CountDown countDown; - private final DiscoveryNodes nodes; + private final DiscoveryNode masterNode; private final long clusterStateVersion; private final Future ackTimeoutCallback; private Exception lastFailure; @@ -572,15 +572,14 @@ public class MasterService extends AbstractLifecycleComponent { ThreadPool threadPool) { this.ackedTaskListener = ackedTaskListener; this.clusterStateVersion = clusterStateVersion; - this.nodes = nodes; + this.masterNode = nodes.getMasterNode(); int countDown = 0; for (DiscoveryNode node : nodes) { - if (ackedTaskListener.mustAck(node)) { + //we always wait for at least the master node + if (node.equals(masterNode) || ackedTaskListener.mustAck(node)) { countDown++; } } - //we always wait for at least 1 node (the master) - countDown = Math.max(1, countDown); 
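// Note: the master is now counted up front in the loop above via
// node.equals(masterNode), so the old Math.max(1, countDown) floor on the
// counter is redundant and is dropped here.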
logger.trace("expecting {} acknowledgements for cluster_state update (version: {})", countDown, clusterStateVersion); this.countDown = new CountDown(countDown); this.ackTimeoutCallback = threadPool.schedule(ackedTaskListener.ackTimeout(), ThreadPool.Names.GENERIC, () -> onTimeout()); @@ -588,11 +587,8 @@ public class MasterService extends AbstractLifecycleComponent { @Override public void onNodeAck(DiscoveryNode node, @Nullable Exception e) { - if (!ackedTaskListener.mustAck(node)) { - //we always wait for the master ack anyway - if (!node.equals(nodes.getMasterNode())) { - return; - } + if (node.equals(masterNode) == false && ackedTaskListener.mustAck(node) == false) { + return; } if (e == null) { logger.trace("ack received from node [{}], cluster_state update (version: {})", node, clusterStateVersion); diff --git a/server/src/main/java/org/elasticsearch/discovery/zen/NodesFaultDetection.java b/server/src/main/java/org/elasticsearch/discovery/zen/NodesFaultDetection.java index 218e6e3f63f..d19cc98441b 100644 --- a/server/src/main/java/org/elasticsearch/discovery/zen/NodesFaultDetection.java +++ b/server/src/main/java/org/elasticsearch/discovery/zen/NodesFaultDetection.java @@ -44,6 +44,7 @@ import java.util.Collections; import java.util.Set; import java.util.concurrent.ConcurrentMap; import java.util.concurrent.CopyOnWriteArrayList; +import java.util.function.Supplier; import static org.elasticsearch.common.util.concurrent.ConcurrentCollections.newConcurrentMap; @@ -66,13 +67,16 @@ public class NodesFaultDetection extends FaultDetection { private final ConcurrentMap nodesFD = newConcurrentMap(); - private volatile long clusterStateVersion = ClusterState.UNKNOWN_VERSION; + private final Supplier clusterStateSupplier; private volatile DiscoveryNode localNode; - public NodesFaultDetection(Settings settings, ThreadPool threadPool, TransportService transportService, ClusterName clusterName) { + public NodesFaultDetection(Settings settings, ThreadPool threadPool, TransportService transportService, + Supplier clusterStateSupplier, ClusterName clusterName) { super(settings, threadPool, transportService, clusterName); + this.clusterStateSupplier = clusterStateSupplier; + logger.debug("[node ] uses ping_interval [{}], ping_timeout [{}], ping_retries [{}]", pingInterval, pingRetryTimeout, pingRetryCount); @@ -208,15 +212,18 @@ public class NodesFaultDetection extends FaultDetection { return NodeFD.this.equals(nodesFD.get(node)); } + private PingRequest newPingRequest() { + return new PingRequest(node, clusterName, localNode, clusterStateSupplier.get().version()); + } + @Override public void run() { if (!running()) { return; } - final PingRequest pingRequest = new PingRequest(node, clusterName, localNode, clusterStateVersion); final TransportRequestOptions options = TransportRequestOptions.builder().withType(TransportRequestOptions.Type.PING) .withTimeout(pingRetryTimeout).build(); - transportService.sendRequest(node, PING_ACTION_NAME, pingRequest, options, new TransportResponseHandler() { + transportService.sendRequest(node, PING_ACTION_NAME, newPingRequest(), options, new TransportResponseHandler() { @Override public PingResponse newInstance() { return new PingResponse(); @@ -254,7 +261,7 @@ public class NodesFaultDetection extends FaultDetection { } } else { // resend the request, not reschedule, rely on send timeout - transportService.sendRequest(node, PING_ACTION_NAME, pingRequest, options, this); + transportService.sendRequest(node, PING_ACTION_NAME, newPingRequest(), options, this); } } diff 
--git a/server/src/main/java/org/elasticsearch/discovery/zen/ZenDiscovery.java b/server/src/main/java/org/elasticsearch/discovery/zen/ZenDiscovery.java index 4621e6769e9..02b2822fcf4 100644 --- a/server/src/main/java/org/elasticsearch/discovery/zen/ZenDiscovery.java +++ b/server/src/main/java/org/elasticsearch/discovery/zen/ZenDiscovery.java @@ -205,7 +205,7 @@ public class ZenDiscovery extends AbstractLifecycleComponent implements Discover this.masterFD = new MasterFaultDetection(settings, threadPool, transportService, this::clusterState, masterService, clusterName); this.masterFD.addListener(new MasterNodeFailureListener()); - this.nodesFD = new NodesFaultDetection(settings, threadPool, transportService, clusterName); + this.nodesFD = new NodesFaultDetection(settings, threadPool, transportService, this::clusterState, clusterName); this.nodesFD.addListener(new NodeFaultDetectionListener()); this.pendingStatesQueue = new PendingClusterStatesQueue(logger, MAX_PENDING_CLUSTER_STATES_SETTING.get(settings)); diff --git a/server/src/main/java/org/elasticsearch/index/query/InnerHitBuilder.java b/server/src/main/java/org/elasticsearch/index/query/InnerHitBuilder.java index 6bb8e0259fb..92da1bc3b65 100644 --- a/server/src/main/java/org/elasticsearch/index/query/InnerHitBuilder.java +++ b/server/src/main/java/org/elasticsearch/index/query/InnerHitBuilder.java @@ -33,6 +33,7 @@ import org.elasticsearch.script.Script; import org.elasticsearch.search.builder.SearchSourceBuilder; import org.elasticsearch.search.builder.SearchSourceBuilder.ScriptField; import org.elasticsearch.search.fetch.StoredFieldsContext; +import org.elasticsearch.search.fetch.subphase.DocValueFieldsContext.FieldAndFormat; import org.elasticsearch.search.fetch.subphase.FetchSourceContext; import org.elasticsearch.search.fetch.subphase.highlight.HighlightBuilder; import org.elasticsearch.search.sort.SortBuilder; @@ -45,6 +46,7 @@ import java.util.Iterator; import java.util.List; import java.util.Objects; import java.util.Set; +import java.util.stream.Collectors; import static org.elasticsearch.common.xcontent.XContentParser.Token.END_OBJECT; @@ -65,7 +67,8 @@ public final class InnerHitBuilder implements Writeable, ToXContentObject { PARSER.declareBoolean(InnerHitBuilder::setVersion, SearchSourceBuilder.VERSION_FIELD); PARSER.declareBoolean(InnerHitBuilder::setTrackScores, SearchSourceBuilder.TRACK_SCORES_FIELD); PARSER.declareStringArray(InnerHitBuilder::setStoredFieldNames, SearchSourceBuilder.STORED_FIELDS_FIELD); - PARSER.declareStringArray(InnerHitBuilder::setDocValueFields, SearchSourceBuilder.DOCVALUE_FIELDS_FIELD); + PARSER.declareObjectArray(InnerHitBuilder::setDocValueFields, + (p,c) -> FieldAndFormat.fromXContent(p), SearchSourceBuilder.DOCVALUE_FIELDS_FIELD); PARSER.declareField((p, i, c) -> { try { Set<ScriptField> scriptFields = new HashSet<>(); @@ -102,7 +105,7 @@ public final class InnerHitBuilder implements Writeable, ToXContentObject { private StoredFieldsContext storedFieldsContext; private QueryBuilder query = DEFAULT_INNER_HIT_QUERY; private List<SortBuilder<?>> sorts; - private List<String> docValueFields; + private List<FieldAndFormat> docValueFields; private Set<ScriptField> scriptFields; private HighlightBuilder highlightBuilder; private FetchSourceContext fetchSourceContext; @@ -134,7 +137,18 @@ public final class InnerHitBuilder implements Writeable, ToXContentObject { version = in.readBoolean(); trackScores = in.readBoolean(); storedFieldsContext = in.readOptionalWriteable(StoredFieldsContext::new); - docValueFields = (List<String>) in.readGenericValue(); + if (in.getVersion().before(Version.V_6_4_0)) { + List<String> fieldList = (List<String>) in.readGenericValue(); + if (fieldList == null) { + docValueFields = null; + } else { + docValueFields = fieldList.stream() + .map(field -> new FieldAndFormat(field, null)) + .collect(Collectors.toList()); + } + } else { + docValueFields = in.readBoolean() ? in.readList(FieldAndFormat::new) : null; + } if (in.readBoolean()) { int size = in.readVInt(); scriptFields = new HashSet<>(size); @@ -174,7 +188,16 @@ public final class InnerHitBuilder implements Writeable, ToXContentObject { out.writeBoolean(version); out.writeBoolean(trackScores); out.writeOptionalWriteable(storedFieldsContext); - out.writeGenericValue(docValueFields); + if (out.getVersion().before(Version.V_6_4_0)) { + out.writeGenericValue(docValueFields == null + ? null + : docValueFields.stream().map(ff -> ff.field).collect(Collectors.toList())); + } else { + out.writeBoolean(docValueFields != null); + if (docValueFields != null) { + out.writeList(docValueFields); + } + } boolean hasScriptFields = scriptFields != null; out.writeBoolean(hasScriptFields); if (hasScriptFields) { @@ -248,7 +271,9 @@ public final class InnerHitBuilder implements Writeable, ToXContentObject { out.writeBoolean(version); out.writeBoolean(trackScores); out.writeOptionalWriteable(storedFieldsContext); - out.writeGenericValue(docValueFields); + out.writeGenericValue(docValueFields == null + ? null + : docValueFields.stream().map(ff -> ff.field).collect(Collectors.toList())); boolean hasScriptFields = scriptFields != null; out.writeBoolean(hasScriptFields); if (hasScriptFields) { @@ -390,14 +415,14 @@ public final class InnerHitBuilder implements Writeable, ToXContentObject { /** * Gets the docvalue fields. */ - public List<String> getDocValueFields() { + public List<FieldAndFormat> getDocValueFields() { return docValueFields; } /** * Sets the stored fields to load from the docvalue and return. */ - public InnerHitBuilder setDocValueFields(List<String> docValueFields) { + public InnerHitBuilder setDocValueFields(List<FieldAndFormat> docValueFields) { this.docValueFields = docValueFields; return this; } @@ -405,14 +430,21 @@ public final class InnerHitBuilder implements Writeable, ToXContentObject { /** * Adds a field to load from the docvalue and return. */ - public InnerHitBuilder addDocValueField(String field) { + public InnerHitBuilder addDocValueField(String field, String format) { if (docValueFields == null) { docValueFields = new ArrayList<>(); } - docValueFields.add(field); + docValueFields.add(new FieldAndFormat(field, format)); return this; } + /** + * Adds a field to load from doc values and return.
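+ * Passing a null format through this overload keeps the field's raw + * doc-value rendering, i.e. the pre-6.4 behaviour.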
+ */ + public InnerHitBuilder addDocValueField(String field) { + return addDocValueField(field, null); + } + public Set getScriptFields() { return scriptFields; } @@ -489,8 +521,15 @@ public final class InnerHitBuilder implements Writeable, ToXContentObject { } if (docValueFields != null) { builder.startArray(SearchSourceBuilder.DOCVALUE_FIELDS_FIELD.getPreferredName()); - for (String docValueField : docValueFields) { - builder.value(docValueField); + for (FieldAndFormat docValueField : docValueFields) { + if (docValueField.format == null) { + builder.value(docValueField.field); + } else { + builder.startObject() + .field("field", docValueField.field) + .field("format", docValueField.format) + .endObject(); + } } builder.endArray(); } diff --git a/server/src/main/java/org/elasticsearch/indices/IndicesQueryCache.java b/server/src/main/java/org/elasticsearch/indices/IndicesQueryCache.java index 60741c87f21..2695c172849 100644 --- a/server/src/main/java/org/elasticsearch/indices/IndicesQueryCache.java +++ b/server/src/main/java/org/elasticsearch/indices/IndicesQueryCache.java @@ -52,8 +52,10 @@ public class IndicesQueryCache extends AbstractComponent implements QueryCache, public static final Setting INDICES_CACHE_QUERY_SIZE_SETTING = Setting.memorySizeSetting("indices.queries.cache.size", "10%", Property.NodeScope); + // mostly a way to prevent queries from being the main source of memory usage + // of the cache public static final Setting INDICES_CACHE_QUERY_COUNT_SETTING = - Setting.intSetting("indices.queries.cache.count", 1000, 1, Property.NodeScope); + Setting.intSetting("indices.queries.cache.count", 10_000, 1, Property.NodeScope); // enables caching on all segments instead of only the larger ones, for testing only public static final Setting INDICES_QUERIES_CACHE_ALL_SEGMENTS_SETTING = Setting.boolSetting("indices.queries.cache.all_segments", false, Property.NodeScope); diff --git a/server/src/main/java/org/elasticsearch/node/Node.java b/server/src/main/java/org/elasticsearch/node/Node.java index 054b91dc511..44ecb6b04d6 100644 --- a/server/src/main/java/org/elasticsearch/node/Node.java +++ b/server/src/main/java/org/elasticsearch/node/Node.java @@ -230,6 +230,7 @@ public class Node implements Closeable { private final Lifecycle lifecycle = new Lifecycle(); private final Injector injector; private final Settings settings; + private final Settings originalSettings; private final Environment environment; private final NodeEnvironment nodeEnvironment; private final PluginsService pluginsService; @@ -260,6 +261,7 @@ public class Node implements Closeable { logger.info("initializing ..."); } try { + originalSettings = environment.settings(); Settings tmpSettings = Settings.builder().put(environment.settings()) .put(Client.CLIENT_TYPE_SETTING_S.getKey(), CLIENT_TYPE).build(); @@ -563,7 +565,14 @@ public class Node implements Closeable { } /** - * The settings that were used to create the node. + * The original settings that were used to create the node + */ + public Settings originalSettings() { + return originalSettings; + } + + /** + * The settings that are used by this node. Contains original settings as well as additional settings provided by plugins. 
*/ public Settings settings() { return this.settings; diff --git a/server/src/main/java/org/elasticsearch/plugins/ClusterPlugin.java b/server/src/main/java/org/elasticsearch/plugins/ClusterPlugin.java index 5e58aa5a3a9..61145c7a1d7 100644 --- a/server/src/main/java/org/elasticsearch/plugins/ClusterPlugin.java +++ b/server/src/main/java/org/elasticsearch/plugins/ClusterPlugin.java @@ -70,6 +70,8 @@ public interface ClusterPlugin { * Returns a map of {@link ClusterState.Custom} supplier that should be invoked to initialize the initial clusterstate. * This allows custom clusterstate extensions to be always present and prevents invariants where clusterstates are published * but customs are not initialized. + * + * TODO: Remove this whole concept of InitialClusterStateCustomSupplier, it's not used anymore */ default Map> getInitialClusterStateCustomSupplier() { return Collections.emptyMap(); } } diff --git a/server/src/main/java/org/elasticsearch/rest/action/search/RestSearchAction.java b/server/src/main/java/org/elasticsearch/rest/action/search/RestSearchAction.java index 3098dc03a8c..4074d1a8fa1 100644 --- a/server/src/main/java/org/elasticsearch/rest/action/search/RestSearchAction.java +++ b/server/src/main/java/org/elasticsearch/rest/action/search/RestSearchAction.java @@ -214,7 +214,7 @@ public class RestSearchAction extends BaseRestHandler { if (Strings.hasText(sDocValueFields)) { String[] sFields = Strings.splitStringByCommaToArray(sDocValueFields); for (String field : sFields) { - searchSourceBuilder.docValueField(field); + searchSourceBuilder.docValueField(field, null); } } } diff --git a/server/src/main/java/org/elasticsearch/search/DocValueFormat.java b/server/src/main/java/org/elasticsearch/search/DocValueFormat.java index 1824f17941b..8677370fc99 100644 --- a/server/src/main/java/org/elasticsearch/search/DocValueFormat.java +++ b/server/src/main/java/org/elasticsearch/search/DocValueFormat.java @@ -49,17 +49,17 @@ public interface DocValueFormat extends NamedWriteable { /** Format a long value. This is used by terms and histogram aggregations * to format keys for fields that use longs as a doc value representation * such as the {@code long} and {@code date} fields. */ - String format(long value); + Object format(long value); /** Format a double value. This is used by terms and stats aggregations * to format keys for fields that use numbers as a doc value representation * such as the {@code long}, {@code double} or {@code date} fields. */ - String format(double value); + Object format(double value); /** Format a binary value. This is used by terms aggregations to format * keys for fields that use binary doc value representations such as the * {@code keyword} and {@code ip} fields. */ - String format(BytesRef value); + Object format(BytesRef value); /** Parse a value that was formatted with {@link #format(long)} back to the * original long value. 
*/ @@ -85,13 +85,13 @@ public interface DocValueFormat extends NamedWriteable { } @Override - public String format(long value) { - return Long.toString(value); + public Long format(long value) { + return value; } @Override - public String format(double value) { - return Double.toString(value); + public Double format(double value) { + return value; } @Override @@ -235,13 +235,13 @@ public interface DocValueFormat extends NamedWriteable { } @Override - public String format(long value) { - return java.lang.Boolean.valueOf(value != 0).toString(); + public Boolean format(long value) { + return java.lang.Boolean.valueOf(value != 0); } @Override - public String format(double value) { - return java.lang.Boolean.valueOf(value != 0).toString(); + public Boolean format(double value) { + return java.lang.Boolean.valueOf(value != 0); } @Override diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/histogram/DateHistogramAggregationBuilder.java b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/histogram/DateHistogramAggregationBuilder.java index c72e9d22dc0..bb391f21f1e 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/histogram/DateHistogramAggregationBuilder.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/histogram/DateHistogramAggregationBuilder.java @@ -407,8 +407,8 @@ public class DateHistogramAggregationBuilder extends ValuesSourceAggregationBuil final long high = nextTransition; final DocValueFormat format = ft.docValueFormat(null, null); - final String formattedLow = format.format(low); - final String formattedHigh = format.format(high); + final Object formattedLow = format.format(low); + final Object formattedHigh = format.format(high); if (ft.isFieldWithinQuery(reader, formattedLow, formattedHigh, true, false, tz, null, context) == Relation.WITHIN) { // All values in this reader have the same offset despite daylight saving times. 
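The DocValueFormat hunks above and the histogram/terms hunks below are two sides of one contract change: format(...) now returns Object, so raw longs, doubles and booleans reach the JSON layer as native types, and call sites that still need a string key append toString() explicitly. A minimal sketch of the new behaviour, assuming the interface's RAW singleton shown above and a BOOLEAN singleton for the boolean formatter (that constant's name is not visible in these hunks):

    Object raw = DocValueFormat.RAW.format(42L);       // boxed Long, serialized as the JSON number 42
    String key = raw.toString();                       // "42" where a string bucket key is still required
    Object flag = DocValueFormat.BOOLEAN.format(1L);   // Boolean.TRUE, serialized as the JSON literal true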
diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/histogram/InternalDateHistogram.java b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/histogram/InternalDateHistogram.java index dfa12db0cd3..84dec2c983e 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/histogram/InternalDateHistogram.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/histogram/InternalDateHistogram.java @@ -107,7 +107,7 @@ public final class InternalDateHistogram extends InternalMultiBucketAggregation< @Override public String getKeyAsString() { - return format.format(key); + return format.format(key).toString(); } @Override @@ -138,7 +138,7 @@ public final class InternalDateHistogram extends InternalMultiBucketAggregation< @Override public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { - String keyAsString = format.format(key); + String keyAsString = format.format(key).toString(); if (keyed) { builder.startObject(keyAsString); } else { diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/histogram/InternalHistogram.java b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/histogram/InternalHistogram.java index b3516b04dfc..1831e012a31 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/histogram/InternalHistogram.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/histogram/InternalHistogram.java @@ -103,7 +103,7 @@ public final class InternalHistogram extends InternalMultiBucketAggregation @Override public String getKeyAsString() { - return format.format(term); + return format.format(term).toString(); } @Override @@ -90,7 +90,7 @@ public class LongTerms extends InternalMappedTerms protected final XContentBuilder keyToXContent(XContentBuilder builder) throws IOException { builder.field(CommonFields.KEY.getPreferredName(), term); if (format != DocValueFormat.RAW) { - builder.field(CommonFields.KEY_AS_STRING.getPreferredName(), format.format(term)); + builder.field(CommonFields.KEY_AS_STRING.getPreferredName(), format.format(term).toString()); } return builder; } diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/StringTerms.java b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/StringTerms.java index 11d0b40c7ce..4971f74f03d 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/StringTerms.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/StringTerms.java @@ -80,7 +80,7 @@ public class StringTerms extends InternalMappedTerms> sorts = null; private HighlightBuilder highlightBuilder; private StoredFieldsContext storedFieldsContext; - private List fieldDataFields; + private List docValueFields; private Set scriptFields; private FetchSourceContext fetchSourceContext; @@ -91,7 +92,7 @@ public class TopHitsAggregationBuilder extends AbstractAggregationBuilder(clone.fieldDataFields); + this.docValueFields = clone.docValueFields == null ? null : new ArrayList<>(clone.docValueFields); this.scriptFields = clone.scriptFields == null ? null : new HashSet<>(clone.scriptFields); this.fetchSourceContext = clone.fetchSourceContext == null ? 
null : new FetchSourceContext(clone.fetchSourceContext.fetchSource(), clone.fetchSourceContext.includes(), @@ -112,9 +113,9 @@ public class TopHitsAggregationBuilder extends AbstractAggregationBuilder(size); + docValueFields = new ArrayList<>(size); for (int i = 0; i < size; i++) { - fieldDataFields.add(in.readString()); + docValueFields.add(new FieldAndFormat(in)); } } storedFieldsContext = in.readOptionalWriteable(StoredFieldsContext::new); @@ -143,12 +144,12 @@ public class TopHitsAggregationBuilder extends AbstractAggregationBuilder(); + if (docValueFields == null) { + docValueFields = new ArrayList<>(); } - fieldDataFields.add(fieldDataField); + docValueFields.add(new FieldAndFormat(docValueField, format)); return this; } /** - * Adds fields to load from the field data cache and return as part of + * Adds a field to load from doc values and return as part of * the search request. */ - public TopHitsAggregationBuilder fieldDataFields(List fieldDataFields) { - if (fieldDataFields == null) { - throw new IllegalArgumentException("[fieldDataFields] must not be null: [" + name + "]"); - } - if (this.fieldDataFields == null) { - this.fieldDataFields = new ArrayList<>(); - } - this.fieldDataFields.addAll(fieldDataFields); - return this; + public TopHitsAggregationBuilder docValueField(String docValueField) { + return docValueField(docValueField, null); } /** * Gets the field-data fields. */ - public List fieldDataFields() { - return fieldDataFields; + public List fieldDataFields() { + return docValueFields; } /** @@ -587,7 +581,7 @@ public class TopHitsAggregationBuilder extends AbstractAggregationBuilder fieldDataFields = new ArrayList<>(); while ((token = parser.nextToken()) != XContentParser.Token.END_ARRAY) { - if (token == XContentParser.Token.VALUE_STRING) { - fieldDataFields.add(parser.text()); - } else { - throw new ParsingException(parser.getTokenLocation(), "Expected [" + XContentParser.Token.VALUE_STRING - + "] in [" + currentFieldName + "] but found [" + token + "]", parser.getTokenLocation()); - } + FieldAndFormat ff = FieldAndFormat.fromXContent(parser); + factory.docValueField(ff.field, ff.format); } - factory.fieldDataFields(fieldDataFields); } else if (SearchSourceBuilder.SORT_FIELD.match(currentFieldName, parser.getDeprecationHandler())) { List> sorts = SortBuilder.fromXContent(parser); factory.sorts(sorts); @@ -752,7 +746,7 @@ public class TopHitsAggregationBuilder extends AbstractAggregationBuilder sort; private final HighlightBuilder highlightBuilder; private final StoredFieldsContext storedFieldsContext; - private final List docValueFields; + private final List docValueFields; private final List scriptFields; private final FetchSourceContext fetchSourceContext; TopHitsAggregatorFactory(String name, int from, int size, boolean explain, boolean version, boolean trackScores, Optional sort, HighlightBuilder highlightBuilder, StoredFieldsContext storedFieldsContext, - List docValueFields, List scriptFields, FetchSourceContext fetchSourceContext, + List docValueFields, List scriptFields, FetchSourceContext fetchSourceContext, SearchContext context, AggregatorFactory parent, AggregatorFactories.Builder subFactories, Map metaData) throws IOException { super(name, context, parent, subFactories, metaData); diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/pipeline/InternalSimpleValue.java b/server/src/main/java/org/elasticsearch/search/aggregations/pipeline/InternalSimpleValue.java index 4fa4f1f6c86..2eac04a9581 100644 --- 
a/server/src/main/java/org/elasticsearch/search/aggregations/pipeline/InternalSimpleValue.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/pipeline/InternalSimpleValue.java @@ -85,7 +85,7 @@ public class InternalSimpleValue extends InternalNumericMetricsAggregation.Singl boolean hasValue = !(Double.isInfinite(value) || Double.isNaN(value)); builder.field(CommonFields.VALUE.getPreferredName(), hasValue ? value : null); if (hasValue && format != DocValueFormat.RAW) { - builder.field(CommonFields.VALUE_AS_STRING.getPreferredName(), format.format(value)); + builder.field(CommonFields.VALUE_AS_STRING.getPreferredName(), format.format(value).toString()); } return builder; } diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/pipeline/bucketmetrics/InternalBucketMetricValue.java b/server/src/main/java/org/elasticsearch/search/aggregations/pipeline/bucketmetrics/InternalBucketMetricValue.java index 76a240d3178..a7ef024028f 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/pipeline/bucketmetrics/InternalBucketMetricValue.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/pipeline/bucketmetrics/InternalBucketMetricValue.java @@ -108,7 +108,7 @@ public class InternalBucketMetricValue extends InternalNumericMetricsAggregation boolean hasValue = !Double.isInfinite(value); builder.field(CommonFields.VALUE.getPreferredName(), hasValue ? value : null); if (hasValue && format != DocValueFormat.RAW) { - builder.field(CommonFields.VALUE_AS_STRING.getPreferredName(), format.format(value)); + builder.field(CommonFields.VALUE_AS_STRING.getPreferredName(), format.format(value).toString()); } builder.startArray(KEYS_FIELD.getPreferredName()); for (String key : keys) { diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/pipeline/bucketmetrics/percentile/InternalPercentilesBucket.java b/server/src/main/java/org/elasticsearch/search/aggregations/pipeline/bucketmetrics/percentile/InternalPercentilesBucket.java index 42a8b28a51a..5d13638f70a 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/pipeline/bucketmetrics/percentile/InternalPercentilesBucket.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/pipeline/bucketmetrics/percentile/InternalPercentilesBucket.java @@ -97,7 +97,7 @@ public class InternalPercentilesBucket extends InternalNumericMetricsAggregation @Override public String percentileAsString(double percent) { - return format.format(percentile(percent)); + return format.format(percentile(percent)).toString(); } DocValueFormat formatter() { diff --git a/server/src/main/java/org/elasticsearch/search/builder/SearchSourceBuilder.java b/server/src/main/java/org/elasticsearch/search/builder/SearchSourceBuilder.java index c4a6b3da6b1..c42a1a12a18 100644 --- a/server/src/main/java/org/elasticsearch/search/builder/SearchSourceBuilder.java +++ b/server/src/main/java/org/elasticsearch/search/builder/SearchSourceBuilder.java @@ -47,6 +47,7 @@ import org.elasticsearch.search.aggregations.AggregatorFactories; import org.elasticsearch.search.aggregations.PipelineAggregationBuilder; import org.elasticsearch.search.collapse.CollapseBuilder; import org.elasticsearch.search.fetch.StoredFieldsContext; +import org.elasticsearch.search.fetch.subphase.DocValueFieldsContext.FieldAndFormat; import org.elasticsearch.search.fetch.subphase.FetchSourceContext; import org.elasticsearch.search.fetch.subphase.highlight.HighlightBuilder; import org.elasticsearch.search.internal.SearchContext; @@ 
-64,6 +65,7 @@ import java.util.ArrayList; import java.util.Collections; import java.util.List; import java.util.Objects; +import java.util.stream.Collectors; import static org.elasticsearch.index.query.AbstractQueryBuilder.parseInnerQueryBuilder; @@ -162,7 +164,7 @@ public final class SearchSourceBuilder implements Writeable, ToXContentObject, R private int terminateAfter = SearchContext.DEFAULT_TERMINATE_AFTER; private StoredFieldsContext storedFieldsContext; - private List docValueFields; + private List docValueFields; private List scriptFields; private FetchSourceContext fetchSourceContext; @@ -197,7 +199,22 @@ public final class SearchSourceBuilder implements Writeable, ToXContentObject, R aggregations = in.readOptionalWriteable(AggregatorFactories.Builder::new); explain = in.readOptionalBoolean(); fetchSourceContext = in.readOptionalWriteable(FetchSourceContext::new); - docValueFields = (List) in.readGenericValue(); + if (in.getVersion().before(Version.V_6_4_0)) { + List dvFields = (List) in.readGenericValue(); + if (dvFields == null) { + docValueFields = null; + } else { + docValueFields = dvFields.stream() + .map(field -> new FieldAndFormat(field, null)) + .collect(Collectors.toList()); + } + } else { + if (in.readBoolean()) { + docValueFields = in.readList(FieldAndFormat::new); + } else { + docValueFields = null; + } + } storedFieldsContext = in.readOptionalWriteable(StoredFieldsContext::new); from = in.readVInt(); highlightBuilder = in.readOptionalWriteable(HighlightBuilder::new); @@ -246,7 +263,16 @@ public final class SearchSourceBuilder implements Writeable, ToXContentObject, R out.writeOptionalWriteable(aggregations); out.writeOptionalBoolean(explain); out.writeOptionalWriteable(fetchSourceContext); - out.writeGenericValue(docValueFields); + if (out.getVersion().before(Version.V_6_4_0)) { + out.writeGenericValue(docValueFields == null + ? null + : docValueFields.stream().map(ff -> ff.field).collect(Collectors.toList())); + } else { + out.writeBoolean(docValueFields != null); + if (docValueFields != null) { + out.writeList(docValueFields); + } + } out.writeOptionalWriteable(storedFieldsContext); out.writeVInt(from); out.writeOptionalWriteable(highlightBuilder); @@ -764,22 +790,30 @@ public final class SearchSourceBuilder implements Writeable, ToXContentObject, R /** * Gets the docvalue fields. */ - public List docValueFields() { + public List docValueFields() { return docValueFields; } /** - * Adds a field to load from the docvalue and return as part of the + * Adds a field to load from the doc values and return as part of the * search request. */ - public SearchSourceBuilder docValueField(String name) { + public SearchSourceBuilder docValueField(String name, @Nullable String format) { if (docValueFields == null) { docValueFields = new ArrayList<>(); } - docValueFields.add(name); + docValueFields.add(new FieldAndFormat(name, format)); return this; } + /** + * Adds a field to load from the doc values and return as part of the + * search request. + */ + public SearchSourceBuilder docValueField(String name) { + return docValueField(name, null); + } + /** * Adds a script field under the given name with the provided script. 
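The two `docValueField` overloads added above give `SearchSourceBuilder` per-field format support. A short usage sketch, assuming only the API introduced in this diff (the field names are made up):

```java
import org.elasticsearch.search.builder.SearchSourceBuilder;

// Illustrative only: builds a source with one formatted and one unformatted
// doc-value field; the one-argument overload delegates to (name, null).
public class DocValueFieldRequestSketch {
    public static void main(String[] args) {
        SearchSourceBuilder source = new SearchSourceBuilder()
            .docValueField("release_date", "epoch_millis") // explicit format
            .docValueField("rating");                      // same as docValueField("rating", null)
        // toString() renders the JSON body; docvalue_fields entries are now
        // objects carrying "field" and, when present, "format".
        System.out.println(source);
    }
}
```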
* @@ -1076,12 +1110,7 @@ public final class SearchSourceBuilder implements Writeable, ToXContentObject, R } else if (DOCVALUE_FIELDS_FIELD.match(currentFieldName, parser.getDeprecationHandler())) { docValueFields = new ArrayList<>(); while ((token = parser.nextToken()) != XContentParser.Token.END_ARRAY) { - if (token == XContentParser.Token.VALUE_STRING) { - docValueFields.add(parser.text()); - } else { - throw new ParsingException(parser.getTokenLocation(), "Expected [" + XContentParser.Token.VALUE_STRING + - "] in [" + currentFieldName + "] but found [" + token + "]", parser.getTokenLocation()); - } + docValueFields.add(FieldAndFormat.fromXContent(parser)); } } else if (INDICES_BOOST_FIELD.match(currentFieldName, parser.getDeprecationHandler())) { while ((token = parser.nextToken()) != XContentParser.Token.END_ARRAY) { @@ -1177,8 +1206,13 @@ public final class SearchSourceBuilder implements Writeable, ToXContentObject, R if (docValueFields != null) { builder.startArray(DOCVALUE_FIELDS_FIELD.getPreferredName()); - for (String docValueField : docValueFields) { - builder.value(docValueField); + for (FieldAndFormat docValueField : docValueFields) { + builder.startObject() + .field("field", docValueField.field); + if (docValueField.format != null) { + builder.field("format", docValueField.format); + } + builder.endObject(); } builder.endArray(); } diff --git a/server/src/main/java/org/elasticsearch/search/fetch/subphase/DocValueFieldsContext.java b/server/src/main/java/org/elasticsearch/search/fetch/subphase/DocValueFieldsContext.java index 325d28e4592..cf1596fd326 100644 --- a/server/src/main/java/org/elasticsearch/search/fetch/subphase/DocValueFieldsContext.java +++ b/server/src/main/java/org/elasticsearch/search/fetch/subphase/DocValueFieldsContext.java @@ -18,23 +18,111 @@ */ package org.elasticsearch.search.fetch.subphase; +import org.elasticsearch.Version; +import org.elasticsearch.common.Nullable; +import org.elasticsearch.common.ParseField; +import org.elasticsearch.common.io.stream.StreamInput; +import org.elasticsearch.common.io.stream.StreamOutput; +import org.elasticsearch.common.io.stream.Writeable; +import org.elasticsearch.common.xcontent.ConstructingObjectParser; +import org.elasticsearch.common.xcontent.XContent; +import org.elasticsearch.common.xcontent.XContentParser; +import org.elasticsearch.common.xcontent.XContentParser.Token; + +import java.io.IOException; import java.util.List; +import java.util.Objects; /** * All the required context to pull a field from the doc values. */ public class DocValueFieldsContext { - private final List fields; + public static final String USE_DEFAULT_FORMAT = "use_field_mapping"; - public DocValueFieldsContext(List fields) { + /** + * Wrapper around a field name and the format that should be used to + * display values of this field. + */ + public static final class FieldAndFormat implements Writeable { + + private static final ConstructingObjectParser PARSER = new ConstructingObjectParser<>("script", + a -> new FieldAndFormat((String) a[0], (String) a[1])); + static { + PARSER.declareString(ConstructingObjectParser.constructorArg(), new ParseField("field")); + PARSER.declareStringOrNull(ConstructingObjectParser.optionalConstructorArg(), new ParseField("format")); + } + + /** + * Parse a {@link FieldAndFormat} from some {@link XContent}. 
+ */ + public static FieldAndFormat fromXContent(XContentParser parser) throws IOException { + Token token = parser.currentToken(); + if (token.isValue()) { + return new FieldAndFormat(parser.text(), null); + } else { + return PARSER.apply(parser, null); + } + } + + /** The name of the field. */ + public final String field; + + /** The format of the field, or {@code null} if defaults should be used. */ + public final String format; + + /** Sole constructor. */ + public FieldAndFormat(String field, @Nullable String format) { + this.field = Objects.requireNonNull(field); + this.format = format; + } + + /** Serialization constructor. */ + public FieldAndFormat(StreamInput in) throws IOException { + this.field = in.readString(); + if (in.getVersion().onOrAfter(Version.V_6_4_0)) { + format = in.readOptionalString(); + } else { + format = null; + } + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + out.writeString(field); + if (out.getVersion().onOrAfter(Version.V_6_4_0)) { + out.writeOptionalString(format); + } + } + + @Override + public int hashCode() { + int h = field.hashCode(); + h = 31 * h + Objects.hashCode(format); + return h; + } + + @Override + public boolean equals(Object obj) { + if (obj == null || getClass() != obj.getClass()) { + return false; + } + FieldAndFormat other = (FieldAndFormat) obj; + return field.equals(other.field) && Objects.equals(format, other.format); + } + + } + + private final List fields; + + public DocValueFieldsContext(List fields) { this.fields = fields; } /** * Returns the required docvalue fields */ - public List fields() { + public List fields() { return this.fields; } } diff --git a/server/src/main/java/org/elasticsearch/search/fetch/subphase/DocValueFieldsFetchSubPhase.java b/server/src/main/java/org/elasticsearch/search/fetch/subphase/DocValueFieldsFetchSubPhase.java index 60def08c891..a1562e118fb 100644 --- a/server/src/main/java/org/elasticsearch/search/fetch/subphase/DocValueFieldsFetchSubPhase.java +++ b/server/src/main/java/org/elasticsearch/search/fetch/subphase/DocValueFieldsFetchSubPhase.java @@ -20,19 +20,32 @@ package org.elasticsearch.search.fetch.subphase; import org.apache.lucene.index.LeafReaderContext; import org.apache.lucene.index.ReaderUtil; +import org.apache.lucene.index.SortedNumericDocValues; import org.elasticsearch.common.document.DocumentField; +import org.elasticsearch.common.logging.DeprecationLogger; +import org.elasticsearch.common.logging.Loggers; import org.elasticsearch.index.fielddata.AtomicFieldData; +import org.elasticsearch.index.fielddata.AtomicNumericFieldData; +import org.elasticsearch.index.fielddata.IndexFieldData; +import org.elasticsearch.index.fielddata.IndexNumericFieldData; import org.elasticsearch.index.fielddata.ScriptDocValues; +import org.elasticsearch.index.fielddata.SortedBinaryDocValues; +import org.elasticsearch.index.fielddata.SortedNumericDoubleValues; import org.elasticsearch.index.mapper.MappedFieldType; +import org.elasticsearch.search.DocValueFormat; import org.elasticsearch.search.SearchHit; import org.elasticsearch.search.fetch.FetchSubPhase; +import org.elasticsearch.search.fetch.subphase.DocValueFieldsContext.FieldAndFormat; import org.elasticsearch.search.internal.SearchContext; import java.io.IOException; import java.util.ArrayList; import java.util.Arrays; import java.util.Collections; +import java.util.Comparator; import java.util.HashMap; +import java.util.List; +import java.util.Objects; /** * Query sub phase which pulls data from doc values @@ -41,6 
+54,8 @@ import java.util.HashMap; */ public final class DocValueFieldsFetchSubPhase implements FetchSubPhase { + private static final DeprecationLogger DEPRECATION_LOGGER = new DeprecationLogger(Loggers.getLogger(DocValueFieldsFetchSubPhase.class)); + @Override public void hitsExecute(SearchContext context, SearchHit[] hits) throws IOException { @@ -48,9 +63,10 @@ public final class DocValueFieldsFetchSubPhase implements FetchSubPhase { // retrieve the `doc_value` associated with the collapse field String name = context.collapse().getFieldType().name(); if (context.docValueFieldsContext() == null) { - context.docValueFieldsContext(new DocValueFieldsContext(Collections.singletonList(name))); - } else if (context.docValueFieldsContext().fields().contains(name) == false) { - context.docValueFieldsContext().fields().add(name); + context.docValueFieldsContext(new DocValueFieldsContext( + Collections.singletonList(new FieldAndFormat(name, DocValueFieldsContext.USE_DEFAULT_FORMAT)))); + } else if (context.docValueFieldsContext().fields().stream().map(ff -> ff.field).anyMatch(name::equals) == false) { + context.docValueFieldsContext().fields().add(new FieldAndFormat(name, DocValueFieldsContext.USE_DEFAULT_FORMAT)); } } @@ -59,24 +75,51 @@ public final class DocValueFieldsFetchSubPhase implements FetchSubPhase { } hits = hits.clone(); // don't modify the incoming hits - Arrays.sort(hits, (a, b) -> Integer.compare(a.docId(), b.docId())); + Arrays.sort(hits, Comparator.comparingInt(SearchHit::docId)); - for (String field : context.docValueFieldsContext().fields()) { + for (FieldAndFormat fieldAndFormat : context.docValueFieldsContext().fields()) { + String field = fieldAndFormat.field; MappedFieldType fieldType = context.mapperService().fullName(field); if (fieldType != null) { + final IndexFieldData indexFieldData = context.getForField(fieldType); + final DocValueFormat format; + if (fieldAndFormat.format == null) { + DEPRECATION_LOGGER.deprecated("Doc-value field [" + fieldAndFormat.field + "] is not using a format. The output will " + + "change in 7.0 when doc value fields get formatted based on mappings by default. 
It is recommended to pass " + + "[format={}] with the doc value field in order to opt in for the future behaviour and ease the migration to " + + "7.0.", DocValueFieldsContext.USE_DEFAULT_FORMAT); + format = null; + } else { + String formatDesc = fieldAndFormat.format; + if (Objects.equals(formatDesc, DocValueFieldsContext.USE_DEFAULT_FORMAT)) { + formatDesc = null; + } + format = fieldType.docValueFormat(formatDesc, null); + } LeafReaderContext subReaderContext = null; AtomicFieldData data = null; - ScriptDocValues values = null; + ScriptDocValues scriptValues = null; // legacy + SortedBinaryDocValues binaryValues = null; // binary / string / ip fields + SortedNumericDocValues longValues = null; // int / date fields + SortedNumericDoubleValues doubleValues = null; // floating-point fields for (SearchHit hit : hits) { // if the reader index has changed we need to get a new doc values reader instance if (subReaderContext == null || hit.docId() >= subReaderContext.docBase + subReaderContext.reader().maxDoc()) { int readerIndex = ReaderUtil.subIndex(hit.docId(), context.searcher().getIndexReader().leaves()); subReaderContext = context.searcher().getIndexReader().leaves().get(readerIndex); - data = context.getForField(fieldType).load(subReaderContext); - values = data.getScriptValues(); + data = indexFieldData.load(subReaderContext); + if (format == null) { + scriptValues = data.getScriptValues(); + } else if (indexFieldData instanceof IndexNumericFieldData) { + if (((IndexNumericFieldData) indexFieldData).getNumericType().isFloatingPoint()) { + doubleValues = ((AtomicNumericFieldData) data).getDoubleValues(); + } else { + longValues = ((AtomicNumericFieldData) data).getLongValues(); + } + } else { + binaryValues = data.getBytesValues(); + } } - int subDocId = hit.docId() - subReaderContext.docBase; - values.setNextDocId(subDocId); if (hit.fieldsOrNull() == null) { hit.fields(new HashMap<>(2)); } @@ -85,7 +128,33 @@ public final class DocValueFieldsFetchSubPhase implements FetchSubPhase { hitField = new DocumentField(field, new ArrayList<>(2)); hit.getFields().put(field, hitField); } - hitField.getValues().addAll(values); + final List values = hitField.getValues(); + + int subDocId = hit.docId() - subReaderContext.docBase; + if (scriptValues != null) { + scriptValues.setNextDocId(subDocId); + values.addAll(scriptValues); + } else if (binaryValues != null) { + if (binaryValues.advanceExact(subDocId)) { + for (int i = 0, count = binaryValues.docValueCount(); i < count; ++i) { + values.add(format.format(binaryValues.nextValue())); + } + } + } else if (longValues != null) { + if (longValues.advanceExact(subDocId)) { + for (int i = 0, count = longValues.docValueCount(); i < count; ++i) { + values.add(format.format(longValues.nextValue())); + } + } + } else if (doubleValues != null) { + if (doubleValues.advanceExact(subDocId)) { + for (int i = 0, count = doubleValues.docValueCount(); i < count; ++i) { + values.add(format.format(doubleValues.nextValue())); + } + } + } else { + throw new AssertionError("Unreachable code"); + } } } } diff --git a/server/src/test/java/org/elasticsearch/action/admin/cluster/repositories/RepositoryBlocksIT.java b/server/src/test/java/org/elasticsearch/action/admin/cluster/repositories/RepositoryBlocksIT.java index 0aa5691dc67..43d94f56e5a 100644 --- a/server/src/test/java/org/elasticsearch/action/admin/cluster/repositories/RepositoryBlocksIT.java +++ b/server/src/test/java/org/elasticsearch/action/admin/cluster/repositories/RepositoryBlocksIT.java @@ -67,7 +67,7 @@ public 
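To summarize the branching in `DocValueFieldsFetchSubPhase` above: a distilled sketch, not part of the patch, of how the requested format is resolved for each doc-value field. A `null` format keeps the deprecated `ScriptDocValues` path, and the `use_field_mapping` sentinel opts into the mapping-defined default:

```java
import org.elasticsearch.index.mapper.MappedFieldType;
import org.elasticsearch.search.DocValueFormat;
import org.elasticsearch.search.fetch.subphase.DocValueFieldsContext;

// Illustrative helper mirroring the sub phase's format resolution.
public class FormatResolutionSketch {
    static DocValueFormat resolve(MappedFieldType fieldType, String requestedFormat) {
        if (requestedFormat == null) {
            return null; // legacy path: ScriptDocValues plus a deprecation warning
        }
        String formatDesc = DocValueFieldsContext.USE_DEFAULT_FORMAT.equals(requestedFormat)
            ? null // sentinel: let the field type pick its mapping-based default
            : requestedFormat;
        return fieldType.docValueFormat(formatDesc, null);
    }
}
```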
class RepositoryBlocksIT extends ESIntegTestCase { try { setClusterReadOnly(true); VerifyRepositoryResponse response = client().admin().cluster().prepareVerifyRepository("test-repo-blocks").execute().actionGet(); - assertThat(response.getNodes().length, equalTo(cluster().numDataAndMasterNodes())); + assertThat(response.getNodes().size(), equalTo(cluster().numDataAndMasterNodes())); } finally { setClusterReadOnly(false); } diff --git a/server/src/test/java/org/elasticsearch/action/admin/cluster/snapshots/SnapshotBlocksIT.java b/server/src/test/java/org/elasticsearch/action/admin/cluster/snapshots/SnapshotBlocksIT.java index c66fa4b244f..5ca7cb1e506 100644 --- a/server/src/test/java/org/elasticsearch/action/admin/cluster/snapshots/SnapshotBlocksIT.java +++ b/server/src/test/java/org/elasticsearch/action/admin/cluster/snapshots/SnapshotBlocksIT.java @@ -73,7 +73,7 @@ public class SnapshotBlocksIT extends ESIntegTestCase { logger.info("--> verify the repository"); VerifyRepositoryResponse verifyResponse = client().admin().cluster().prepareVerifyRepository(REPOSITORY_NAME).get(); - assertThat(verifyResponse.getNodes().length, equalTo(cluster().numDataAndMasterNodes())); + assertThat(verifyResponse.getNodes().size(), equalTo(cluster().numDataAndMasterNodes())); logger.info("--> create a snapshot"); CreateSnapshotResponse snapshotResponse = client().admin().cluster().prepareCreateSnapshot(REPOSITORY_NAME, SNAPSHOT_NAME) diff --git a/server/src/test/java/org/elasticsearch/action/admin/indices/settings/put/UpdateSettingsRequestStreamableTests.java b/server/src/test/java/org/elasticsearch/action/admin/indices/settings/put/UpdateSettingsRequestStreamableTests.java index 114af3c13e7..e8dd3943cb7 100644 --- a/server/src/test/java/org/elasticsearch/action/admin/indices/settings/put/UpdateSettingsRequestStreamableTests.java +++ b/server/src/test/java/org/elasticsearch/action/admin/indices/settings/put/UpdateSettingsRequestStreamableTests.java @@ -24,6 +24,7 @@ import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.settings.Settings.Builder; import org.elasticsearch.common.util.CollectionUtils; import org.elasticsearch.test.AbstractStreamableTestCase; +import org.elasticsearch.test.ESTestCase; import java.util.ArrayList; import java.util.Arrays; @@ -38,8 +39,9 @@ public class UpdateSettingsRequestStreamableTests extends AbstractStreamableTest protected UpdateSettingsRequest mutateInstance(UpdateSettingsRequest request) { UpdateSettingsRequest mutation = copyRequest(request); List mutators = new ArrayList<>(); - mutators.add(() -> mutation.masterNodeTimeout(randomTimeValue())); - mutators.add(() -> mutation.timeout(randomTimeValue())); + mutators.add(() -> mutation + .masterNodeTimeout(randomValueOtherThan(request.masterNodeTimeout().getStringRep(), ESTestCase::randomTimeValue))); + mutators.add(() -> mutation.timeout(randomValueOtherThan(request.masterNodeTimeout().getStringRep(), ESTestCase::randomTimeValue))); mutators.add(() -> mutation.settings(mutateSettings(request.settings()))); mutators.add(() -> mutation.indices(mutateIndices(request.indices()))); mutators.add(() -> mutation.indicesOptions(randomValueOtherThan(request.indicesOptions(), @@ -72,7 +74,7 @@ public class UpdateSettingsRequestStreamableTests extends AbstractStreamableTest private static UpdateSettingsRequest copyRequest(UpdateSettingsRequest request) { UpdateSettingsRequest result = new UpdateSettingsRequest(request.settings(), request.indices()); - result.masterNodeTimeout(request.timeout()); + 
result.masterNodeTimeout(request.masterNodeTimeout()); result.timeout(request.timeout()); result.indicesOptions(request.indicesOptions()); result.setPreserveExisting(request.isPreserveExisting()); diff --git a/server/src/test/java/org/elasticsearch/cluster/ack/AckClusterUpdateSettingsIT.java b/server/src/test/java/org/elasticsearch/cluster/ack/AckClusterUpdateSettingsIT.java index ab3f82fff75..a11ceddf287 100644 --- a/server/src/test/java/org/elasticsearch/cluster/ack/AckClusterUpdateSettingsIT.java +++ b/server/src/test/java/org/elasticsearch/cluster/ack/AckClusterUpdateSettingsIT.java @@ -23,6 +23,7 @@ import org.elasticsearch.action.admin.cluster.node.info.NodeInfo; import org.elasticsearch.action.admin.cluster.node.info.NodesInfoResponse; import org.elasticsearch.action.admin.cluster.settings.ClusterUpdateSettingsResponse; import org.elasticsearch.action.admin.indices.close.CloseIndexResponse; +import org.elasticsearch.action.admin.indices.create.CreateIndexResponse; import org.elasticsearch.action.admin.indices.open.OpenIndexResponse; import org.elasticsearch.client.Client; import org.elasticsearch.cluster.ClusterState; @@ -33,8 +34,16 @@ import org.elasticsearch.cluster.routing.allocation.decider.ConcurrentRebalanceA import org.elasticsearch.cluster.routing.allocation.decider.ThrottlingAllocationDecider; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.discovery.DiscoverySettings; +import org.elasticsearch.discovery.zen.PublishClusterStateAction; +import org.elasticsearch.plugins.Plugin; import org.elasticsearch.test.ESIntegTestCase; import org.elasticsearch.test.ESIntegTestCase.ClusterScope; +import org.elasticsearch.test.transport.MockTransportService; +import org.elasticsearch.transport.TransportService; + +import java.util.Arrays; +import java.util.Collection; +import java.util.stream.Stream; import static org.elasticsearch.test.ESIntegTestCase.Scope.TEST; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked; @@ -43,6 +52,11 @@ import static org.hamcrest.Matchers.equalTo; @ClusterScope(scope = TEST, minNumDataNodes = 2) public class AckClusterUpdateSettingsIT extends ESIntegTestCase { + @Override + protected Collection> nodePlugins() { + return Arrays.asList(MockTransportService.TestPlugin.class); + } + @Override protected Settings nodeSettings(int nodeOrdinal) { return Settings.builder() @@ -156,4 +170,32 @@ public class AckClusterUpdateSettingsIT extends ESIntegTestCase { assertThat(openIndexResponse.isAcknowledged(), equalTo(false)); ensureGreen("test"); // make sure that recovery from disk has completed, so that check index doesn't fail. 
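The `UpdateSettingsRequestStreamableTests` fix above relies on `randomValueOtherThan` to guarantee that a mutator actually changes the instance. A minimal sketch of that pattern, assuming only the `ESTestCase` helpers already used in this diff:

```java
import org.elasticsearch.test.ESTestCase;

// Illustrative only: randomValueOtherThan re-draws from the supplier until
// the result differs from the current value, so mutateInstance() can never
// return an equal copy by accident.
public class MutationSketch extends ESTestCase {
    public void testMutatedTimeoutDiffers() {
        String current = "30s"; // stands in for request.masterNodeTimeout().getStringRep()
        String mutated = randomValueOtherThan(current, ESTestCase::randomTimeValue);
        assertNotEquals(current, mutated);
    }
}
```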
} + + public void testAckingFailsIfNotPublishedToAllNodes() { + String masterNode = internalCluster().getMasterName(); + String nonMasterNode = Stream.of(internalCluster().getNodeNames()) + .filter(node -> node.equals(masterNode) == false).findFirst().get(); + + MockTransportService masterTransportService = + (MockTransportService) internalCluster().getInstance(TransportService.class, masterNode); + MockTransportService nonMasterTransportService = + (MockTransportService) internalCluster().getInstance(TransportService.class, nonMasterNode); + + logger.info("blocking cluster state publishing from master [{}] to non master [{}]", masterNode, nonMasterNode); + if (randomBoolean() && internalCluster().numMasterNodes() != 2) { + masterTransportService.addFailToSendNoConnectRule(nonMasterTransportService, PublishClusterStateAction.SEND_ACTION_NAME); + } else { + masterTransportService.addFailToSendNoConnectRule(nonMasterTransportService, PublishClusterStateAction.COMMIT_ACTION_NAME); + } + + CreateIndexResponse response = client().admin().indices().prepareCreate("test").get(); + assertFalse(response.isAcknowledged()); + + logger.info("waiting for cluster to reform"); + masterTransportService.clearRule(nonMasterTransportService); + + ensureStableCluster(internalCluster().size()); + + assertAcked(client().admin().indices().prepareDelete("test")); + } } diff --git a/server/src/test/java/org/elasticsearch/discovery/DiscoveryDisruptionIT.java b/server/src/test/java/org/elasticsearch/discovery/DiscoveryDisruptionIT.java index 344c5567a86..f6882912378 100644 --- a/server/src/test/java/org/elasticsearch/discovery/DiscoveryDisruptionIT.java +++ b/server/src/test/java/org/elasticsearch/discovery/DiscoveryDisruptionIT.java @@ -53,7 +53,6 @@ import java.util.concurrent.CountDownLatch; import static org.elasticsearch.cluster.metadata.IndexMetaData.INDEX_NUMBER_OF_REPLICAS_SETTING; import static org.elasticsearch.cluster.metadata.IndexMetaData.INDEX_NUMBER_OF_SHARDS_SETTING; -import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked; import static org.hamcrest.Matchers.greaterThanOrEqualTo; /** @@ -256,8 +255,8 @@ public class DiscoveryDisruptionIT extends AbstractDisruptionTestCase { internalCluster().setDisruptionScheme(isolatePreferredMaster); isolatePreferredMaster.startDisrupting(); - assertAcked(client(randomFrom(nonPreferredNodes)).admin().indices().prepareCreate("test").setSettings( - Settings.builder().put(INDEX_NUMBER_OF_SHARDS_SETTING.getKey(), 1).put(INDEX_NUMBER_OF_REPLICAS_SETTING.getKey(), 0))); + client(randomFrom(nonPreferredNodes)).admin().indices().prepareCreate("test").setSettings( + Settings.builder().put(INDEX_NUMBER_OF_SHARDS_SETTING.getKey(), 1).put(INDEX_NUMBER_OF_REPLICAS_SETTING.getKey(), 0)).get(); internalCluster().clearDisruptionScheme(false); internalCluster().setDisruptionScheme(isolateAllNodes); diff --git a/server/src/test/java/org/elasticsearch/discovery/ZenFaultDetectionTests.java b/server/src/test/java/org/elasticsearch/discovery/ZenFaultDetectionTests.java index f32e93bb82d..03c0df43591 100644 --- a/server/src/test/java/org/elasticsearch/discovery/ZenFaultDetectionTests.java +++ b/server/src/test/java/org/elasticsearch/discovery/ZenFaultDetectionTests.java @@ -175,17 +175,19 @@ public class ZenFaultDetectionTests extends ESTestCase { final Settings pingSettings = Settings.builder() .put(FaultDetection.CONNECT_ON_NETWORK_DISCONNECT_SETTING.getKey(), shouldRetry) .put(FaultDetection.PING_INTERVAL_SETTING.getKey(), "5m").build(); - ClusterState 
clusterState = ClusterState.builder(new ClusterName("test")).nodes(buildNodesForA(true)).build(); + ClusterState clusterState = ClusterState.builder(new ClusterName("test")).version(randomNonNegativeLong()) + .nodes(buildNodesForA(true)).build(); NodesFaultDetection nodesFDA = new NodesFaultDetection(Settings.builder().put(settingsA).put(pingSettings).build(), - threadPool, serviceA, clusterState.getClusterName()); + threadPool, serviceA, () -> clusterState, clusterState.getClusterName()); nodesFDA.setLocalNode(nodeA); NodesFaultDetection nodesFDB = new NodesFaultDetection(Settings.builder().put(settingsB).put(pingSettings).build(), - threadPool, serviceB, clusterState.getClusterName()); + threadPool, serviceB, () -> clusterState, clusterState.getClusterName()); nodesFDB.setLocalNode(nodeB); final CountDownLatch pingSent = new CountDownLatch(1); nodesFDB.addListener(new NodesFaultDetection.Listener() { @Override public void onPingReceived(NodesFaultDetection.PingRequest pingRequest) { + assertThat(pingRequest.clusterStateVersion(), equalTo(clusterState.version())); pingSent.countDown(); } }); diff --git a/server/src/test/java/org/elasticsearch/index/mapper/BooleanFieldTypeTests.java b/server/src/test/java/org/elasticsearch/index/mapper/BooleanFieldTypeTests.java index d119a27f22e..8621e775838 100644 --- a/server/src/test/java/org/elasticsearch/index/mapper/BooleanFieldTypeTests.java +++ b/server/src/test/java/org/elasticsearch/index/mapper/BooleanFieldTypeTests.java @@ -36,8 +36,8 @@ public class BooleanFieldTypeTests extends FieldTypeTestCase { public void testValueFormat() { MappedFieldType ft = createDefaultFieldType(); - assertEquals("false", ft.docValueFormat(null, null).format(0)); - assertEquals("true", ft.docValueFormat(null, null).format(1)); + assertEquals(false, ft.docValueFormat(null, null).format(0)); + assertEquals(true, ft.docValueFormat(null, null).format(1)); } public void testValueForSearch() { diff --git a/server/src/test/java/org/elasticsearch/index/query/InnerHitBuilderTests.java b/server/src/test/java/org/elasticsearch/index/query/InnerHitBuilderTests.java index a4e68561662..a2068a666f4 100644 --- a/server/src/test/java/org/elasticsearch/index/query/InnerHitBuilderTests.java +++ b/server/src/test/java/org/elasticsearch/index/query/InnerHitBuilderTests.java @@ -32,6 +32,8 @@ import org.elasticsearch.script.Script; import org.elasticsearch.script.ScriptType; import org.elasticsearch.search.SearchModule; import org.elasticsearch.search.builder.SearchSourceBuilder; +import org.elasticsearch.search.fetch.subphase.DocValueFieldsContext; +import org.elasticsearch.search.fetch.subphase.DocValueFieldsContext.FieldAndFormat; import org.elasticsearch.search.fetch.subphase.FetchSourceContext; import org.elasticsearch.search.fetch.subphase.highlight.HighlightBuilderTests; import org.elasticsearch.search.internal.ShardSearchLocalRequest; @@ -147,7 +149,9 @@ public class InnerHitBuilderTests extends ESTestCase { if (randomBoolean()) { innerHits.setStoredFieldNames(randomListStuff(16, () -> randomAlphaOfLengthBetween(1, 16))); } - innerHits.setDocValueFields(randomListStuff(16, () -> randomAlphaOfLengthBetween(1, 16))); + innerHits.setDocValueFields(randomListStuff(16, + () -> new FieldAndFormat(randomAlphaOfLengthBetween(1, 16), + randomBoolean() ? null : DocValueFieldsContext.USE_DEFAULT_FORMAT))); // Random script fields deduped on their field name. 
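`InnerHitBuilderTests` above now randomizes `FieldAndFormat` instances rather than bare strings. A small sketch, not part of the patch, of the two request forms this type models and how they compare ("user" is a made-up field name):

```java
import org.elasticsearch.search.fetch.subphase.DocValueFieldsContext;
import org.elasticsearch.search.fetch.subphase.DocValueFieldsContext.FieldAndFormat;

// Illustrative only: a bare-string request parses to a null format, while
// "use_field_mapping" explicitly opts into the mapping-based default.
public class FieldAndFormatSketch {
    public static void main(String[] args) {
        FieldAndFormat legacy = new FieldAndFormat("user", null);
        FieldAndFormat optIn = new FieldAndFormat("user", DocValueFieldsContext.USE_DEFAULT_FORMAT);
        // The format participates in equals()/hashCode(), so the two differ.
        System.out.println(legacy.equals(optIn)); // false
    }
}
```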
Map scriptFields = new HashMap<>(); for (SearchSourceBuilder.ScriptField field: randomListStuff(16, InnerHitBuilderTests::randomScript)) { @@ -187,9 +191,9 @@ public class InnerHitBuilderTests extends ESTestCase { modifiers.add(() -> copy.setName(randomValueOtherThan(copy.getName(), () -> randomAlphaOfLengthBetween(1, 16)))); modifiers.add(() -> { if (randomBoolean()) { - copy.setDocValueFields(randomValueOtherThan(copy.getDocValueFields(), () -> { - return randomListStuff(16, () -> randomAlphaOfLengthBetween(1, 16)); - })); + copy.setDocValueFields(randomValueOtherThan(copy.getDocValueFields(), + () -> randomListStuff(16, () -> new FieldAndFormat(randomAlphaOfLengthBetween(1, 16), + randomBoolean() ? null : DocValueFieldsContext.USE_DEFAULT_FORMAT)))); } else { copy.addDocValueField(randomAlphaOfLengthBetween(1, 16)); } diff --git a/server/src/test/java/org/elasticsearch/index/store/CorruptedFileIT.java b/server/src/test/java/org/elasticsearch/index/store/CorruptedFileIT.java index 108b41d54a0..23533217ba1 100644 --- a/server/src/test/java/org/elasticsearch/index/store/CorruptedFileIT.java +++ b/server/src/test/java/org/elasticsearch/index/store/CorruptedFileIT.java @@ -470,8 +470,6 @@ public class CorruptedFileIT extends ESIntegTestCase { * TODO once checksum verification on snapshotting is implemented this test needs to be fixed or split into several * parts... We should also corrupt files on the actual snapshot and check that we don't restore the corrupted shard. */ - @AwaitsFix(bugUrl = "https://github.com/elastic/elasticsearch/issues/30577") - @TestLogging("org.elasticsearch.repositories:TRACE,org.elasticsearch.snapshots:TRACE,org.elasticsearch.index.engine:DEBUG") public void testCorruptFileThenSnapshotAndRestore() throws ExecutionException, InterruptedException, IOException { int numDocs = scaledRandomIntBetween(100, 1000); internalCluster().ensureAtLeastNumDataNodes(2); @@ -520,10 +518,6 @@ public class CorruptedFileIT extends ESIntegTestCase { break; } } - if (snapshotState != SnapshotState.PARTIAL) { - logger.info("--> listing shard files for investigation"); - files.forEach(f -> logger.info("path: {}", f.toAbsolutePath())); - } assertThat(createSnapshotResponse.getSnapshotInfo().state(), equalTo(SnapshotState.PARTIAL)); assertThat(corruptedFile, notNullValue()); } diff --git a/server/src/test/java/org/elasticsearch/indices/state/RareClusterStateIT.java b/server/src/test/java/org/elasticsearch/indices/state/RareClusterStateIT.java index afe421a2916..41a245aca9c 100644 --- a/server/src/test/java/org/elasticsearch/indices/state/RareClusterStateIT.java +++ b/server/src/test/java/org/elasticsearch/indices/state/RareClusterStateIT.java @@ -181,6 +181,12 @@ public class RareClusterStateIT extends ESIntegTestCase { logger.info("--> letting cluster proceed"); disruption.stopDisrupting(); ensureGreen(TimeValue.timeValueMinutes(30), "test"); + // due to publish_timeout of 0, wait for data node to have cluster state fully applied + assertBusy(() -> { + long masterClusterStateVersion = internalCluster().clusterService(internalCluster().getMasterName()).state().version(); + long dataClusterStateVersion = internalCluster().clusterService(dataNode).state().version(); + assertThat(masterClusterStateVersion, equalTo(dataClusterStateVersion)); + }); assertHitCount(client().prepareSearch("test").get(), 0); } diff --git a/server/src/test/java/org/elasticsearch/search/DocValueFormatTests.java b/server/src/test/java/org/elasticsearch/search/DocValueFormatTests.java index 7bf5308eb63..e5cfbf98b3d 
100644 --- a/server/src/test/java/org/elasticsearch/search/DocValueFormatTests.java +++ b/server/src/test/java/org/elasticsearch/search/DocValueFormatTests.java @@ -85,20 +85,20 @@ public class DocValueFormatTests extends ESTestCase { } public void testRawFormat() { - assertEquals("0", DocValueFormat.RAW.format(0)); - assertEquals("-1", DocValueFormat.RAW.format(-1)); - assertEquals("1", DocValueFormat.RAW.format(1)); + assertEquals(0L, DocValueFormat.RAW.format(0)); + assertEquals(-1L, DocValueFormat.RAW.format(-1)); + assertEquals(1L, DocValueFormat.RAW.format(1)); - assertEquals("0.0", DocValueFormat.RAW.format(0d)); - assertEquals("0.5", DocValueFormat.RAW.format(.5d)); - assertEquals("-1.0", DocValueFormat.RAW.format(-1d)); + assertEquals(0d, DocValueFormat.RAW.format(0d)); + assertEquals(.5d, DocValueFormat.RAW.format(.5d)); + assertEquals(-1d, DocValueFormat.RAW.format(-1d)); assertEquals("abc", DocValueFormat.RAW.format(new BytesRef("abc"))); } public void testBooleanFormat() { - assertEquals("false", DocValueFormat.BOOLEAN.format(0)); - assertEquals("true", DocValueFormat.BOOLEAN.format(1)); + assertEquals(false, DocValueFormat.BOOLEAN.format(0)); + assertEquals(true, DocValueFormat.BOOLEAN.format(1)); } public void testIpFormat() { diff --git a/server/src/test/java/org/elasticsearch/search/aggregations/metrics/InternalSumTests.java b/server/src/test/java/org/elasticsearch/search/aggregations/metrics/InternalSumTests.java index 884f9bfbe0d..aa9d25af49e 100644 --- a/server/src/test/java/org/elasticsearch/search/aggregations/metrics/InternalSumTests.java +++ b/server/src/test/java/org/elasticsearch/search/aggregations/metrics/InternalSumTests.java @@ -37,7 +37,7 @@ public class InternalSumTests extends InternalAggregationTestCase { @Override protected InternalSum createTestInstance(String name, List pipelineAggregators, Map metaData) { double value = frequently() ? 
randomDouble() : randomFrom(Double.NEGATIVE_INFINITY, Double.POSITIVE_INFINITY, Double.NaN); - DocValueFormat formatter = randomFrom(new DocValueFormat.Decimal("###.##"), DocValueFormat.BOOLEAN, DocValueFormat.RAW); + DocValueFormat formatter = randomFrom(new DocValueFormat.Decimal("###.##"), DocValueFormat.RAW); return new InternalSum(name, value, formatter, pipelineAggregators, metaData); } diff --git a/server/src/test/java/org/elasticsearch/search/aggregations/metrics/TopHitsIT.java b/server/src/test/java/org/elasticsearch/search/aggregations/metrics/TopHitsIT.java index 4f8493c0b00..952eb22848e 100644 --- a/server/src/test/java/org/elasticsearch/search/aggregations/metrics/TopHitsIT.java +++ b/server/src/test/java/org/elasticsearch/search/aggregations/metrics/TopHitsIT.java @@ -583,7 +583,7 @@ public class TopHitsIT extends ESIntegTestCase { .highlighter(new HighlightBuilder().field("text")) .explain(true) .storedField("text") - .fieldDataField("field1") + .docValueField("field1") .scriptField("script", new Script(ScriptType.INLINE, MockScriptEngine.NAME, "5", Collections.emptyMap())) .fetchSource("text", null) .version(true) @@ -865,7 +865,7 @@ public class TopHitsIT extends ESIntegTestCase { .addAggregation( nested("to-comments", "comments").subAggregation( topHits("top-comments").size(1).highlighter(new HighlightBuilder().field(hlField)).explain(true) - .fieldDataField("comments.user") + .docValueField("comments.user") .scriptField("script", new Script(ScriptType.INLINE, MockScriptEngine.NAME, "5", Collections.emptyMap())).fetchSource("comments.message", null) .version(true).sort("comments.date", SortOrder.ASC))).get(); assertHitCount(searchResponse, 2); diff --git a/server/src/test/java/org/elasticsearch/search/aggregations/metrics/TopHitsTests.java b/server/src/test/java/org/elasticsearch/search/aggregations/metrics/TopHitsTests.java index e2ef28480fa..4d2331b86f2 100644 --- a/server/src/test/java/org/elasticsearch/search/aggregations/metrics/TopHitsTests.java +++ b/server/src/test/java/org/elasticsearch/search/aggregations/metrics/TopHitsTests.java @@ -81,7 +81,7 @@ public class TopHitsTests extends BaseAggregationTestCase(searchResponse.getHits().getAt(0).getFields().keySet()); + assertThat(fields, equalTo(newHashSet("byte_field", "short_field", "integer_field", "long_field", + "float_field", "double_field", "date_field", "boolean_field", "text_field", "keyword_field", + "ip_field"))); + + assertThat(searchResponse.getHits().getAt(0).getFields().get("byte_field").getValue().toString(), equalTo("1")); + assertThat(searchResponse.getHits().getAt(0).getFields().get("short_field").getValue().toString(), equalTo("2")); + assertThat(searchResponse.getHits().getAt(0).getFields().get("integer_field").getValue(), equalTo((Object) 3L)); + assertThat(searchResponse.getHits().getAt(0).getFields().get("long_field").getValue(), equalTo((Object) 4L)); + assertThat(searchResponse.getHits().getAt(0).getFields().get("float_field").getValue(), equalTo((Object) 5.0)); + assertThat(searchResponse.getHits().getAt(0).getFields().get("double_field").getValue(), equalTo((Object) 6.0d)); + assertThat(searchResponse.getHits().getAt(0).getFields().get("date_field").getValue(), + equalTo(Joda.forPattern("dateOptionalTime").printer().print(date))); + assertThat(searchResponse.getHits().getAt(0).getFields().get("boolean_field").getValue(), equalTo((Object) true)); + assertThat(searchResponse.getHits().getAt(0).getFields().get("text_field").getValue(), equalTo("foo")); + 
assertThat(searchResponse.getHits().getAt(0).getFields().get("keyword_field").getValue(), equalTo("foo")); + assertThat(searchResponse.getHits().getAt(0).getFields().get("ip_field").getValue(), equalTo("::1")); + + builder = client().prepareSearch().setQuery(matchAllQuery()) + .addDocValueField("byte_field", "#.0") + .addDocValueField("short_field", "#.0") + .addDocValueField("integer_field", "#.0") + .addDocValueField("long_field", "#.0") + .addDocValueField("float_field", "#.0") + .addDocValueField("double_field", "#.0") + .addDocValueField("date_field", "epoch_millis"); + searchResponse = builder.execute().actionGet(); + + assertThat(searchResponse.getHits().getTotalHits(), equalTo(1L)); + assertThat(searchResponse.getHits().getHits().length, equalTo(1)); + fields = new HashSet<>(searchResponse.getHits().getAt(0).getFields().keySet()); + assertThat(fields, equalTo(newHashSet("byte_field", "short_field", "integer_field", "long_field", + "float_field", "double_field", "date_field"))); + + assertThat(searchResponse.getHits().getAt(0).getFields().get("byte_field").getValue(), equalTo("1.0")); + assertThat(searchResponse.getHits().getAt(0).getFields().get("short_field").getValue(), equalTo("2.0")); + assertThat(searchResponse.getHits().getAt(0).getFields().get("integer_field").getValue(), equalTo("3.0")); + assertThat(searchResponse.getHits().getAt(0).getFields().get("long_field").getValue(), equalTo("4.0")); + assertThat(searchResponse.getHits().getAt(0).getFields().get("float_field").getValue(), equalTo("5.0")); + assertThat(searchResponse.getHits().getAt(0).getFields().get("double_field").getValue(), equalTo("6.0")); + assertThat(searchResponse.getHits().getAt(0).getFields().get("date_field").getValue(), + equalTo(Joda.forPattern("epoch_millis").printer().print(date))); } public void testScriptFields() throws Exception { diff --git a/server/src/test/java/org/elasticsearch/snapshots/RepositoriesIT.java b/server/src/test/java/org/elasticsearch/snapshots/RepositoriesIT.java index d9d06c26b7d..23cb579bfdc 100644 --- a/server/src/test/java/org/elasticsearch/snapshots/RepositoriesIT.java +++ b/server/src/test/java/org/elasticsearch/snapshots/RepositoriesIT.java @@ -61,7 +61,7 @@ public class RepositoriesIT extends AbstractSnapshotIntegTestCase { logger.info("--> verify the repository"); int numberOfFiles = FileSystemUtils.files(location).length; VerifyRepositoryResponse verifyRepositoryResponse = client.admin().cluster().prepareVerifyRepository("test-repo-1").get(); - assertThat(verifyRepositoryResponse.getNodes().length, equalTo(cluster().numDataAndMasterNodes())); + assertThat(verifyRepositoryResponse.getNodes().size(), equalTo(cluster().numDataAndMasterNodes())); logger.info("--> verify that we didn't leave any files as a result of verification"); assertThat(FileSystemUtils.files(location).length, equalTo(numberOfFiles)); diff --git a/test/framework/src/main/java/org/elasticsearch/indices/analysis/AnalysisFactoryTestCase.java b/test/framework/src/main/java/org/elasticsearch/indices/analysis/AnalysisFactoryTestCase.java index 0396b8ac788..f26c44e05f5 100644 --- a/test/framework/src/main/java/org/elasticsearch/indices/analysis/AnalysisFactoryTestCase.java +++ b/test/framework/src/main/java/org/elasticsearch/indices/analysis/AnalysisFactoryTestCase.java @@ -217,9 +217,9 @@ public abstract class AnalysisFactoryTestCase extends ESTestCase { // should we expose it, or maybe think about higher level integration of the // fake term frequency feature (LUCENE-7854) .put("delimitedtermfrequency", 
Void.class) - // LUCENE-8273: ConditionalTokenFilter allows analysis chains to skip + // LUCENE-8273: ProtectedTermFilterFactory allows analysis chains to skip // particular token filters based on the attributes of the current token. - .put("termexclusion", Void.class) + .put("protectedterm", Void.class) .immutableMap(); diff --git a/test/framework/src/main/java/org/elasticsearch/test/InternalTestCluster.java b/test/framework/src/main/java/org/elasticsearch/test/InternalTestCluster.java index 5099fc0540d..de4b5a5ae05 100644 --- a/test/framework/src/main/java/org/elasticsearch/test/InternalTestCluster.java +++ b/test/framework/src/main/java/org/elasticsearch/test/InternalTestCluster.java @@ -909,7 +909,7 @@ public final class InternalTestCluster extends TestCluster { private void createNewNode(final Settings newSettings) { final long newIdSeed = NodeEnvironment.NODE_ID_SEED_SETTING.get(node.settings()) + 1; // use a new seed to make sure we have new node id - Settings finalSettings = Settings.builder().put(node.settings()).put(newSettings).put(NodeEnvironment.NODE_ID_SEED_SETTING.getKey(), newIdSeed).build(); + Settings finalSettings = Settings.builder().put(node.originalSettings()).put(newSettings).put(NodeEnvironment.NODE_ID_SEED_SETTING.getKey(), newIdSeed).build(); if (DISCOVERY_ZEN_MINIMUM_MASTER_NODES_SETTING.exists(finalSettings) == false) { throw new IllegalStateException(DISCOVERY_ZEN_MINIMUM_MASTER_NODES_SETTING.getKey() + " is not configured after restart of [" + name + "]"); diff --git a/test/framework/src/main/java/org/elasticsearch/test/rest/yaml/ESClientYamlSuiteTestCase.java b/test/framework/src/main/java/org/elasticsearch/test/rest/yaml/ESClientYamlSuiteTestCase.java index e0b501c5f25..30ac94e3432 100644 --- a/test/framework/src/main/java/org/elasticsearch/test/rest/yaml/ESClientYamlSuiteTestCase.java +++ b/test/framework/src/main/java/org/elasticsearch/test/rest/yaml/ESClientYamlSuiteTestCase.java @@ -22,7 +22,6 @@ package org.elasticsearch.test.rest.yaml; import com.carrotsearch.randomizedtesting.RandomizedTest; import org.apache.http.HttpHost; import org.apache.http.entity.StringEntity; -import org.apache.http.message.BasicHeader; import org.elasticsearch.Version; import org.elasticsearch.client.Request; import org.elasticsearch.client.Response; @@ -323,7 +322,7 @@ public abstract class ESClientYamlSuiteTestCase extends ESRestTestCase { if (useDefaultNumberOfShards == false && testCandidate.getTestSection().getSkipSection().getFeatures().contains("default_shards") == false) { final Request request = new Request("PUT", "/_template/global"); - request.setHeaders(new BasicHeader("Content-Type", XContentType.JSON.mediaTypeWithoutParameters())); + request.addHeader("Content-Type", XContentType.JSON.mediaTypeWithoutParameters()); request.setEntity(new StringEntity("{\"index_patterns\":[\"*\"],\"settings\":{\"index.number_of_shards\":2}}")); adminClient().performRequest(request); } diff --git a/test/framework/src/test/java/org/elasticsearch/test/test/InternalTestClusterTests.java b/test/framework/src/test/java/org/elasticsearch/test/test/InternalTestClusterTests.java index c70708c73ac..ca674e61651 100644 --- a/test/framework/src/test/java/org/elasticsearch/test/test/InternalTestClusterTests.java +++ b/test/framework/src/test/java/org/elasticsearch/test/test/InternalTestClusterTests.java @@ -476,9 +476,11 @@ public class InternalTestClusterTests extends ESTestCase { boolean enableHttpPipelining = randomBoolean(); String nodePrefix = "test"; Path baseDir = createTempDir(); + 
List> plugins = new ArrayList<>(mockPlugins()); + plugins.add(NodeAttrCheckPlugin.class); InternalTestCluster cluster = new InternalTestCluster(randomLong(), baseDir, false, true, 2, 2, "test", nodeConfigurationSource, 0, enableHttpPipelining, nodePrefix, - mockPlugins(), Function.identity()); + plugins, Function.identity()); try { cluster.beforeTest(random(), 0.0); assertMMNinNodeSetting(cluster, 2); @@ -509,4 +511,26 @@ public class InternalTestClusterTests extends ESTestCase { cluster.close(); } } + + /** + * Plugin that adds a simple node attribute as setting and checks if that node attribute is not already defined. + * Allows to check that the full-cluster restart logic does not copy over plugin-derived settings. + */ + public static class NodeAttrCheckPlugin extends Plugin { + + private final Settings settings; + + public NodeAttrCheckPlugin(Settings settings) { + this.settings = settings; + } + + @Override + public Settings additionalSettings() { + if (settings.get("node.attr.dummy") != null) { + fail("dummy setting already exists"); + } + return Settings.builder().put("node.attr.dummy", true).build(); + } + + } } diff --git a/x-pack/docs/en/ml/configuring.asciidoc b/x-pack/docs/en/ml/configuring.asciidoc index 9e7b787dcea..b794d3ebd33 100644 --- a/x-pack/docs/en/ml/configuring.asciidoc +++ b/x-pack/docs/en/ml/configuring.asciidoc @@ -34,8 +34,17 @@ The scenarios in this section describe some best practices for generating useful * <> * <> +:edit_url: https://github.com/elastic/elasticsearch/edit/{branch}/x-pack/docs/en/ml/customurl.asciidoc include::customurl.asciidoc[] + +:edit_url: https://github.com/elastic/elasticsearch/edit/{branch}/x-pack/docs/en/ml/aggregations.asciidoc include::aggregations.asciidoc[] + +:edit_url: https://github.com/elastic/elasticsearch/edit/{branch}/x-pack/docs/en/ml/categories.asciidoc include::categories.asciidoc[] + +:edit_url: https://github.com/elastic/elasticsearch/edit/{branch}/x-pack/docs/en/ml/populations.asciidoc include::populations.asciidoc[] + +:edit_url: https://github.com/elastic/elasticsearch/edit/{branch}/x-pack/docs/en/ml/transforms.asciidoc include::transforms.asciidoc[] diff --git a/x-pack/docs/en/ml/functions.asciidoc b/x-pack/docs/en/ml/functions.asciidoc index a59b2892667..ae5f768e056 100644 --- a/x-pack/docs/en/ml/functions.asciidoc +++ b/x-pack/docs/en/ml/functions.asciidoc @@ -18,27 +18,6 @@ variations (for example, `count`, `low_count`, and `high_count`). These variations apply one-sided tests, detecting anomalies only when the values are low or high, depending on which alternative is used. -//For some functions, you can optionally specify a field name in the -//`by_field_name` property. The analysis then considers whether there is an -//anomaly for one of more specific values of that field. In {kib}, use the -//**Key Fields** field in multi-metric jobs or the **by_field_name** field in -//advanced jobs. -//// -TODO: Per Sophie, "This is incorrect... Split Data refers to a partition_field_name. Over fields can only be added in Adv Config... - -Can you please remove the explanations for by/over/partition fields from the documentation for analytical functions. It's a complex topic and will be easier to review in a separate exercise." -//// - -//For some functions, you can also optionally specify a field name in the -//`over_field_name` property. This property shifts the analysis to be population- //or peer-based and uses the field to split the data.
In {kib}, use the -//**Split Data** field in multi-metric jobs or the **over_field_name** field in -//advanced jobs. - -//You can specify a `partition_field_name` with any function. The analysis is then -//segmented with completely independent baselines for each value of that field. -//In {kib}, use the **partition_field_name** field in advanced jobs. - You can specify a `summary_count_field_name` with any function except `metric`. When you use `summary_count_field_name`, the {ml} features expect the input data to be pre-aggregated. The value of the `summary_count_field_name` field @@ -55,13 +34,6 @@ functions are strongly affected by empty buckets. For this reason, there are `non_null_sum` and `non_zero_count` functions, which are tolerant to sparse data. These functions effectively ignore empty buckets. -//// -Some functions can benefit from overlapping buckets. This improves the overall -accuracy of the results but at the cost of a 2 bucket delay in seeing the results. - -The table below provides a high-level summary of the analytical functions provided by the API. Each of the functions is described in detail over the following pages. Note the examples given in these pages use single Detector Configuration objects. -//// - * <> * <> * <> @@ -70,10 +42,23 @@ The table below provides a high-level summary of the analytical functions provid * <> * <> +:edit_url: https://github.com/elastic/elasticsearch/edit/{branch}/x-pack/docs/en/ml/functions/count.asciidoc include::functions/count.asciidoc[] + +:edit_url: https://github.com/elastic/elasticsearch/edit/{branch}/x-pack/docs/en/ml/functions/geo.asciidoc include::functions/geo.asciidoc[] + +:edit_url: https://github.com/elastic/elasticsearch/edit/{branch}/x-pack/docs/en/ml/functions/info.asciidoc include::functions/info.asciidoc[] + +:edit_url: https://github.com/elastic/elasticsearch/edit/{branch}/x-pack/docs/en/ml/functions/metric.asciidoc include::functions/metric.asciidoc[] + +:edit_url: https://github.com/elastic/elasticsearch/edit/{branch}/x-pack/docs/en/ml/functions/rare.asciidoc include::functions/rare.asciidoc[] + +:edit_url: https://github.com/elastic/elasticsearch/edit/{branch}/x-pack/docs/en/ml/functions/sum.asciidoc include::functions/sum.asciidoc[] + +:edit_url: https://github.com/elastic/elasticsearch/edit/{branch}/x-pack/docs/en/ml/functions/time.asciidoc include::functions/time.asciidoc[] diff --git a/x-pack/docs/en/ml/getting-started.asciidoc b/x-pack/docs/en/ml/getting-started.asciidoc index 5b15de51f0b..2fd4f1ebe49 100644 --- a/x-pack/docs/en/ml/getting-started.asciidoc +++ b/x-pack/docs/en/ml/getting-started.asciidoc @@ -72,9 +72,20 @@ significant changes to the system. You can alternatively assign the For more information, see <> and <>. 
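To make the `summary_count_field_name` discussion in `functions.asciidoc` above concrete, the following sketch assembles an illustrative detector configuration for pre-aggregated input with `XContentBuilder`; the bucket span and field names are placeholders, not values taken from this change:

[source,java]
--------------------------------------------------
import org.elasticsearch.common.Strings;
import org.elasticsearch.common.xcontent.XContentBuilder;
import org.elasticsearch.common.xcontent.XContentFactory;

XContentBuilder config = XContentFactory.jsonBuilder();
config.startObject()
    .startObject("analysis_config")
        .field("bucket_span", "10m")
        // the input is pre-aggregated; this field carries the document counts
        .field("summary_count_field_name", "doc_count")
        .startArray("detectors")
            // sparse-data tolerant variant that effectively ignores empty buckets
            .startObject().field("function", "non_zero_count").endObject()
        .endArray()
    .endObject()
.endObject();
System.out.println(Strings.toString(config));
--------------------------------------------------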
+:edit_url: https://github.com/elastic/elasticsearch/edit/{branch}/x-pack/docs/en/ml/getting-started-data.asciidoc include::getting-started-data.asciidoc[] + +:edit_url: https://github.com/elastic/elasticsearch/edit/{branch}/x-pack/docs/en/ml/getting-started-wizards.asciidoc include::getting-started-wizards.asciidoc[] + +:edit_url: https://github.com/elastic/elasticsearch/edit/{branch}/x-pack/docs/en/ml/getting-started-single.asciidoc include::getting-started-single.asciidoc[] + +:edit_url: https://github.com/elastic/elasticsearch/edit/{branch}/x-pack/docs/en/ml/getting-started-multi.asciidoc include::getting-started-multi.asciidoc[] + +:edit_url: https://github.com/elastic/elasticsearch/edit/{branch}/x-pack/docs/en/ml/getting-started-forecast.asciidoc include::getting-started-forecast.asciidoc[] + +:edit_url: https://github.com/elastic/elasticsearch/edit/{branch}/x-pack/docs/en/ml/getting-started-next.asciidoc include::getting-started-next.asciidoc[] diff --git a/x-pack/docs/en/ml/overview.asciidoc b/x-pack/docs/en/ml/overview.asciidoc index b82a281acb0..5c941b4eda2 100644 --- a/x-pack/docs/en/ml/overview.asciidoc +++ b/x-pack/docs/en/ml/overview.asciidoc @@ -17,4 +17,5 @@ include::calendars.asciidoc[] There are a few concepts that are core to {ml} in {xpack}. Understanding these concepts from the outset will tremendously help ease the learning process. +:edit_url: https://github.com/elastic/elasticsearch/edit/{branch}/x-pack/docs/en/ml/architecture.asciidoc include::architecture.asciidoc[] diff --git a/x-pack/docs/en/security/authentication/configuring-saml-realm.asciidoc b/x-pack/docs/en/security/authentication/configuring-saml-realm.asciidoc new file mode 100644 index 00000000000..cbcbeebb359 --- /dev/null +++ b/x-pack/docs/en/security/authentication/configuring-saml-realm.asciidoc @@ -0,0 +1,225 @@ +[role="xpack"] +[[configuring-saml-realm]] +=== Configuring a SAML realm + +The {stack} supports Security Assertion Markup Language Single Sign On (SAML SSO) +into {kib} with {es} as a backend service. In particular, the {stack} supports +the SAML 2.0 Web Browser SSO and the SAML 2.0 Single Logout profiles. It can +integrate with any identity provider (IdP) that supports at least the SAML 2.0 +Web Browser SSO Profile. + +In SAML terminology, the {stack} is operating as a _service provider_ (SP). For more +information, see {stack-ov}/saml-realm.html[SAML authentication] and +{stack-ov}/saml-guide.html[Configuring SAML SSO on the {stack}]. + +[NOTE] +-- + +* If you configure a SAML realm for use in {kib}, you should also configure +another realm, such as the native realm in your authentication chain. +* These instructions assume that you have an existing SAML identity provider. +-- + +To enable SAML authentication in {es} and add the {stack} as a service provider: + +. Enable SSL/TLS for HTTP. ++ +-- +If your {es} cluster is operating in production mode, you must +configure the HTTP interface to use TLS before you can enable SAML +authentication. + +See <>. +-- + +. Enable the Token Service. ++ +-- +The {es} SAML implementation makes use of the {es} Token Service. This service +is automatically enabled if you configure TLS on the HTTP interface. You can +explicitly enable it by including the following setting in your +`elasticsearch.yml` file: + +[source, yaml] +------------------------------------------------------------ +xpack.security.authc.token.enabled: true +------------------------------------------------------------ +-- + +. Configure a SAML IdP metadata file. 
++ +-- +The {stack} uses a standard SAML metadata document in XML format, which defines +the capabilities and features of your identity provider. You should be able to +download or generate such a document within your IdP administration interface. + +Most IdPs will provide an appropriate metadata file with all the features that +the {stack} requires. For more information, see +{stack-ov}/saml-guide-idp.html[The identity provider]. +-- + +.. Download the IdP metadata document and store it within the `config` directory +on each {es} node. For example, store it as `config/saml/idp-metadata.xml`. + +.. Get the identifier for your identity provider. ++ +-- +The IdP will have been assigned an identifier (_EntityID_ in SAML terminology), +which is most commonly expressed in Uniform Resource Identifier (URI) form. Your +admin interface might tell you what this is or you might need to read the +metadata document to find it. Look for the `entityID` attribute on the +`EntityDescriptor` element. +-- + +. Create one or more SAML realms. ++ +-- +SAML authentication is enabled by configuring a SAML realm within the +authentication chain for {es}. + +This realm has a few mandatory settings, and a number of optional settings. +The available settings are described in detail in the +<>. The following settings (in the `elasticsearch.yml` +configuration file) are the most common settings: + +[source, yaml] +------------------------------------------------------------ +xpack.security.authc.realms.saml1: <1> + type: saml <2> + order: 2 <3> + idp.metadata.path: saml/idp-metadata.xml <4> + idp.entity_id: "https://sso.example.com/" <5> + sp.entity_id: "https://kibana.example.com/" <6> + sp.acs: "https://kibana.example.com/api/security/v1/saml" <7> + sp.logout: "https://kibana.example.com/logout" <8> +------------------------------------------------------------ +<1> This setting defines a new authentication realm named "saml1". For an +introduction to realms, see {stack-ov}/realms.html[Realms]. +<2> The `type` must be `saml`. +<3> You should define a unique order on each realm in your authentication chain. +It is recommended that the SAML realm be at the bottom of your authentication +chain (that is, it has the _highest_ order). +<4> This is the path to the metadata file that you saved for your identity provider. +The path that you enter here is relative to your `config/` directory. {security} +automatically monitors this file for changes and reloads the configuration +whenever it is updated. +<5> This is the identifier (SAML EntityID) that your IdP uses. It should match +the `entityID` attribute within the metadata file. +<6> This is a unique identifier for your {kib} instance, expressed as a URI. +You will use this value when you add {kib} as a service provider within your IdP. +We recommend that you use the base URL for your {kib} instance as the entity ID. +<7> The Assertion Consumer Service (ACS) endpoint is the URL within {kib} that +accepts authentication messages from the IdP. This ACS endpoint supports the +SAML HTTP-POST binding only. It must be a URL that is accessible from the web +browser of the user who is attempting to login to {kib}; it does not need to be +directly accessible by {es} or the IdP. The correct value can vary depending on +how you have installed {kib} and whether there are any proxies involved, but it +is typically +$\{kibana-url}/api/security/v1/saml+ where _$\{kibana-url}_ is the +base URL for your {kib} instance. 
+<8> This is the URL within {kib} that accepts logout messages from the IdP. +Like the `sp.acs` URL, it must be accessible from the web browser, but does +not need to be directly accessible by {es} or the IdP. The correct value can +vary depending on how you have installed {kib} and whether there are any +proxies involved, but it will typically be +$\{kibana-url}/logout+ where +_$\{kibana-url}_ is the base URL for your {kib} instance. + +IMPORTANT: SAML is used when authenticating via {kib}, but it is not an +effective means of authenticating directly to the {es} REST API. For this reason, +we recommend that you include at least one additional realm such as the +native realm in your authentication chain for use by API clients. + +For more information, see +{stack-ov}/saml-guide-authentication.html#saml-create-realm[Create a SAML realm]. +-- + +. Add attribute mappings. ++ +-- +When a user connects to {kib} through the identity provider, the IdP supplies a +SAML assertion that includes attributes for the user. You can configure the SAML +realm to map these attributes to properties on the authenticated user. + +The recommended steps for configuring these SAML attributes are as follows: +-- +.. Consult your IdP to see what user attributes it can provide. This varies +greatly between providers, but you should be able to obtain a list from the +documentation or from your local admin. + +.. Read through the list of user properties that {es} supports and decide which +of them are useful to you and can be provided by your IdP. At a minimum, the +`principal` attribute is required. The `groups` attribute is recommended. + +.. Configure your IdP to release those attributes to your {kib} SAML service +provider. ++ +-- +This process varies by provider: some provide a user interface for this, while +others might require that you edit configuration files. Usually the IdP (or your +local administrator) has suggestions about what URI to use for each attribute. +You can simply accept those suggestions, as the {es} service is entirely +configurable and does not require that any specific URIs are used. +-- + +.. Configure the SAML realm to associate the {es} user properties with the URIs +that you configured in your IdP. ++ +-- +For example, add the following settings to the `elasticsearch.yml` configuration +file: + +[source, yaml] +------------------------------------------------------------ +xpack.security.authc.realms.saml1: + ... + attributes.principal: "urn:oid:0.9.2342.19200300.100.1.1" + attributes.groups: "urn:oid:1.3.6.1.4.1.5923.1.5.1." +------------------------------------------------------------ + +For more information, see +{stack-ov}/saml-guide-authentication.html#saml-attribute-mapping[Attribute mapping]. +-- + +. (Optional) Configure logout services. ++ +-- +The SAML protocol supports the concept of Single Logout (SLO). The level of +support for SLO varies between identity providers. + +For more information, see +{stack-ov}/saml-guide-authentication.html#saml-logout[SAML logout]. +-- + +. (Optional) Configure encryption and signing. ++ +-- +The {stack} supports generating signed SAML messages (for authentication and/or +logout), verifying signed SAML messages from the IdP (for both authentication +and logout), and processing encrypted content. + +You can configure {es} for signing, encryption, or both, with the same or +separate keys. For more information, see +{stack-ov}/saml-guide-authentication.html#saml-enc-sign[Encryption and signing]. +-- + +.
(Optional) Generate service provider metadata. ++ +-- +There are some extra configuration steps that are specific to each identity +provider. If your identity provider can import SP metadata, some of those steps +can be automated or expedited. You can generate SP metadata for the {stack} by +using the <>. +-- + +. Configure role mappings. ++ +-- +When a user authenticates using SAML, they are identified to the {stack}, +but this does not automatically grant them access to perform any actions or +access any data. + +Your SAML users cannot do anything until they are mapped to {security} +roles. See {stack-ov}/saml-role-mapping.html[Configuring role mappings]. +-- + +. {stack-ov}/saml-kibana.html[Configure {kib} to use SAML SSO]. + diff --git a/x-pack/docs/en/security/authentication/native-realm.asciidoc b/x-pack/docs/en/security/authentication/native-realm.asciidoc index f7b514b8144..6aa0a72fc84 100644 --- a/x-pack/docs/en/security/authentication/native-realm.asciidoc +++ b/x-pack/docs/en/security/authentication/native-realm.asciidoc @@ -10,7 +10,7 @@ manage user passwords. [float] ==== Configuring a native realm -See {ref}/[Configuring a native realm]. +See {ref}/configuring-native-realm.html[Configuring a native realm]. [[native-settings]] ==== Native realm settings diff --git a/x-pack/docs/en/security/authentication/overview.asciidoc b/x-pack/docs/en/security/authentication/overview.asciidoc index da5f6a4ea3c..7633f02b676 100644 --- a/x-pack/docs/en/security/authentication/overview.asciidoc +++ b/x-pack/docs/en/security/authentication/overview.asciidoc @@ -24,28 +24,41 @@ attach your user credentials to the requests sent to {es}. For example, when using realms that support usernames and passwords you can simply attach {wikipedia}/Basic_access_authentication[basic auth] header to the requests. 
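Since the overview above notes that you can simply attach a basic auth header to requests, here is a short sketch of doing so with the low-level client's `Request` class; the credentials are placeholders:

[source,java]
--------------------------------------------------
import java.nio.charset.StandardCharsets;
import java.util.Base64;
import org.elasticsearch.client.Request;

String token = Base64.getEncoder()
        .encodeToString("elastic:changeme".getBytes(StandardCharsets.UTF_8));
Request request = new Request("GET", "/_cluster/health");
// attach the basic auth header to this single request
request.addHeader("Authorization", "Basic " + token);
--------------------------------------------------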
+:edit_url: https://github.com/elastic/elasticsearch/edit/{branch}/x-pack/docs/en/security/authentication/built-in-users.asciidoc include::built-in-users.asciidoc[] +:edit_url: https://github.com/elastic/elasticsearch/edit/{branch}/x-pack/docs/en/security/authentication/internal-users.asciidoc include::internal-users.asciidoc[] +:edit_url: https://github.com/elastic/elasticsearch/edit/{branch}/x-pack/docs/en/security/authentication/realms.asciidoc include::realms.asciidoc[] +:edit_url: https://github.com/elastic/elasticsearch/edit/{branch}/x-pack/docs/en/security/authentication/active-directory-realm.asciidoc include::active-directory-realm.asciidoc[] +:edit_url: https://github.com/elastic/elasticsearch/edit/{branch}/x-pack/docs/en/security/authentication/file-realm.asciidoc include::file-realm.asciidoc[] +:edit_url: https://github.com/elastic/elasticsearch/edit/{branch}/x-pack/docs/en/security/authentication/ldap-realm.asciidoc include::ldap-realm.asciidoc[] +:edit_url: https://github.com/elastic/elasticsearch/edit/{branch}/x-pack/docs/en/security/authentication/native-realm.asciidoc include::native-realm.asciidoc[] +:edit_url: https://github.com/elastic/elasticsearch/edit/{branch}/x-pack/docs/en/security/authentication/pki-realm.asciidoc include::pki-realm.asciidoc[] +:edit_url: https://github.com/elastic/elasticsearch/edit/{branch}/x-pack/docs/en/security/authentication/saml-realm.asciidoc include::saml-realm.asciidoc[] +:edit_url: https://github.com/elastic/elasticsearch/edit/{branch}/x-pack/docs/en/security/authentication/custom-realm.asciidoc include::custom-realm.asciidoc[] +:edit_url: https://github.com/elastic/elasticsearch/edit/{branch}/x-pack/docs/en/security/authentication/anonymous-access.asciidoc include::anonymous-access.asciidoc[] +:edit_url: https://github.com/elastic/elasticsearch/edit/{branch}/x-pack/docs/en/security/authentication/user-cache.asciidoc include::user-cache.asciidoc[] +:edit_url: https://github.com/elastic/elasticsearch/edit/{branch}/x-pack/docs/en/security/authentication/saml-guide.asciidoc include::saml-guide.asciidoc[] diff --git a/x-pack/docs/en/security/authentication/saml-guide.asciidoc b/x-pack/docs/en/security/authentication/saml-guide.asciidoc index 740f51c877d..a57cfaec84c 100644 --- a/x-pack/docs/en/security/authentication/saml-guide.asciidoc +++ b/x-pack/docs/en/security/authentication/saml-guide.asciidoc @@ -22,6 +22,7 @@ the primary (or sole) authentication method for users of that {kib} instance. Once you enable SAML authentication in {kib} it will affect all users who try to login. The <> section provides more detail about how this works. +[[saml-guide-idp]] === The identity provider The Elastic Stack supports the SAML 2.0 _Web Browser SSO_ and the SAML @@ -70,6 +71,7 @@ For `` messages, the message itself must be signed, and the signature should be provided as a URL parameter, as required by the HTTP-Redirect binding. +[[saml-guide-authentication]] === Configure {es} for SAML authentication There are five configuration steps to enable SAML authentication in {es}: diff --git a/x-pack/docs/en/security/authorization/overview.asciidoc b/x-pack/docs/en/security/authorization/overview.asciidoc index 98a1ad8b786..e5b61e585c6 100644 --- a/x-pack/docs/en/security/authorization/overview.asciidoc +++ b/x-pack/docs/en/security/authorization/overview.asciidoc @@ -49,18 +49,26 @@ As an administrator, you will need to define the roles that you want to use, then assign users to the roles. 
These can be assigned to users in a number of ways depending on the realms by which the users are authenticated. +:edit_url: https://github.com/elastic/elasticsearch/edit/{branch}/x-pack/docs/en/security/authorization/built-in-roles.asciidoc include::built-in-roles.asciidoc[] +:edit_url: https://github.com/elastic/elasticsearch/edit/{branch}/x-pack/docs/en/security/authorization/managing-roles.asciidoc include::managing-roles.asciidoc[] +:edit_url: https://github.com/elastic/elasticsearch/edit/{branch}/x-pack/docs/en/security/authorization/privileges.asciidoc include::privileges.asciidoc[] +:edit_url: https://github.com/elastic/elasticsearch/edit/{branch}/x-pack/docs/en/security/authorization/alias-privileges.asciidoc include::alias-privileges.asciidoc[] +:edit_url: https://github.com/elastic/elasticsearch/edit/{branch}/x-pack/docs/en/security/authorization/mapping-roles.asciidoc include::mapping-roles.asciidoc[] +:edit_url: https://github.com/elastic/elasticsearch/edit/{branch}/x-pack/docs/en/security/authorization/field-and-document-access-control.asciidoc include::field-and-document-access-control.asciidoc[] +:edit_url: https://github.com/elastic/elasticsearch/edit/{branch}/x-pack/docs/en/security/authorization/run-as-privilege.asciidoc include::run-as-privilege.asciidoc[] +:edit_url: https://github.com/elastic/elasticsearch/edit/{branch}/x-pack/docs/en/security/authorization/custom-roles-provider.asciidoc include::custom-roles-provider.asciidoc[] diff --git a/x-pack/docs/en/security/ccs-clients-integrations.asciidoc b/x-pack/docs/en/security/ccs-clients-integrations.asciidoc index cbf4ede328e..f744e6d7092 100644 --- a/x-pack/docs/en/security/ccs-clients-integrations.asciidoc +++ b/x-pack/docs/en/security/ccs-clients-integrations.asciidoc @@ -32,14 +32,20 @@ or at least communicate with the cluster in a secured way: * {kibana-ref}/secure-reporting.html[Reporting] * {winlogbeat-ref}/securing-beats.html[Winlogbeat] +:edit_url: https://github.com/elastic/elasticsearch/edit/{branch}/x-pack/docs/en/security/ccs-clients-integrations/cross-cluster.asciidoc include::ccs-clients-integrations/cross-cluster.asciidoc[] +:edit_url: https://github.com/elastic/elasticsearch/edit/{branch}/x-pack/docs/en/security/ccs-clients-integrations/java.asciidoc include::ccs-clients-integrations/java.asciidoc[] +:edit_url: https://github.com/elastic/elasticsearch/edit/{branch}/x-pack/docs/en/security/ccs-clients-integrations/http.asciidoc include::ccs-clients-integrations/http.asciidoc[] +:edit_url: https://github.com/elastic/elasticsearch/edit/{branch}/x-pack/docs/en/security/ccs-clients-integrations/hadoop.asciidoc include::ccs-clients-integrations/hadoop.asciidoc[] +:edit_url: https://github.com/elastic/elasticsearch/edit/{branch}/x-pack/docs/en/security/ccs-clients-integrations/beats.asciidoc include::ccs-clients-integrations/beats.asciidoc[] +:edit_url: https://github.com/elastic/elasticsearch/edit/{branch}/x-pack/docs/en/security/ccs-clients-integrations/monitoring.asciidoc include::ccs-clients-integrations/monitoring.asciidoc[] diff --git a/x-pack/docs/en/security/ccs-clients-integrations/cross-cluster.asciidoc b/x-pack/docs/en/security/ccs-clients-integrations/cross-cluster.asciidoc index bf4800d50d2..eceb0315b20 100644 --- a/x-pack/docs/en/security/ccs-clients-integrations/cross-cluster.asciidoc +++ b/x-pack/docs/en/security/ccs-clients-integrations/cross-cluster.asciidoc @@ -155,4 +155,5 @@ GET two:logs-2017.04/_search <1> // TEST[skip:todo] //TBD: Is there a missing description of the <1> callout above? 
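The remote-cluster search shown above (`GET two:logs-2017.04/_search`) can also be issued from Java. A sketch using the low-level REST client, assuming a `RestClient` named `client` that is already authenticated against the local cluster:

[source,java]
--------------------------------------------------
import org.apache.http.entity.ContentType;
import org.apache.http.nio.entity.NStringEntity;
import org.elasticsearch.client.Request;
import org.elasticsearch.client.Response;

Request request = new Request("GET", "/two:logs-2017.04/_search");
// the "two" prefix is the remote cluster alias resolved by cross-cluster search
request.setEntity(new NStringEntity("{\"query\":{\"match_all\":{}}}",
        ContentType.APPLICATION_JSON));
Response response = client.performRequest(request);
--------------------------------------------------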
+:edit_url: https://github.com/elastic/kibana/edit/{branch}/x-pack/docs/en/security/cross-cluster-kibana.asciidoc include::{xkb-repo-dir}/security/cross-cluster-kibana.asciidoc[] diff --git a/x-pack/docs/en/security/configuring-es.asciidoc b/x-pack/docs/en/security/configuring-es.asciidoc index de3895d34b0..d8ef6c2809b 100644 --- a/x-pack/docs/en/security/configuring-es.asciidoc +++ b/x-pack/docs/en/security/configuring-es.asciidoc @@ -76,6 +76,7 @@ user API. ** <>. ** <>. ** <>. +** <>. . Set up roles and users to control access to {es}. For example, to grant _John Doe_ full access to all indices that match @@ -140,5 +141,6 @@ include::authentication/configuring-file-realm.asciidoc[] include::authentication/configuring-ldap-realm.asciidoc[] include::authentication/configuring-native-realm.asciidoc[] include::authentication/configuring-pki-realm.asciidoc[] +include::authentication/configuring-saml-realm.asciidoc[] include::{xes-repo-dir}/settings/security-settings.asciidoc[] include::{xes-repo-dir}/settings/audit-settings.asciidoc[] diff --git a/x-pack/docs/en/security/reference.asciidoc b/x-pack/docs/en/security/reference.asciidoc index 9c65fd6479a..ba770c15232 100644 --- a/x-pack/docs/en/security/reference.asciidoc +++ b/x-pack/docs/en/security/reference.asciidoc @@ -7,4 +7,5 @@ * {ref}/security-api.html[Security API] * {ref}/xpack-commands.html[Security Commands] +:edit_url: https://github.com/elastic/elasticsearch/edit/{branch}/x-pack/docs/en/security/reference/files.asciidoc include::reference/files.asciidoc[] diff --git a/x-pack/docs/en/security/securing-communications.asciidoc b/x-pack/docs/en/security/securing-communications.asciidoc index ef07f0113cb..11f6b3dc561 100644 --- a/x-pack/docs/en/security/securing-communications.asciidoc +++ b/x-pack/docs/en/security/securing-communications.asciidoc @@ -17,6 +17,7 @@ This section shows how to: The authentication of new nodes helps prevent a rogue node from joining the cluster and receiving data through replication. +:edit_url: https://github.com/elastic/elasticsearch/edit/{branch}/x-pack/docs/en/security/securing-communications/setting-up-ssl.asciidoc include::securing-communications/setting-up-ssl.asciidoc[] //TO-DO: These sections can be removed when all links to them are removed. diff --git a/x-pack/docs/en/security/securing-communications/securing-elasticsearch.asciidoc b/x-pack/docs/en/security/securing-communications/securing-elasticsearch.asciidoc index da4e3a40b7d..09cb118f684 100644 --- a/x-pack/docs/en/security/securing-communications/securing-elasticsearch.asciidoc +++ b/x-pack/docs/en/security/securing-communications/securing-elasticsearch.asciidoc @@ -29,8 +29,17 @@ information, see <>. For more information about encrypting communications across the Elastic Stack, see {xpack-ref}/encrypting-communications.html[Encrypting Communications]. 
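Once TLS is enabled on the HTTP layer as described above, REST clients must connect over `https`. A minimal sketch with the low-level client; the host and port are placeholders, and a certificate from a private CA would additionally require a custom `SSLContext`:

[source,java]
--------------------------------------------------
import org.apache.http.HttpHost;
import org.elasticsearch.client.RestClient;

// connect over https instead of the default http
RestClient client = RestClient.builder(
        new HttpHost("localhost", 9200, "https")).build();
--------------------------------------------------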
+:edit_url: https://github.com/elastic/elasticsearch/edit/{branch}/x-pack/docs/en/security/securing-communications/node-certificates.asciidoc include::node-certificates.asciidoc[] + +:edit_url: https://github.com/elastic/elasticsearch/edit/{branch}/x-pack/docs/en/security/securing-communications/tls-transport.asciidoc include::tls-transport.asciidoc[] + +:edit_url: https://github.com/elastic/elasticsearch/edit/{branch}/x-pack/docs/en/security/securing-communications/tls-http.asciidoc include::tls-http.asciidoc[] + +:edit_url: https://github.com/elastic/elasticsearch/edit/{branch}/x-pack/docs/en/security/securing-communications/tls-ad.asciidoc include::tls-ad.asciidoc[] + +:edit_url: https://github.com/elastic/elasticsearch/edit/{branch}/x-pack/docs/en/security/securing-communications/tls-ldap.asciidoc include::tls-ldap.asciidoc[] \ No newline at end of file diff --git a/x-pack/docs/en/settings/audit-settings.asciidoc b/x-pack/docs/en/settings/audit-settings.asciidoc index 14e5d6fa28f..6274fae790b 100644 --- a/x-pack/docs/en/settings/audit-settings.asciidoc +++ b/x-pack/docs/en/settings/audit-settings.asciidoc @@ -121,7 +121,9 @@ To index audit events to a remote {es} cluster, you configure the following `xpack.security.audit.index.client.hosts`:: Specifies a comma-separated list of `host:port` pairs. These hosts should be -nodes in the remote cluster. +nodes in the remote cluster. If you are using default values for the +<> setting, you can omit the +`port` value. Otherwise, it must match the `transport.tcp.port` setting. `xpack.security.audit.index.client.cluster.name`:: Specifies the name of the remote cluster. diff --git a/x-pack/docs/en/sql/endpoints/cli.asciidoc b/x-pack/docs/en/sql/endpoints/cli.asciidoc index 8f217b61e45..edbb1dcace4 100644 --- a/x-pack/docs/en/sql/endpoints/cli.asciidoc +++ b/x-pack/docs/en/sql/endpoints/cli.asciidoc @@ -2,7 +2,7 @@ [[sql-cli]] == SQL CLI -X-Pack ships with a script to run the SQL CLI in its bin directory: +Elasticsearch ships with a script to run the SQL CLI in its `bin` directory: [source,bash] -------------------------------------------------- @@ -11,7 +11,7 @@ $ ./bin/elasticsearch-sql-cli The jar containing the SQL CLI is a stand alone Java application and the scripts just launch it. You can move it around to other machines -without having to install Elasticsearch or X-Pack on them. +without having to install Elasticsearch on them. You can pass the URL of the Elasticsearch instance to connect to as the first parameter: diff --git a/x-pack/docs/en/sql/endpoints/translate.asciidoc b/x-pack/docs/en/sql/endpoints/translate.asciidoc index 27821141130..9c1d71af5d3 100644 --- a/x-pack/docs/en/sql/endpoints/translate.asciidoc +++ b/x-pack/docs/en/sql/endpoints/translate.asciidoc @@ -23,8 +23,14 @@ Which returns: { "size" : 10, "docvalue_fields" : [ - "page_count", - "release_date" + { + "field": "page_count", + "format": "use_field_mapping" + }, + { + "field": "release_date", + "format": "epoch_millis" + } ], "_source": { "includes": [ diff --git a/x-pack/docs/en/watcher/actions.asciidoc b/x-pack/docs/en/watcher/actions.asciidoc index 72489443b3c..de2516b0589 100644 --- a/x-pack/docs/en/watcher/actions.asciidoc +++ b/x-pack/docs/en/watcher/actions.asciidoc @@ -259,20 +259,28 @@ PUT _xpack/watcher/watch/log_event_watch <1> A `condition` that only applies to the `notify_pager` action, which restricts its execution to when the condition succeeds (at least 5 hits in this case). 
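Callout <1> above describes an action-level `condition`. The same fragment can be assembled programmatically; a sketch with `XContentBuilder`, reusing the action name and threshold from the example:

[source,java]
--------------------------------------------------
import org.elasticsearch.common.xcontent.XContentBuilder;
import org.elasticsearch.common.xcontent.XContentFactory;

XContentBuilder action = XContentFactory.jsonBuilder();
action.startObject()
    .startObject("notify_pager")
        // this condition gates only the notify_pager action
        .startObject("condition")
            .startObject("compare")
                .startObject("ctx.payload.hits.total")
                    .field("gte", 5)
                .endObject()
            .endObject()
        .endObject()
    .endObject()
.endObject();
--------------------------------------------------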
+:edit_url: https://github.com/elastic/elasticsearch/edit/{branch}/x-pack/docs/en/watcher/actions/email.asciidoc include::actions/email.asciidoc[] +:edit_url: https://github.com/elastic/elasticsearch/edit/{branch}/x-pack/docs/en/watcher/actions/webhook.asciidoc include::actions/webhook.asciidoc[] +:edit_url: https://github.com/elastic/elasticsearch/edit/{branch}/x-pack/docs/en/watcher/actions/index.asciidoc include::actions/index.asciidoc[] +:edit_url: https://github.com/elastic/elasticsearch/edit/{branch}/x-pack/docs/en/watcher/actions/logging.asciidoc include::actions/logging.asciidoc[] +:edit_url: https://github.com/elastic/elasticsearch/edit/{branch}/x-pack/docs/en/watcher/actions/hipchat.asciidoc include::actions/hipchat.asciidoc[] +:edit_url: https://github.com/elastic/elasticsearch/edit/{branch}/x-pack/docs/en/watcher/actions/slack.asciidoc include::actions/slack.asciidoc[] +:edit_url: https://github.com/elastic/elasticsearch/edit/{branch}/x-pack/docs/en/watcher/actions/pagerduty.asciidoc include::actions/pagerduty.asciidoc[] +:edit_url: https://github.com/elastic/elasticsearch/edit/{branch}/x-pack/docs/en/watcher/actions/jira.asciidoc include::actions/jira.asciidoc[] [float] diff --git a/x-pack/docs/en/watcher/condition.asciidoc b/x-pack/docs/en/watcher/condition.asciidoc index e83981667d5..50424dc132a 100644 --- a/x-pack/docs/en/watcher/condition.asciidoc +++ b/x-pack/docs/en/watcher/condition.asciidoc @@ -28,12 +28,17 @@ conditions are met. In addition to the watch wide condition, you can also configure conditions per <>. +:edit_url: https://github.com/elastic/elasticsearch/edit/{branch}/x-pack/docs/en/watcher/condition/always.asciidoc include::condition/always.asciidoc[] +:edit_url: https://github.com/elastic/elasticsearch/edit/{branch}/x-pack/docs/en/watcher/condition/never.asciidoc include::condition/never.asciidoc[] +:edit_url: https://github.com/elastic/elasticsearch/edit/{branch}/x-pack/docs/en/watcher/condition/compare.asciidoc include::condition/compare.asciidoc[] +:edit_url: https://github.com/elastic/elasticsearch/edit/{branch}/x-pack/docs/en/watcher/condition/array-compare.asciidoc include::condition/array-compare.asciidoc[] +:edit_url: https://github.com/elastic/elasticsearch/edit/{branch}/x-pack/docs/en/watcher/condition/script.asciidoc include::condition/script.asciidoc[] diff --git a/x-pack/docs/en/watcher/example-watches.asciidoc b/x-pack/docs/en/watcher/example-watches.asciidoc index 2d747caba5c..2a402b20261 100644 --- a/x-pack/docs/en/watcher/example-watches.asciidoc +++ b/x-pack/docs/en/watcher/example-watches.asciidoc @@ -9,6 +9,8 @@ For more example watches you can use as a starting point for building custom watches, see the https://github.com/elastic/examples/tree/master/Alerting[Example Watches] in the Elastic Examples repo. +:edit_url: https://github.com/elastic/elasticsearch/edit/{branch}/x-pack/docs/en/watcher/example-watches/example-watch-clusterstatus.asciidoc include::example-watches/example-watch-clusterstatus.asciidoc[] +:edit_url: https://github.com/elastic/elasticsearch/edit/{branch}/x-pack/docs/en/watcher/example-watches/example-watch-meetupdata.asciidoc include::example-watches/example-watch-meetupdata.asciidoc[] diff --git a/x-pack/docs/en/watcher/input.asciidoc b/x-pack/docs/en/watcher/input.asciidoc index d74f5cd80f1..6dee849c735 100644 --- a/x-pack/docs/en/watcher/input.asciidoc +++ b/x-pack/docs/en/watcher/input.asciidoc @@ -19,10 +19,14 @@ execution context. 
NOTE: If you don't define an input for a watch, an empty payload is loaded into the execution context. +:edit_url: https://github.com/elastic/elasticsearch/edit/{branch}/x-pack/docs/en/watcher/input/simple.asciidoc include::input/simple.asciidoc[] +:edit_url: https://github.com/elastic/elasticsearch/edit/{branch}/x-pack/docs/en/watcher/input/search.asciidoc include::input/search.asciidoc[] +:edit_url: https://github.com/elastic/elasticsearch/edit/{branch}/x-pack/docs/en/watcher/input/http.asciidoc include::input/http.asciidoc[] +:edit_url: https://github.com/elastic/elasticsearch/edit/{branch}/x-pack/docs/en/watcher/input/chain.asciidoc include::input/chain.asciidoc[] diff --git a/x-pack/docs/en/watcher/java.asciidoc b/x-pack/docs/en/watcher/java.asciidoc index bcf41252433..e5cb6b54b0c 100644 --- a/x-pack/docs/en/watcher/java.asciidoc +++ b/x-pack/docs/en/watcher/java.asciidoc @@ -101,20 +101,29 @@ XPackClient xpackClient = new XPackClient(client); WatcherClient watcherClient = xpackClient.watcher(); -------------------------------------------------- +:edit_url: https://github.com/elastic/elasticsearch/edit/{branch}/x-pack/docs/en/watcher/java/put-watch.asciidoc include::java/put-watch.asciidoc[] +:edit_url: https://github.com/elastic/elasticsearch/edit/{branch}/x-pack/docs/en/watcher/java/get-watch.asciidoc include::java/get-watch.asciidoc[] +:edit_url: https://github.com/elastic/elasticsearch/edit/{branch}/x-pack/docs/en/watcher/java/delete-watch.asciidoc include::java/delete-watch.asciidoc[] +:edit_url: https://github.com/elastic/elasticsearch/edit/{branch}/x-pack/docs/en/watcher/java/execute-watch.asciidoc include::java/execute-watch.asciidoc[] +:edit_url: https://github.com/elastic/elasticsearch/edit/{branch}/x-pack/docs/en/watcher/java/ack-watch.asciidoc include::java/ack-watch.asciidoc[] +:edit_url: https://github.com/elastic/elasticsearch/edit/{branch}/x-pack/docs/en/watcher/java/activate-watch.asciidoc include::java/activate-watch.asciidoc[] +:edit_url: https://github.com/elastic/elasticsearch/edit/{branch}/x-pack/docs/en/watcher/java/deactivate-watch.asciidoc include::java/deactivate-watch.asciidoc[] +:edit_url: https://github.com/elastic/elasticsearch/edit/{branch}/x-pack/docs/en/watcher/java/stats.asciidoc include::java/stats.asciidoc[] +:edit_url: https://github.com/elastic/elasticsearch/edit/{branch}/x-pack/docs/en/watcher/java/service.asciidoc include::java/service.asciidoc[] diff --git a/x-pack/docs/en/watcher/transform.asciidoc b/x-pack/docs/en/watcher/transform.asciidoc index 1b99d595b9c..0351c9b8c12 100644 --- a/x-pack/docs/en/watcher/transform.asciidoc +++ b/x-pack/docs/en/watcher/transform.asciidoc @@ -55,8 +55,11 @@ part of the definition of the `my_webhook` action. 
<1> A watch level `transform` <2> An action level `transform` +:edit_url: https://github.com/elastic/elasticsearch/edit/{branch}/x-pack/docs/en/watcher/transform/search.asciidoc include::transform/search.asciidoc[] +:edit_url: https://github.com/elastic/elasticsearch/edit/{branch}/x-pack/docs/en/watcher/transform/script.asciidoc include::transform/script.asciidoc[] +:edit_url: https://github.com/elastic/elasticsearch/edit/{branch}/x-pack/docs/en/watcher/transform/chain.asciidoc include::transform/chain.asciidoc[] \ No newline at end of file diff --git a/x-pack/docs/en/watcher/trigger.asciidoc b/x-pack/docs/en/watcher/trigger.asciidoc index ee52dbba3bd..af830e829a4 100644 --- a/x-pack/docs/en/watcher/trigger.asciidoc +++ b/x-pack/docs/en/watcher/trigger.asciidoc @@ -9,4 +9,5 @@ the trigger and triggering the watch when needed. {watcher} is designed to support different types of triggers, but only time-based <> triggers are currently available. +:edit_url: https://github.com/elastic/elasticsearch/edit/{branch}/x-pack/docs/en/watcher/trigger/schedule.asciidoc include::trigger/schedule.asciidoc[] diff --git a/x-pack/docs/en/watcher/trigger/schedule.asciidoc b/x-pack/docs/en/watcher/trigger/schedule.asciidoc index 7cd38c5fc9b..abbc3f5cfe8 100644 --- a/x-pack/docs/en/watcher/trigger/schedule.asciidoc +++ b/x-pack/docs/en/watcher/trigger/schedule.asciidoc @@ -26,16 +26,23 @@ once per minute. For more information about throttling, see * <> * <> +:edit_url: https://github.com/elastic/elasticsearch/edit/{branch}/x-pack/docs/en/watcher/schedule/hourly.asciidoc include::schedule/hourly.asciidoc[] +:edit_url: https://github.com/elastic/elasticsearch/edit/{branch}/x-pack/docs/en/watcher/schedule/daily.asciidoc include::schedule/daily.asciidoc[] +:edit_url: https://github.com/elastic/elasticsearch/edit/{branch}/x-pack/docs/en/watcher/schedule/weekly.asciidoc include::schedule/weekly.asciidoc[] +:edit_url: https://github.com/elastic/elasticsearch/edit/{branch}/x-pack/docs/en/watcher/schedule/monthly.asciidoc include::schedule/monthly.asciidoc[] +:edit_url: https://github.com/elastic/elasticsearch/edit/{branch}/x-pack/docs/en/watcher/schedule/yearly.asciidoc include::schedule/yearly.asciidoc[] +:edit_url: https://github.com/elastic/elasticsearch/edit/{branch}/x-pack/docs/en/watcher/schedule/cron.asciidoc include::schedule/cron.asciidoc[] +:edit_url: https://github.com/elastic/elasticsearch/edit/{branch}/x-pack/docs/en/watcher/schedule/interval.asciidoc include::schedule/interval.asciidoc[] diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/license/LicenseService.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/license/LicenseService.java index fa0c239aab1..99e6a10ad92 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/license/LicenseService.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/license/LicenseService.java @@ -223,6 +223,7 @@ public class LicenseService extends AbstractLifecycleComponent implements Cluste @Override public ClusterState execute(ClusterState currentState) throws Exception { + XPackPlugin.checkReadyForXPackCustomMetadata(currentState); MetaData currentMetadata = currentState.metaData(); LicensesMetaData licensesMetaData = currentMetadata.custom(LicensesMetaData.TYPE); Version trialVersion = null; @@ -341,7 +342,7 @@ public class LicenseService extends AbstractLifecycleComponent implements Cluste if (clusterService.lifecycleState() == Lifecycle.State.STARTED) { final ClusterState clusterState = clusterService.state(); if 
(clusterState.blocks().hasGlobalBlock(GatewayService.STATE_NOT_RECOVERED_BLOCK) == false && - clusterState.nodes().getMasterNode() != null) { + clusterState.nodes().getMasterNode() != null && XPackPlugin.isReadyForXPackCustomMetadata(clusterState)) { final LicensesMetaData currentMetaData = clusterState.metaData().custom(LicensesMetaData.TYPE); boolean noLicense = currentMetaData == null || currentMetaData.getLicense() == null; if (clusterState.getNodes().isLocalNodeElectedMaster() && @@ -374,6 +375,12 @@ public class LicenseService extends AbstractLifecycleComponent implements Cluste final ClusterState previousClusterState = event.previousState(); final ClusterState currentClusterState = event.state(); if (!currentClusterState.blocks().hasGlobalBlock(GatewayService.STATE_NOT_RECOVERED_BLOCK)) { + if (XPackPlugin.isReadyForXPackCustomMetadata(currentClusterState) == false) { + logger.debug("cannot add license to cluster as the following nodes might not understand the license metadata: {}", + () -> XPackPlugin.nodesNotReadyForXPackCustomMetadata(currentClusterState)); + return; + } + final LicensesMetaData prevLicensesMetaData = previousClusterState.getMetaData().custom(LicensesMetaData.TYPE); final LicensesMetaData currentLicensesMetaData = currentClusterState.getMetaData().custom(LicensesMetaData.TYPE); if (logger.isDebugEnabled()) { diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/license/StartBasicClusterTask.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/license/StartBasicClusterTask.java index 355482872d6..0cf949a6990 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/license/StartBasicClusterTask.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/license/StartBasicClusterTask.java @@ -13,6 +13,7 @@ import org.elasticsearch.cluster.ClusterState; import org.elasticsearch.cluster.ClusterStateUpdateTask; import org.elasticsearch.cluster.metadata.MetaData; import org.elasticsearch.common.Nullable; +import org.elasticsearch.xpack.core.XPackPlugin; import java.time.Clock; import java.util.Collections; @@ -59,6 +60,7 @@ public class StartBasicClusterTask extends ClusterStateUpdateTask { @Override public ClusterState execute(ClusterState currentState) throws Exception { + XPackPlugin.checkReadyForXPackCustomMetadata(currentState); LicensesMetaData licensesMetaData = currentState.metaData().custom(LicensesMetaData.TYPE); License currentLicense = LicensesMetaData.extractLicense(licensesMetaData); if (currentLicense == null || currentLicense.type().equals("basic") == false) { diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/license/StartTrialClusterTask.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/license/StartTrialClusterTask.java index 355672dedf7..5c5c03151ba 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/license/StartTrialClusterTask.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/license/StartTrialClusterTask.java @@ -13,6 +13,7 @@ import org.elasticsearch.cluster.ClusterState; import org.elasticsearch.cluster.ClusterStateUpdateTask; import org.elasticsearch.cluster.metadata.MetaData; import org.elasticsearch.common.Nullable; +import org.elasticsearch.xpack.core.XPackPlugin; import java.time.Clock; import java.util.Collections; @@ -64,6 +65,7 @@ public class StartTrialClusterTask extends ClusterStateUpdateTask { @Override public ClusterState execute(ClusterState currentState) throws Exception { + XPackPlugin.checkReadyForXPackCustomMetadata(currentState); LicensesMetaData 
currentLicensesMetaData = currentState.metaData().custom(LicensesMetaData.TYPE); if (request.isAcknowledged() == false) { diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/license/StartupSelfGeneratedLicenseTask.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/license/StartupSelfGeneratedLicenseTask.java index 77695f64538..823283ac5a8 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/license/StartupSelfGeneratedLicenseTask.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/license/StartupSelfGeneratedLicenseTask.java @@ -16,6 +16,7 @@ import org.elasticsearch.cluster.service.ClusterService; import org.elasticsearch.common.Nullable; import org.elasticsearch.common.logging.Loggers; import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.xpack.core.XPackPlugin; import java.time.Clock; import java.util.UUID; @@ -49,6 +50,7 @@ public class StartupSelfGeneratedLicenseTask extends ClusterStateUpdateTask { @Override public ClusterState execute(ClusterState currentState) throws Exception { + XPackPlugin.checkReadyForXPackCustomMetadata(currentState); final MetaData metaData = currentState.metaData(); final LicensesMetaData currentLicensesMetaData = metaData.custom(LicensesMetaData.TYPE); // do not generate a license if any license is present diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/XPackPlugin.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/XPackPlugin.java index 5ee46f3b3c9..77d521e2d43 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/XPackPlugin.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/XPackPlugin.java @@ -9,15 +9,20 @@ import org.apache.logging.log4j.Logger; import org.apache.lucene.util.SetOnce; import org.bouncycastle.operator.OperatorCreationException; import org.elasticsearch.SpecialPermission; +import org.elasticsearch.Version; import org.elasticsearch.action.ActionRequest; import org.elasticsearch.action.ActionResponse; import org.elasticsearch.action.GenericAction; import org.elasticsearch.action.support.ActionFilter; import org.elasticsearch.client.Client; import org.elasticsearch.client.transport.TransportClient; +import org.elasticsearch.cluster.ClusterState; import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver; +import org.elasticsearch.cluster.metadata.MetaData; +import org.elasticsearch.cluster.node.DiscoveryNode; import org.elasticsearch.cluster.node.DiscoveryNodes; import org.elasticsearch.cluster.service.ClusterService; +import org.elasticsearch.common.Booleans; import org.elasticsearch.common.inject.Binder; import org.elasticsearch.common.inject.Module; import org.elasticsearch.common.inject.multibindings.Multibinder; @@ -33,6 +38,7 @@ import org.elasticsearch.common.xcontent.NamedXContentRegistry; import org.elasticsearch.env.Environment; import org.elasticsearch.env.NodeEnvironment; import org.elasticsearch.license.LicenseService; +import org.elasticsearch.license.LicensesMetaData; import org.elasticsearch.license.Licensing; import org.elasticsearch.license.XPackLicenseState; import org.elasticsearch.plugins.ExtensiblePlugin; @@ -46,10 +52,13 @@ import org.elasticsearch.xpack.core.action.TransportXPackInfoAction; import org.elasticsearch.xpack.core.action.TransportXPackUsageAction; import org.elasticsearch.xpack.core.action.XPackInfoAction; import org.elasticsearch.xpack.core.action.XPackUsageAction; +import org.elasticsearch.xpack.core.ml.MLMetadataField; import 
org.elasticsearch.xpack.core.rest.action.RestXPackInfoAction; import org.elasticsearch.xpack.core.rest.action.RestXPackUsageAction; +import org.elasticsearch.xpack.core.security.authc.TokenMetaData; import org.elasticsearch.xpack.core.ssl.SSLConfigurationReloader; import org.elasticsearch.xpack.core.ssl.SSLService; +import org.elasticsearch.xpack.core.watcher.WatcherMetaData; import javax.security.auth.DestroyFailedException; @@ -62,14 +71,19 @@ import java.security.PrivilegedAction; import java.time.Clock; import java.util.ArrayList; import java.util.Collection; +import java.util.Collections; import java.util.List; import java.util.function.Supplier; +import java.util.stream.Collectors; +import java.util.stream.StreamSupport; public class XPackPlugin extends XPackClientPlugin implements ScriptPlugin, ExtensiblePlugin { private static Logger logger = ESLoggerFactory.getLogger(XPackPlugin.class); private static DeprecationLogger deprecationLogger = new DeprecationLogger(logger); + public static final String XPACK_INSTALLED_NODE_ATTR = "xpack.installed"; + // TODO: clean up this library to not ask for write access to all system properties! static { // invoke this clinit in unbound with permissions to access all system properties @@ -138,6 +152,75 @@ public class XPackPlugin extends XPackClientPlugin implements ScriptPlugin, Exte public static LicenseService getSharedLicenseService() { return licenseService.get(); } public static XPackLicenseState getSharedLicenseState() { return licenseState.get(); } + /** + * Checks if the cluster state allows this node to add x-pack metadata to the cluster state, + * and throws an exception otherwise. + * This check should be called before installing any x-pack metadata into the cluster state, + * to ensure that the other nodes that are part of the cluster will be able to deserialize + * that metadata. Note that if the cluster state already contains x-pack metadata, this + * check assumes that the nodes are already ready to receive additional x-pack metadata. + * Having this check properly in place everywhere makes it possible to install x-pack into + * a cluster using a rolling restart. + */ + public static void checkReadyForXPackCustomMetadata(ClusterState clusterState) { + if (alreadyContainsXPackCustomMetadata(clusterState)) { + return; + } + List notReadyNodes = nodesNotReadyForXPackCustomMetadata(clusterState); + if (notReadyNodes.isEmpty() == false) { + throw new IllegalStateException("The following nodes are not ready yet for enabling x-pack custom metadata: " + notReadyNodes); + } + } + + /** + * Checks if the cluster state allows this node to add x-pack metadata to the cluster state. + * See {@link #checkReadyForXPackCustomMetadata} for more details. + */ + public static boolean isReadyForXPackCustomMetadata(ClusterState clusterState) { + return alreadyContainsXPackCustomMetadata(clusterState) || nodesNotReadyForXPackCustomMetadata(clusterState).isEmpty(); + } + + /** + * Returns the list of nodes that prevent this node from adding x-pack metadata to the cluster state. + * See {@link #checkReadyForXPackCustomMetadata} for more details.
+ */ + public static List nodesNotReadyForXPackCustomMetadata(ClusterState clusterState) { + // check that all nodes would be capable of deserializing newly added x-pack metadata + final List notReadyNodes = StreamSupport.stream(clusterState.nodes().spliterator(), false).filter(node -> { + final String xpackInstalledAttr = node.getAttributes().getOrDefault(XPACK_INSTALLED_NODE_ATTR, "false"); + + // The node attribute XPACK_INSTALLED_NODE_ATTR was only introduced in 6.3.0, so when + // we have an older node in this mixed-version cluster without any x-pack metadata, + // we want to prevent x-pack from adding custom metadata + return node.getVersion().before(Version.V_6_3_0) || Booleans.parseBoolean(xpackInstalledAttr) == false; + }).collect(Collectors.toList()); + + return notReadyNodes; + } + + private static boolean alreadyContainsXPackCustomMetadata(ClusterState clusterState) { + final MetaData metaData = clusterState.metaData(); + return metaData.custom(LicensesMetaData.TYPE) != null || + metaData.custom(MLMetadataField.TYPE) != null || + metaData.custom(WatcherMetaData.TYPE) != null || + clusterState.custom(TokenMetaData.TYPE) != null; + } + + @Override + public Settings additionalSettings() { + final String xpackInstalledNodeAttrSetting = "node.attr." + XPACK_INSTALLED_NODE_ATTR; + + if (settings.get(xpackInstalledNodeAttrSetting) != null) { + throw new IllegalArgumentException("Directly setting [" + xpackInstalledNodeAttrSetting + "] is not permitted"); + } + + if (transportClientMode) { + return super.additionalSettings(); + } else { + return Settings.builder().put(super.additionalSettings()).put(xpackInstalledNodeAttrSetting, "true").build(); + } + } + @Override public Collection createGuiceModules() { ArrayList modules = new ArrayList<>(); diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authz/accesscontrol/FieldSubsetReader.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authz/accesscontrol/FieldSubsetReader.java index 5779924bb27..8559ab0703b 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authz/accesscontrol/FieldSubsetReader.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authz/accesscontrol/FieldSubsetReader.java @@ -193,9 +193,7 @@ public final class FieldSubsetReader extends FilterLeafReader { continue; } Map filteredValue = filter((Map)value, includeAutomaton, state); - if (filteredValue.isEmpty() == false) { - filtered.add(filteredValue); - } + filtered.add(filteredValue); } else if (value instanceof Iterable) { List filteredValue = filter((Iterable) value, includeAutomaton, initialState); if (filteredValue.isEmpty() == false) { diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/license/AbstractLicenseServiceTestCase.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/license/AbstractLicenseServiceTestCase.java index 2f110f4f8a9..5bc33ae330a 100644 --- a/x-pack/plugin/core/src/test/java/org/elasticsearch/license/AbstractLicenseServiceTestCase.java +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/license/AbstractLicenseServiceTestCase.java @@ -18,14 +18,16 @@ import org.elasticsearch.common.settings.Settings; import org.elasticsearch.env.Environment; import org.elasticsearch.test.ESTestCase; import org.elasticsearch.watcher.ResourceWatcherService; +import org.elasticsearch.xpack.core.XPackPlugin; import org.elasticsearch.xpack.core.watcher.watch.ClockMock; import org.junit.After; import org.junit.Before; import 
java.nio.file.Path; +import java.util.Arrays; -import static java.util.Collections.emptyMap; import static java.util.Collections.emptySet; +import static java.util.Collections.singletonMap; import static org.mockito.Mockito.mock; import static org.mockito.Mockito.when; @@ -66,6 +68,7 @@ public abstract class AbstractLicenseServiceTestCase extends ESTestCase { when(state.metaData()).thenReturn(metaData); final DiscoveryNode mockNode = getLocalNode(); when(discoveryNodes.getMasterNode()).thenReturn(mockNode); + when(discoveryNodes.spliterator()).thenReturn(Arrays.asList(mockNode).spliterator()); when(discoveryNodes.isLocalNodeElectedMaster()).thenReturn(false); when(state.nodes()).thenReturn(discoveryNodes); when(state.getNodes()).thenReturn(discoveryNodes); // it is really ridiculous we have nodes() and getNodes()... @@ -76,7 +79,8 @@ public abstract class AbstractLicenseServiceTestCase extends ESTestCase { } protected DiscoveryNode getLocalNode() { - return new DiscoveryNode("b", buildNewFakeTransportAddress(), emptyMap(), emptySet(), Version.CURRENT); + return new DiscoveryNode("b", buildNewFakeTransportAddress(), singletonMap(XPackPlugin.XPACK_INSTALLED_NODE_ATTR, "true"), + emptySet(), Version.CURRENT); } @After diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/XPackPluginTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/XPackPluginTests.java new file mode 100644 index 00000000000..59731cab71d --- /dev/null +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/XPackPluginTests.java @@ -0,0 +1,99 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.core; + +import org.elasticsearch.Version; +import org.elasticsearch.client.Client; +import org.elasticsearch.cluster.ClusterName; +import org.elasticsearch.cluster.ClusterState; +import org.elasticsearch.cluster.node.DiscoveryNode; +import org.elasticsearch.cluster.node.DiscoveryNodes; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.license.XPackLicenseState; +import org.elasticsearch.test.ESTestCase; +import org.elasticsearch.test.VersionUtils; +import org.elasticsearch.xpack.core.security.authc.TokenMetaData; +import org.elasticsearch.xpack.core.ssl.SSLService; + +import java.util.Collections; +import java.util.Map; + +import static org.hamcrest.Matchers.containsString; + +public class XPackPluginTests extends ESTestCase { + + public void testXPackInstalledAttrClash() throws Exception { + Settings.Builder builder = Settings.builder(); + builder.put("node.attr." + XPackPlugin.XPACK_INSTALLED_NODE_ATTR, randomBoolean()); + if (randomBoolean()) { + builder.put(Client.CLIENT_TYPE_SETTING_S.getKey(), "transport"); + } + XPackPlugin xpackPlugin = createXPackPlugin(builder.put("path.home", createTempDir()).build()); + IllegalArgumentException e = expectThrows(IllegalArgumentException.class, xpackPlugin::additionalSettings); + assertThat(e.getMessage(), + containsString("Directly setting [node.attr." + XPackPlugin.XPACK_INSTALLED_NODE_ATTR + "] is not permitted")); + } + + public void testXPackInstalledAttrExists() throws Exception { + XPackPlugin xpackPlugin = createXPackPlugin(Settings.builder().put("path.home", createTempDir()).build()); + assertEquals("true", xpackPlugin.additionalSettings().get("node.attr." 
+ XPackPlugin.XPACK_INSTALLED_NODE_ATTR)); + } + + public void testNodesNotReadyForXPackCustomMetadata() { + boolean compatible; + boolean nodesCompatible = true; + DiscoveryNodes.Builder discoveryNodes = DiscoveryNodes.builder(); + + for (int i = 0; i < randomInt(3); i++) { + final Version version = VersionUtils.randomVersion(random()); + final Map attributes; + if (randomBoolean() && version.onOrAfter(Version.V_6_3_0)) { + attributes = Collections.singletonMap(XPackPlugin.XPACK_INSTALLED_NODE_ATTR, "true"); + } else { + nodesCompatible = false; + attributes = Collections.emptyMap(); + } + + discoveryNodes.add(new DiscoveryNode("node_" + i, buildNewFakeTransportAddress(), attributes, Collections.emptySet(), + Version.CURRENT)); + } + ClusterState.Builder clusterStateBuilder = ClusterState.builder(ClusterName.DEFAULT); + + if (randomBoolean()) { + clusterStateBuilder.putCustom(TokenMetaData.TYPE, new TokenMetaData(Collections.emptyList(), new byte[0])); + compatible = true; + } else { + compatible = nodesCompatible; + } + + ClusterState clusterState = clusterStateBuilder.nodes(discoveryNodes.build()).build(); + + assertEquals(XPackPlugin.nodesNotReadyForXPackCustomMetadata(clusterState).isEmpty(), nodesCompatible); + assertEquals(XPackPlugin.isReadyForXPackCustomMetadata(clusterState), compatible); + + if (compatible == false) { + IllegalStateException e = expectThrows(IllegalStateException.class, + () -> XPackPlugin.checkReadyForXPackCustomMetadata(clusterState)); + assertThat(e.getMessage(), containsString("The following nodes are not ready yet for enabling x-pack custom metadata:")); + } + } + + private XPackPlugin createXPackPlugin(Settings settings) throws Exception { + return new XPackPlugin(settings, null){ + + @Override + protected void setSslService(SSLService sslService) { + // disable + } + + @Override + protected void setLicenseState(XPackLicenseState licenseState) { + // disable + } + }; + } + +} diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/security/authz/accesscontrol/FieldSubsetReaderTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/security/authz/accesscontrol/FieldSubsetReaderTests.java index 4c74e7f5d90..e71b0e5e8bd 100644 --- a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/security/authz/accesscontrol/FieldSubsetReaderTests.java +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/security/authz/accesscontrol/FieldSubsetReaderTests.java @@ -716,6 +716,22 @@ public class FieldSubsetReaderTests extends ESTestCase { expected.put("foo", subArray); assertEquals(expected, filtered); + + // json array objects that have no matching fields should be left empty instead of being removed: + // (otherwise nested inner hit source filtering fails with AOOB) + map = new HashMap<>(); + map.put("foo", "value"); + List> values = new ArrayList<>(); + values.add(Collections.singletonMap("foo", "1")); + values.add(Collections.singletonMap("baz", "2")); + map.put("bar", values); + + include = new CharacterRunAutomaton(Automatons.patterns("bar.baz")); + filtered = FieldSubsetReader.filter(map, include, 0); + + expected = new HashMap<>(); + expected.put("bar", Arrays.asList(new HashMap<>(), Collections.singletonMap("baz", "2"))); + assertEquals(expected, filtered); } /** diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/MachineLearning.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/MachineLearning.java index bdefabdb294..a1714a8e3f5 100644 --- 
a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/MachineLearning.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/MachineLearning.java @@ -316,12 +316,8 @@ public class MachineLearning extends Plugin implements ActionPlugin, AnalysisPlu } private void addMlNodeAttribute(Settings.Builder additionalSettings, String attrName, String value) { - // Unfortunately we cannot simply disallow any value, because the internal cluster integration - // test framework will restart nodes with settings copied from the node immediately before it - // was stopped. The best we can do is reject inconsistencies, and report this in a way that - // makes clear that setting the node attribute directly is not allowed. String oldValue = settings.get(attrName); - if (oldValue == null || oldValue.equals(value)) { + if (oldValue == null) { additionalSettings.put(attrName, value); } else { reportClashingNodeAttribute(attrName); @@ -487,7 +483,7 @@ public class MachineLearning extends Plugin implements ActionPlugin, AnalysisPlu new RestStartDatafeedAction(settings, restController), new RestStopDatafeedAction(settings, restController), new RestDeleteModelSnapshotAction(settings, restController), - new RestDeleteExpiredDataAction(settings, restController), + new RestDeleteExpiredDataAction(settings, restController), new RestForecastJobAction(settings, restController), new RestGetCalendarsAction(settings, restController), new RestPutCalendarAction(settings, restController), diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportDeleteDatafeedAction.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportDeleteDatafeedAction.java index 220d97e89ba..8c9eabe6de1 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportDeleteDatafeedAction.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportDeleteDatafeedAction.java @@ -21,6 +21,7 @@ import org.elasticsearch.common.inject.Inject; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.threadpool.ThreadPool; import org.elasticsearch.transport.TransportService; +import org.elasticsearch.xpack.core.XPackPlugin; import org.elasticsearch.xpack.core.ml.MLMetadataField; import org.elasticsearch.xpack.core.ml.MlMetadata; import org.elasticsearch.xpack.core.ml.action.DeleteDatafeedAction; @@ -120,6 +121,7 @@ public class TransportDeleteDatafeedAction extends TransportMasterNodeAction XPackPlugin.bindFeatureSet(b, SecurityFeatureSet.class)); - + if (enabled == false) { modules.add(b -> { b.bind(Realms.class).toProvider(Providers.of(null)); // for SecurityFeatureSet @@ -903,15 +903,6 @@ public class Security extends Plugin implements ActionPlugin, IngestPlugin, Netw }; } - @Override - public Map<String, Supplier<ClusterState.Custom>> getInitialClusterStateCustomSupplier() { - if (enabled) { - return Collections.singletonMap(TokenMetaData.TYPE, () -> tokenService.get().getTokenMetaData()); - } else { - return Collections.emptyMap(); - } - } - @Override public Function<String, Predicate<String>> getFieldFilter() { if (enabled) { diff --git a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/TokenService.java b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/TokenService.java index d23415f87df..2934fb8062d 100644 --- a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/TokenService.java +++ b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/TokenService.java @@ -8,6 +8,9 @@ package
org.elasticsearch.xpack.security.authc; import org.apache.logging.log4j.message.ParameterizedMessage; import org.apache.lucene.util.BytesRef; import org.apache.lucene.util.BytesRefBuilder; +import org.elasticsearch.cluster.ClusterStateUpdateTask; +import org.elasticsearch.cluster.metadata.MetaData; +import org.elasticsearch.common.Priority; import org.elasticsearch.core.internal.io.IOUtils; import org.apache.lucene.util.UnicodeUtil; import org.elasticsearch.ElasticsearchSecurityException; @@ -63,6 +66,7 @@ import org.elasticsearch.index.query.BoolQueryBuilder; import org.elasticsearch.index.query.QueryBuilders; import org.elasticsearch.rest.RestStatus; import org.elasticsearch.search.SearchHit; +import org.elasticsearch.xpack.core.XPackPlugin; import org.elasticsearch.xpack.core.XPackSettings; import org.elasticsearch.xpack.core.XPackField; import org.elasticsearch.xpack.core.security.ScrollHelper; @@ -107,6 +111,7 @@ import java.util.List; import java.util.Map; import java.util.Optional; import java.util.concurrent.ExecutionException; +import java.util.concurrent.atomic.AtomicBoolean; import java.util.concurrent.atomic.AtomicInteger; import java.util.concurrent.atomic.AtomicLong; import java.util.function.Supplier; @@ -1327,6 +1332,8 @@ public final class TokenService extends AbstractComponent { @Override public ClusterState execute(ClusterState currentState) throws Exception { + XPackPlugin.checkReadyForXPackCustomMetadata(currentState); + if (tokenMetaData.equals(currentState.custom(TokenMetaData.TYPE))) { return currentState; } @@ -1347,6 +1354,15 @@ public final class TokenService extends AbstractComponent { return; } + if (state.nodes().isLocalNodeElectedMaster()) { + if (XPackPlugin.isReadyForXPackCustomMetadata(state)) { + installTokenMetadata(state.metaData()); + } else { + logger.debug("cannot add token metadata to cluster as the following nodes might not understand the metadata: {}", + () -> XPackPlugin.nodesNotReadyForXPackCustomMetadata(state)); + } + } + TokenMetaData custom = event.state().custom(TokenMetaData.TYPE); if (custom != null && custom.equals(getTokenMetaData()) == false) { logger.info("refresh keys"); @@ -1360,6 +1376,39 @@ public final class TokenService extends AbstractComponent { }); } + // to prevent too many cluster state update tasks from being queued for the same update + private final AtomicBoolean installTokenMetadataInProgress = new AtomicBoolean(false); + + private void installTokenMetadata(MetaData metaData) { + if (metaData.custom(TokenMetaData.TYPE) == null) { + if (installTokenMetadataInProgress.compareAndSet(false, true)) { + clusterService.submitStateUpdateTask("install-token-metadata", new ClusterStateUpdateTask(Priority.URGENT) { + @Override + public ClusterState execute(ClusterState currentState) { + XPackPlugin.checkReadyForXPackCustomMetadata(currentState); + + if (currentState.custom(TokenMetaData.TYPE) == null) { + return ClusterState.builder(currentState).putCustom(TokenMetaData.TYPE, getTokenMetaData()).build(); + } else { + return currentState; + } + } + + @Override + public void onFailure(String source, Exception e) { + installTokenMetadataInProgress.set(false); + logger.error("unable to install token metadata", e); + } + + @Override + public void clusterStateProcessed(String source, ClusterState oldState, ClusterState newState) { + installTokenMetadataInProgress.set(false); + } + }); + } + } + } + /** * For testing */ diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/test/SecurityIntegTestCase.java
b/x-pack/plugin/security/src/test/java/org/elasticsearch/test/SecurityIntegTestCase.java index 00b46b332cb..815f2694276 100644 --- a/x-pack/plugin/security/src/test/java/org/elasticsearch/test/SecurityIntegTestCase.java +++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/test/SecurityIntegTestCase.java @@ -47,6 +47,7 @@ import org.elasticsearch.xpack.security.support.SecurityIndexManager; import org.junit.AfterClass; import org.junit.Before; import org.junit.BeforeClass; +import org.junit.ClassRule; import org.junit.Rule; import org.junit.rules.ExternalResource; @@ -163,24 +164,6 @@ public abstract class SecurityIntegTestCase extends ESIntegTestCase { public static void destroyDefaultSettings() { SECURITY_DEFAULT_SETTINGS = null; customSecuritySettingsSource = null; - // Wait for the network threads to finish otherwise there is the possibility that one of - // the threads lingers and trips the thread leak detector - try { - GlobalEventExecutor.INSTANCE.awaitInactivity(5, TimeUnit.SECONDS); - } catch (InterruptedException e) { - Thread.currentThread().interrupt(); - } catch (IllegalStateException e) { - if (e.getMessage().equals("thread was not started") == false) { - throw e; - } - // ignore since the thread was never started - } - - try { - ThreadDeathWatcher.awaitInactivity(5, TimeUnit.SECONDS); - } catch (InterruptedException e) { - Thread.currentThread().interrupt(); - } } @Rule @@ -204,6 +187,35 @@ public abstract class SecurityIntegTestCase extends ESIntegTestCase { } }; + /** + * A JUnit class-level rule that runs after the AfterClass method in {@link ESIntegTestCase}, + * which stops the cluster. After the cluster is stopped, there are a few netty threads that + * can linger, so we wait for them to finish; otherwise these lingering threads can intermittently + * trigger the thread leak detector. + */ + @ClassRule + public static final ExternalResource STOP_NETTY_RESOURCE = new ExternalResource() { + @Override + protected void after() { + try { + GlobalEventExecutor.INSTANCE.awaitInactivity(5, TimeUnit.SECONDS); + } catch (InterruptedException e) { + Thread.currentThread().interrupt(); + } catch (IllegalStateException e) { + if (e.getMessage().equals("thread was not started") == false) { + throw e; + } + // ignore since the thread was never started + } + + try { + ThreadDeathWatcher.awaitInactivity(5, TimeUnit.SECONDS); + } catch (InterruptedException e) { + Thread.currentThread().interrupt(); + } + } + }; + @Before //before methods from the superclass are run before this, which means that the current cluster is ready to go public void assertXPackIsInstalled() { diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/test/SecuritySingleNodeTestCase.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/test/SecuritySingleNodeTestCase.java index 1ee654c0baf..cda627806e7 100644 --- a/x-pack/plugin/security/src/test/java/org/elasticsearch/test/SecuritySingleNodeTestCase.java +++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/test/SecuritySingleNodeTestCase.java @@ -26,6 +26,7 @@ import org.elasticsearch.xpack.security.LocalStateSecurity; import org.junit.AfterClass; import org.junit.Before; import org.junit.BeforeClass; +import org.junit.ClassRule; import org.junit.Rule; import org.junit.rules.ExternalResource; @@ -97,25 +98,6 @@ public abstract class SecuritySingleNodeTestCase extends ESSingleNodeTestCase { IOUtils.closeWhileHandlingException(restClient); restClient = null; } - - // Wait for the network threads to finish otherwise there is the
possibility that one of - the threads lingers and trips the thread leak detector - try { - GlobalEventExecutor.INSTANCE.awaitInactivity(5, TimeUnit.SECONDS); - } catch (InterruptedException e) { - Thread.currentThread().interrupt(); - } catch (IllegalStateException e) { - if (e.getMessage().equals("thread was not started") == false) { - throw e; - } - // ignore since the thread was never started - } - - try { - ThreadDeathWatcher.awaitInactivity(5, TimeUnit.SECONDS); - } catch (InterruptedException e) { - Thread.currentThread().interrupt(); - } } @Rule @@ -130,6 +112,35 @@ public abstract class SecuritySingleNodeTestCase extends ESSingleNodeTestCase { } }; + /** + * A JUnit class-level rule that runs after the AfterClass method in {@link ESSingleNodeTestCase}, + * which stops the cluster. After the cluster is stopped, there are a few netty threads that + * can linger, so we wait for them to finish; otherwise these lingering threads can intermittently + * trigger the thread leak detector. + */ + @ClassRule + public static final ExternalResource STOP_NETTY_RESOURCE = new ExternalResource() { + @Override + protected void after() { + try { + GlobalEventExecutor.INSTANCE.awaitInactivity(5, TimeUnit.SECONDS); + } catch (InterruptedException e) { + Thread.currentThread().interrupt(); + } catch (IllegalStateException e) { + if (e.getMessage().equals("thread was not started") == false) { + throw e; + } + // ignore since the thread was never started + } + + try { + ThreadDeathWatcher.awaitInactivity(5, TimeUnit.SECONDS); + } catch (InterruptedException e) { + Thread.currentThread().interrupt(); + } + } + }; + @Before //before methods from the superclass are run before this, which means that the current cluster is ready to go public void assertXPackIsInstalled() { diff --git a/x-pack/plugin/sql/jdbc/licenses/lucene-core-7.4.0-snapshot-59f2b7aec2.jar.sha1 b/x-pack/plugin/sql/jdbc/licenses/lucene-core-7.4.0-snapshot-59f2b7aec2.jar.sha1 deleted file mode 100644 index 50392f59374..00000000000 --- a/x-pack/plugin/sql/jdbc/licenses/lucene-core-7.4.0-snapshot-59f2b7aec2.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -bf8f9e8284a54af18545574cb4a530da0deb968a \ No newline at end of file diff --git a/x-pack/plugin/sql/jdbc/licenses/lucene-core-7.4.0-snapshot-cc2ee23050.jar.sha1 b/x-pack/plugin/sql/jdbc/licenses/lucene-core-7.4.0-snapshot-cc2ee23050.jar.sha1 new file mode 100644 index 00000000000..4aecfc6a550 --- /dev/null +++ b/x-pack/plugin/sql/jdbc/licenses/lucene-core-7.4.0-snapshot-cc2ee23050.jar.sha1 @@ -0,0 +1 @@ +e118e4d05070378516b9055184b74498ba528dee \ No newline at end of file diff --git a/x-pack/plugin/sql/sql-proto/licenses/lucene-core-7.4.0-snapshot-59f2b7aec2.jar.sha1 b/x-pack/plugin/sql/sql-proto/licenses/lucene-core-7.4.0-snapshot-59f2b7aec2.jar.sha1 deleted file mode 100644 index 50392f59374..00000000000 --- a/x-pack/plugin/sql/sql-proto/licenses/lucene-core-7.4.0-snapshot-59f2b7aec2.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -bf8f9e8284a54af18545574cb4a530da0deb968a \ No newline at end of file diff --git a/x-pack/plugin/sql/sql-proto/licenses/lucene-core-7.4.0-snapshot-cc2ee23050.jar.sha1 b/x-pack/plugin/sql/sql-proto/licenses/lucene-core-7.4.0-snapshot-cc2ee23050.jar.sha1 new file mode 100644 index 00000000000..4aecfc6a550 --- /dev/null +++ b/x-pack/plugin/sql/sql-proto/licenses/lucene-core-7.4.0-snapshot-cc2ee23050.jar.sha1 @@ -0,0 +1 @@ +e118e4d05070378516b9055184b74498ba528dee \ No newline at end of file diff --git
a/x-pack/plugin/sql/sql-proto/src/test/java/org/elasticsearch/xpack/sql/plugin/SqlTranslateResponseTests.java b/x-pack/plugin/sql/sql-proto/src/test/java/org/elasticsearch/xpack/sql/plugin/SqlTranslateResponseTests.java index 061b70a55d9..76f73fada06 100644 --- a/x-pack/plugin/sql/sql-proto/src/test/java/org/elasticsearch/xpack/sql/plugin/SqlTranslateResponseTests.java +++ b/x-pack/plugin/sql/sql-proto/src/test/java/org/elasticsearch/xpack/sql/plugin/SqlTranslateResponseTests.java @@ -6,6 +6,7 @@ package org.elasticsearch.xpack.sql.plugin; import org.elasticsearch.search.builder.SearchSourceBuilder; +import org.elasticsearch.search.fetch.subphase.DocValueFieldsContext; import org.elasticsearch.test.AbstractStreamableTestCase; import java.io.IOException; @@ -18,7 +19,7 @@ public class SqlTranslateResponseTests extends AbstractStreamableTestCase sourceFields = new LinkedHashSet<>(); - final Set<String> docFields = new LinkedHashSet<>(); + final Set<FieldAndFormat> docFields = new LinkedHashSet<>(); final Map<String, Script> scriptFields = new LinkedHashMap<>(); boolean trackScores = false; @@ -47,8 +49,8 @@ public class SqlSourceBuilder { /** * Retrieve the requested field from doc values (or fielddata) of the document */ - public void addDocField(String field) { - docFields.add(field); + public void addDocField(String field, String format) { + docFields.add(new FieldAndFormat(field, format)); } /** @@ -67,7 +69,8 @@ public class SqlSourceBuilder { if (!sourceFields.isEmpty()) { sourceBuilder.fetchSource(sourceFields.toArray(Strings.EMPTY_ARRAY), null); } - docFields.forEach(sourceBuilder::docValueField); + docFields.forEach(field -> sourceBuilder.docValueField(field.field, + field.format == null ? DocValueFieldsContext.USE_DEFAULT_FORMAT : field.format)); scriptFields.forEach(sourceBuilder::scriptField); } } diff --git a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/execution/search/extractor/FieldHitExtractor.java b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/execution/search/extractor/FieldHitExtractor.java index 159127fb24c..66e17753054 100644 --- a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/execution/search/extractor/FieldHitExtractor.java +++ b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/execution/search/extractor/FieldHitExtractor.java @@ -5,12 +5,16 @@ */ package org.elasticsearch.xpack.sql.execution.search.extractor; +import org.elasticsearch.Version; import org.elasticsearch.common.Strings; import org.elasticsearch.common.document.DocumentField; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.search.SearchHit; import org.elasticsearch.xpack.sql.SqlIllegalArgumentException; +import org.elasticsearch.xpack.sql.type.DataType; +import org.joda.time.DateTime; +import org.joda.time.DateTimeZone; import org.joda.time.ReadableDateTime; import java.io.IOException; @@ -41,15 +45,17 @@ public class FieldHitExtractor implements HitExtractor { } private final String fieldName, hitName; + private final DataType dataType; private final boolean useDocValue; private final String[] path; - public FieldHitExtractor(String name, boolean useDocValue) { - this(name, useDocValue, null); + public FieldHitExtractor(String name, DataType dataType, boolean useDocValue) { + this(name, dataType, useDocValue, null); } - public FieldHitExtractor(String name, boolean useDocValue, String hitName) { + public FieldHitExtractor(String name, DataType dataType, boolean useDocValue, String hitName) {
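// the mapped DataType now travels with the field name so that extract() can undo the
// epoch_millis doc-value format requested for DATE columns (see SearchHitFieldRef below):
// the millis string read back from doc values is parsed into a DateTime in UTC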
this.fieldName = name; + this.dataType = dataType; this.useDocValue = useDocValue; this.hitName = hitName; @@ -64,6 +70,16 @@ public class FieldHitExtractor implements HitExtractor { FieldHitExtractor(StreamInput in) throws IOException { fieldName = in.readString(); + if (in.getVersion().onOrAfter(Version.V_6_4_0)) { + String esType = in.readOptionalString(); + if (esType != null) { + dataType = DataType.fromEsType(esType); + } else { + dataType = null; + } + } else { + dataType = null; + } useDocValue = in.readBoolean(); hitName = in.readOptionalString(); path = sourcePath(fieldName, useDocValue, hitName); @@ -77,6 +93,9 @@ public class FieldHitExtractor implements HitExtractor { @Override public void writeTo(StreamOutput out) throws IOException { out.writeString(fieldName); + if (out.getVersion().onOrAfter(Version.V_6_4_0)) { + out.writeOptionalString(dataType == null ? null : dataType.esType); + } out.writeBoolean(useDocValue); out.writeOptionalString(hitName); } @@ -117,6 +136,9 @@ public class FieldHitExtractor implements HitExtractor { if (values instanceof Map) { throw new SqlIllegalArgumentException("Objects (returned by [{}]) are not supported", fieldName); } + if (values instanceof String && dataType == DataType.DATE) { + return new DateTime(Long.parseLong(values.toString()), DateTimeZone.UTC); + } if (values instanceof Long || values instanceof Double || values instanceof String || values instanceof Boolean || values instanceof ReadableDateTime) { return values; diff --git a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/datetime/DateTimeProcessor.java b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/datetime/DateTimeProcessor.java index 6aa6b6a50e9..d135b8a0865 100644 --- a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/datetime/DateTimeProcessor.java +++ b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/datetime/DateTimeProcessor.java @@ -9,9 +9,11 @@ import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.xpack.sql.SqlIllegalArgumentException; import org.elasticsearch.xpack.sql.expression.function.scalar.processor.runtime.Processor; +import org.joda.time.DateTime; import org.joda.time.DateTimeFieldType; import org.joda.time.DateTimeZone; import org.joda.time.ReadableDateTime; +import org.joda.time.ReadableInstant; import java.io.IOException; import java.util.Objects; @@ -78,15 +80,21 @@ public class DateTimeProcessor implements Processor { return null; } - if (!(l instanceof ReadableDateTime)) { - throw new SqlIllegalArgumentException("A date/time is required; received {}", l); + ReadableDateTime dt; + if (l instanceof String) { + // 6.4+ + final long millis = Long.parseLong(l.toString()); + dt = new DateTime(millis, DateTimeZone.forTimeZone(timeZone)); + } else if (l instanceof ReadableInstant) { + // 6.3- + dt = (ReadableDateTime) l; + if (!TimeZone.getTimeZone("UTC").equals(timeZone)) { + dt = dt.toDateTime().withZone(DateTimeZone.forTimeZone(timeZone)); + } + } else { + throw new SqlIllegalArgumentException("A string or a date is required; received {}", l); } - ReadableDateTime dt = (ReadableDateTime) l; - - if (!TimeZone.getTimeZone("UTC").equals(timeZone)) { - dt = dt.toDateTime().withZone(DateTimeZone.forTimeZone(timeZone)); - } return extractor.extract(dt); } diff --git 
a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/querydsl/container/QueryContainer.java b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/querydsl/container/QueryContainer.java index bca180315d9..9f9c1bb21bb 100644 --- a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/querydsl/container/QueryContainer.java +++ b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/querydsl/container/QueryContainer.java @@ -173,7 +173,7 @@ public class QueryContainer { // reference methods // private FieldExtraction topHitFieldRef(FieldAttribute fieldAttr) { - return new SearchHitFieldRef(aliasName(fieldAttr), fieldAttr.field().hasDocValues()); + return new SearchHitFieldRef(aliasName(fieldAttr), fieldAttr.field().getDataType(), fieldAttr.field().hasDocValues()); } private Tuple<QueryContainer, FieldExtraction> nestedHitFieldRef(FieldAttribute attr) { @@ -184,7 +184,8 @@ public class QueryContainer { Query q = rewriteToContainNestedField(query, attr.location(), attr.nestedParent().name(), name, attr.field().hasDocValues()); - SearchHitFieldRef nestedFieldRef = new SearchHitFieldRef(name, attr.field().hasDocValues(), attr.parent().name()); + SearchHitFieldRef nestedFieldRef = new SearchHitFieldRef(name, attr.field().getDataType(), + attr.field().hasDocValues(), attr.parent().name()); nestedRefs.add(nestedFieldRef); return new Tuple<>(new QueryContainer(q, aggs, columns, aliases, pseudoFunctions, scalarFunctions, sort, limit), nestedFieldRef); diff --git a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/querydsl/container/SearchHitFieldRef.java b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/querydsl/container/SearchHitFieldRef.java index 6a7f24b447e..7f799108d28 100644 --- a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/querydsl/container/SearchHitFieldRef.java +++ b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/querydsl/container/SearchHitFieldRef.java @@ -6,18 +6,21 @@ package org.elasticsearch.xpack.sql.querydsl.container; import org.elasticsearch.xpack.sql.execution.search.SqlSourceBuilder; +import org.elasticsearch.xpack.sql.type.DataType; public class SearchHitFieldRef extends FieldReference { private final String name; + private final DataType dataType; private final boolean docValue; private final String hitName; - public SearchHitFieldRef(String name, boolean useDocValueInsteadOfSource) { - this(name, useDocValueInsteadOfSource, null); + public SearchHitFieldRef(String name, DataType dataType, boolean useDocValueInsteadOfSource) { + this(name, dataType, useDocValueInsteadOfSource, null); } - public SearchHitFieldRef(String name, boolean useDocValueInsteadOfSource, String hitName) { + public SearchHitFieldRef(String name, DataType dataType, boolean useDocValueInsteadOfSource, String hitName) { this.name = name; + this.dataType = dataType; this.docValue = useDocValueInsteadOfSource; this.hitName = hitName; } @@ -31,6 +34,10 @@ public class SearchHitFieldRef extends FieldReference { return name; } + public DataType getDataType() { + return dataType; + } + public boolean useDocValue() { return docValue; } @@ -42,7 +49,8 @@ public class SearchHitFieldRef extends FieldReference { return; } if (docValue) { - sourceBuilder.addDocField(name); + String format = dataType == DataType.DATE ?
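// fetch DATE columns from doc values as epoch_millis so FieldHitExtractor can parse the
// returned string back into a DateTime, independent of the format in the field mapping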
"epoch_millis" : null; + sourceBuilder.addDocField(name, format); } else { sourceBuilder.addSourceField(name); } diff --git a/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/action/SqlLicenseIT.java b/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/action/SqlLicenseIT.java index a97f66763a9..8988f70672a 100644 --- a/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/action/SqlLicenseIT.java +++ b/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/action/SqlLicenseIT.java @@ -16,6 +16,7 @@ import org.elasticsearch.license.License; import org.elasticsearch.license.License.OperationMode; import org.elasticsearch.plugins.Plugin; import org.elasticsearch.search.builder.SearchSourceBuilder; +import org.elasticsearch.search.fetch.subphase.DocValueFieldsContext; import org.elasticsearch.search.fetch.subphase.FetchSourceContext; import org.elasticsearch.test.hamcrest.ElasticsearchAssertions; import org.elasticsearch.transport.Netty4Plugin; @@ -150,7 +151,8 @@ public class SqlLicenseIT extends AbstractLicensesIntegrationTestCase { SqlTranslateResponse response = client().prepareExecute(SqlTranslateAction.INSTANCE).query("SELECT * FROM test").get(); SearchSourceBuilder source = response.source(); - assertThat(source.docValueFields(), Matchers.contains("count")); + assertThat(source.docValueFields(), Matchers.contains( + new DocValueFieldsContext.FieldAndFormat("count", DocValueFieldsContext.USE_DEFAULT_FORMAT))); FetchSourceContext fetchSource = source.fetchSource(); assertThat(fetchSource.includes(), Matchers.arrayContaining("data")); } diff --git a/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/action/SqlTranslateActionIT.java b/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/action/SqlTranslateActionIT.java index 5de9cfca97a..a4c440eb9df 100644 --- a/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/action/SqlTranslateActionIT.java +++ b/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/action/SqlTranslateActionIT.java @@ -8,6 +8,7 @@ package org.elasticsearch.xpack.sql.action; import org.elasticsearch.action.index.IndexRequest; import org.elasticsearch.action.support.WriteRequest; import org.elasticsearch.search.builder.SearchSourceBuilder; +import org.elasticsearch.search.fetch.subphase.DocValueFieldsContext; import org.elasticsearch.search.fetch.subphase.FetchSourceContext; import org.elasticsearch.search.sort.SortBuilders; import org.elasticsearch.xpack.sql.plugin.SqlTranslateAction; @@ -35,7 +36,9 @@ public class SqlTranslateActionIT extends AbstractSqlIntegTestCase { FetchSourceContext fetch = source.fetchSource(); assertEquals(true, fetch.fetchSource()); assertArrayEquals(new String[] { "data" }, fetch.includes()); - assertEquals(singletonList("count"), source.docValueFields()); + assertEquals( + singletonList(new DocValueFieldsContext.FieldAndFormat("count", DocValueFieldsContext.USE_DEFAULT_FORMAT)), + source.docValueFields()); assertEquals(singletonList(SortBuilders.fieldSort("count")), source.sorts()); } } diff --git a/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/execution/search/SqlSourceBuilderTests.java b/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/execution/search/SqlSourceBuilderTests.java index 0d57ad97c98..6ee843c2c63 100644 --- a/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/execution/search/SqlSourceBuilderTests.java +++ b/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/execution/search/SqlSourceBuilderTests.java @@ -24,8 +24,8 @@ 
public class SqlSourceBuilderTests extends ESTestCase { ssb.trackScores(); ssb.addSourceField("foo"); ssb.addSourceField("foo2"); - ssb.addDocField("bar"); - ssb.addDocField("bar2"); + ssb.addDocField("bar", null); + ssb.addDocField("bar2", null); final Script s = new Script("eggplant"); ssb.addScriptField("baz", s); final Script s2 = new Script("potato"); @@ -35,7 +35,7 @@ public class SqlSourceBuilderTests extends ESTestCase { assertTrue(source.trackScores()); FetchSourceContext fsc = source.fetchSource(); assertThat(Arrays.asList(fsc.includes()), contains("foo", "foo2")); - assertThat(source.docValueFields(), contains("bar", "bar2")); + assertThat(source.docValueFields().stream().map(ff -> ff.field).collect(Collectors.toList()), contains("bar", "bar2")); Map<String, Script> scriptFields = source.scriptFields() .stream() .collect(Collectors.toMap(SearchSourceBuilder.ScriptField::fieldName, SearchSourceBuilder.ScriptField::script)); diff --git a/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/execution/search/extractor/ComputingExtractorTests.java b/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/execution/search/extractor/ComputingExtractorTests.java index 74721eca22a..375de112fe8 100644 --- a/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/execution/search/extractor/ComputingExtractorTests.java +++ b/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/execution/search/extractor/ComputingExtractorTests.java @@ -70,7 +70,7 @@ public class ComputingExtractorTests extends AbstractWireSerializingTestCase documentFieldValues = Collections.singletonList(Long.toString(millis)); + SearchHit hit = new SearchHit(1); + DocumentField field = new DocumentField("my_date_field", documentFieldValues); + hit.fields(singletonMap("my_date_field", field)); + FieldHitExtractor extractor = new FieldHitExtractor("my_date_field", DataType.DATE, true); + assertEquals(new DateTime(millis, DateTimeZone.UTC), extractor.extract(hit)); + } + public void testGetSource() throws IOException { String fieldName = randomAlphaOfLength(5); - FieldHitExtractor extractor = new FieldHitExtractor(fieldName, false); + FieldHitExtractor extractor = new FieldHitExtractor(fieldName, null, false); int times = between(1, 1000); for (int i = 0; i < times; i++) { @@ -164,12 +177,12 @@ public class FieldHitExtractorTests extends AbstractWireSerializingTestCase map = singletonMap("a", singletonMap("b", singletonMap("c", value))); assertThat(fe.extractFromSource(map), is(value)); } public void testExtractSourceIncorrectPath() { - FieldHitExtractor fe = new FieldHitExtractor("a.b.c.d", false); + FieldHitExtractor fe = new FieldHitExtractor("a.b.c.d", null, false); Object value = randomNonNullValue(); Map<String, Object> map = singletonMap("a", singletonMap("b", singletonMap("c", value))); SqlException ex = expectThrows(SqlException.class, () -> fe.extractFromSource(map)); @@ -223,7 +236,7 @@ public class FieldHitExtractorTests extends AbstractWireSerializingTestCase map = singletonMap("a", asList(value, value)); SqlException ex = expectThrows(SqlException.class, () -> fe.extractFromSource(map)); diff --git a/x-pack/plugin/src/test/resources/rest-api-spec/test/sql/translate.yml b/x-pack/plugin/src/test/resources/rest-api-spec/test/sql/translate.yml index b3d93e52988..f47ea2d4d7e 100644 --- a/x-pack/plugin/src/test/resources/rest-api-spec/test/sql/translate.yml +++ b/x-pack/plugin/src/test/resources/rest-api-spec/test/sql/translate.yml @@ -1,5 +1,10 @@ --- "Translate SQL": + - skip: + version: " - 6.3.99" + reason: format option
was added in 6.4 + features: warnings + - do: bulk: refresh: true @@ -23,7 +28,8 @@ - str excludes: [] docvalue_fields: - - int + - field: int + format: use_field_mapping sort: - int: order: asc diff --git a/x-pack/plugin/watcher/src/main/bin/elasticsearch-croneval.bat b/x-pack/plugin/watcher/src/main/bin/elasticsearch-croneval.bat index 7fd983c9ba5..37ca14dd094 100644 Binary files a/x-pack/plugin/watcher/src/main/bin/elasticsearch-croneval.bat and b/x-pack/plugin/watcher/src/main/bin/elasticsearch-croneval.bat differ diff --git a/x-pack/plugin/watcher/src/main/bin/x-pack-watcher-env.bat b/x-pack/plugin/watcher/src/main/bin/x-pack-watcher-env.bat index 010c154eb5a..4c7f762dca2 100644 Binary files a/x-pack/plugin/watcher/src/main/bin/x-pack-watcher-env.bat and b/x-pack/plugin/watcher/src/main/bin/x-pack-watcher-env.bat differ diff --git a/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/transport/actions/service/TransportWatcherServiceAction.java b/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/transport/actions/service/TransportWatcherServiceAction.java index fa78208494f..6b2bb26ef45 100644 --- a/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/transport/actions/service/TransportWatcherServiceAction.java +++ b/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/transport/actions/service/TransportWatcherServiceAction.java @@ -23,6 +23,7 @@ import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.unit.TimeValue; import org.elasticsearch.threadpool.ThreadPool; import org.elasticsearch.transport.TransportService; +import org.elasticsearch.xpack.core.XPackPlugin; import org.elasticsearch.xpack.core.watcher.WatcherMetaData; import org.elasticsearch.xpack.core.watcher.transport.actions.service.WatcherServiceAction; import org.elasticsearch.xpack.core.watcher.transport.actions.service.WatcherServiceRequest; @@ -86,6 +87,8 @@ public class TransportWatcherServiceAction extends TransportMasterNodeAction( Streams.copyToString(new InputStreamReader(response.getEntity().getContent(), StandardCharsets.UTF_8)),
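// reads the full HTTP response entity into a UTF-8 String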