diff --git a/.gitignore b/.gitignore index 053074dbe..26be4183a 100644 --- a/.gitignore +++ b/.gitignore @@ -1,3 +1,7 @@ +.DS_Store +*.graphml +.springBeans + atlassian-ide-plugin.xml ## Ignore svn files diff --git a/pom.xml b/pom.xml index 6580190b6..fc3e71bde 100644 --- a/pom.xml +++ b/pom.xml @@ -19,7 +19,7 @@ 3.2.1 2.6 - 6.3.0 + 6.5.0 2.9.1 2.2.0.BUILD-SNAPSHOT spring.data.elasticsearch @@ -51,6 +51,24 @@ ${springdata.commons} + + org.springframework + spring-webflux + 5.1.0.RELEASE + + + + io.projectreactor.netty + reactor-netty + 0.8.0.RELEASE + + + + io.projectreactor + reactor-test + 3.2.0.RELEASE + + commons-lang diff --git a/src/main/java/org/springframework/data/elasticsearch/client/ElasticsearchClients.java b/src/main/java/org/springframework/data/elasticsearch/client/ElasticsearchClients.java new file mode 100644 index 000000000..95bc0be72 --- /dev/null +++ b/src/main/java/org/springframework/data/elasticsearch/client/ElasticsearchClients.java @@ -0,0 +1,197 @@ +/* + * Copyright 2018 the original author or authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.springframework.data.elasticsearch.client; + +import java.util.ArrayList; +import java.util.Arrays; +import java.util.List; +import java.util.stream.Collectors; + +import org.apache.http.Header; +import org.apache.http.HttpHost; +import org.apache.http.message.BasicHeader; +import org.elasticsearch.client.RestClient; +import org.elasticsearch.client.RestClientBuilder; +import org.elasticsearch.client.RestHighLevelClient; +import org.springframework.data.elasticsearch.client.reactive.DefaultReactiveElasticsearchClient; +import org.springframework.data.elasticsearch.client.reactive.ReactiveElasticsearchClient; +import org.springframework.http.HttpHeaders; +import org.springframework.util.Assert; + +/** + * Utility class for common access to Elasticsearch clients. {@link ElasticsearchClients} consolidates set up routines + * for the various drivers into a single place. + * + * @author Christoph Strobl + * @since 4.0 + */ +public final class ElasticsearchClients { + + private ElasticsearchClients() {} + + /** + * Start here to create a new client tailored to your needs. + * + * @return new instance of {@link ClientBuilderWithRequiredHost}. + */ + public static ClientBuilderWithRequiredHost createClient() { + return new ElasticsearchClientBuilderImpl(); + } + + /** + * @author Christoph Strobl + */ + public interface ElasticsearchClientBuilder { + + /** + * Apply the configuration to create a {@link ReactiveElasticsearchClient}. + * + * @return new instance of {@link ReactiveElasticsearchClient}. + */ + ReactiveElasticsearchClient reactive(); + + /** + * Apply the configuration to create a {@link RestHighLevelClient}. + * + * @return new instance of {@link RestHighLevelClient}. + */ + RestHighLevelClient rest(); + + /** + * Apply the configuration to create a {@link RestClient}. + * + * @return new instance of {@link RestClient}. 
+ */ + default RestClient lowLevelRest() { + return rest().getLowLevelClient(); + } + } + + /** + * @author Christoph Strobl + */ + public interface ClientBuilderWithRequiredHost { + + /** + * @param host the {@literal host} and {@literal port} formatted as String {@literal host:port}. You may leave out + * {@literal http / https} and use {@link MaybeSecureClientBuilder#viaSsl() viaSsl}. + * @return the {@link MaybeSecureClientBuilder}. + */ + default MaybeSecureClientBuilder connectedTo(String host) { + return connectedTo(new String[] { host }); + } + + /** + * @param hosts the list of {@literal host} and {@literal port} combinations formatted as String + * {@literal host:port}. You may leave out {@literal http / https} and use + * {@link MaybeSecureClientBuilder#viaSsl() viaSsl}. + * @return the {@link MaybeSecureClientBuilder}. + */ + MaybeSecureClientBuilder connectedTo(String... hosts); + + /** + * Obviously for testing. + * + * @return the {@link MaybeSecureClientBuilder}. + */ + default MaybeSecureClientBuilder connectedToLocalhost() { + return connectedTo("localhost:9200"); + } + } + + /** + * @author Christoph Strobl + */ + public interface MaybeSecureClientBuilder extends ClientBuilderWithOptionalDefaultHeaders { + + /** + * Connect via {@literal https}
+ * NOTE You need to leave out the protocol in + * {@link ClientBuilderWithRequiredHost#connectedTo(String)}. + * + * @return the {@link ClientBuilderWithOptionalDefaultHeaders}. + */ + ClientBuilderWithOptionalDefaultHeaders viaSsl(); + } + + /** + * @author Christoph Strobl + */ + public interface ClientBuilderWithOptionalDefaultHeaders extends ElasticsearchClientBuilder { + + /** + * @param defaultHeaders + * @return the {@link ElasticsearchClientBuilder} + */ + ElasticsearchClientBuilder withDefaultHeaders(HttpHeaders defaultHeaders); + } + + private static class ElasticsearchClientBuilderImpl + implements ElasticsearchClientBuilder, ClientBuilderWithRequiredHost, MaybeSecureClientBuilder { + + private List hosts = new ArrayList<>(); + private HttpHeaders headers = HttpHeaders.EMPTY; + private String protocoll = "http"; + + @Override + public ReactiveElasticsearchClient reactive() { + return DefaultReactiveElasticsearchClient.create(headers, formattedHosts().toArray(new String[0])); + } + + @Override + public RestHighLevelClient rest() { + + HttpHost[] httpHosts = formattedHosts().stream().map(HttpHost::create).toArray(HttpHost[]::new); + RestClientBuilder builder = RestClient.builder(httpHosts); + + if (!headers.isEmpty()) { + + Header[] httpHeaders = headers.toSingleValueMap().entrySet().stream() + .map(it -> new BasicHeader(it.getKey(), it.getValue())).toArray(Header[]::new); + builder = builder.setDefaultHeaders(httpHeaders); + } + + return new RestHighLevelClient(builder); + } + + @Override + public MaybeSecureClientBuilder connectedTo(String... 
hosts) { + + Assert.notEmpty(hosts, "At least one host is required."); + this.hosts.addAll(Arrays.asList(hosts)); + return this; + } + + @Override + public ClientBuilderWithOptionalDefaultHeaders withDefaultHeaders(HttpHeaders defaultHeaders) { + + Assert.notNull(defaultHeaders, "DefaultHeaders must not be null!"); + this.headers = defaultHeaders; + return this; + } + + List formattedHosts() { + return hosts.stream().map(it -> it.startsWith("http") ? it : protocoll + "://" + it).collect(Collectors.toList()); + } + + @Override + public ClientBuilderWithOptionalDefaultHeaders viaSsl() { + this.protocoll = "https"; + return this; + } + } +} diff --git a/src/main/java/org/springframework/data/elasticsearch/client/ElasticsearchHost.java b/src/main/java/org/springframework/data/elasticsearch/client/ElasticsearchHost.java new file mode 100644 index 000000000..32044b4b2 --- /dev/null +++ b/src/main/java/org/springframework/data/elasticsearch/client/ElasticsearchHost.java @@ -0,0 +1,92 @@ +/* + * Copyright 2018 the original author or authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.springframework.data.elasticsearch.client; + +import java.time.Instant; + +/** + * Value Object containing information about Elasticsearch cluster nodes. 
+ * + * @author Christoph Strobl + * @since 4.0 + */ +public class ElasticsearchHost { + + private final String host; + private final State state; + private final Instant timestamp; + + public ElasticsearchHost(String host, State state) { + + this.host = host; + this.state = state; + this.timestamp = Instant.now(); + } + + /** + * @param host must not be {@literal null}. + * @return new instance of {@link ElasticsearchHost}. + */ + public static ElasticsearchHost online(String host) { + return new ElasticsearchHost(host, State.ONLINE); + } + + /** + * @param host must not be {@literal null}. + * @return new instance of {@link ElasticsearchHost}. + */ + public static ElasticsearchHost offline(String host) { + return new ElasticsearchHost(host, State.OFFLINE); + } + + /** + * @return {@literal true} if the last known {@link State} was {@link State#ONLINE} + */ + public boolean isOnline() { + return State.ONLINE.equals(state); + } + + /** + * @return never {@literal null}. + */ + public String getHost() { + return host; + } + + /** + * @return the last known {@link State}. + */ + public State getState() { + return state; + } + + /** + * @return the {@link Instant} the information was captured. + */ + public Instant getTimestamp() { + return timestamp; + } + + @Override + public String toString() { + return "ElasticsearchHost(" + host + ", " + state.name() + ")"; + } + + public enum State { + ONLINE, OFFLINE, UNKNOWN + } +} diff --git a/src/main/java/org/springframework/data/elasticsearch/client/NoReachableHostException.java b/src/main/java/org/springframework/data/elasticsearch/client/NoReachableHostException.java new file mode 100644 index 000000000..49b6cedcc --- /dev/null +++ b/src/main/java/org/springframework/data/elasticsearch/client/NoReachableHostException.java @@ -0,0 +1,45 @@ +/* + * Copyright 2018 the original author or authors. 
+ * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.springframework.data.elasticsearch.client; + +import java.util.Set; + +/** + * {@link RuntimeException} to be emitted / thrown when the cluster is down (aka none of the known nodes is reachable). + * + * @author Christoph Strobl + * @since 4.0 + */ +public class NoReachableHostException extends RuntimeException { + + public NoReachableHostException(Set hosts) { + super(createMessage(hosts)); + } + + public NoReachableHostException(Set hosts, Throwable cause) { + super(createMessage(hosts), cause); + } + + private static String createMessage(Set hosts) { + + if (hosts.size() == 1) { + return String.format("Host '%s' not reachable. Cluster state is offline.", hosts.iterator().next().getHost()); + } + + return String.format("No active host found in cluster. 
(%s) of (%s) nodes offline.", hosts.size(), hosts.size()); + } +} diff --git a/src/main/java/org/springframework/data/elasticsearch/client/NodeClientFactoryBean.java b/src/main/java/org/springframework/data/elasticsearch/client/NodeClientFactoryBean.java index 6c47d7d10..05b2e5eba 100644 --- a/src/main/java/org/springframework/data/elasticsearch/client/NodeClientFactoryBean.java +++ b/src/main/java/org/springframework/data/elasticsearch/client/NodeClientFactoryBean.java @@ -15,11 +15,15 @@ */ package org.springframework.data.elasticsearch.client; +import static java.util.Arrays.*; + import java.io.IOException; import java.io.InputStream; import java.util.Collection; + import org.elasticsearch.client.Client; import org.elasticsearch.client.node.NodeClient; +import org.elasticsearch.common.logging.LogConfigurator; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.node.InternalSettingsPreparer; import org.elasticsearch.node.Node; @@ -32,8 +36,6 @@ import org.springframework.beans.factory.FactoryBean; import org.springframework.beans.factory.InitializingBean; import org.springframework.util.StringUtils; -import static java.util.Arrays.*; - /** * NodeClientFactoryBean * @@ -41,7 +43,6 @@ import static java.util.Arrays.*; * @author Mohsin Husen * @author Ilkang Na */ - public class NodeClientFactoryBean implements FactoryBean, InitializingBean, DisposableBean { private static final Logger logger = LoggerFactory.getLogger(NodeClientFactoryBean.class); @@ -54,13 +55,22 @@ public class NodeClientFactoryBean implements FactoryBean, InitializingB private String pathConfiguration; public static class TestNode extends Node { + public TestNode(Settings preparedSettings, Collection> classpathPlugins) { - super(InternalSettingsPreparer.prepareEnvironment(preparedSettings, null), classpathPlugins); + + super(InternalSettingsPreparer.prepareEnvironment(preparedSettings, null), classpathPlugins, false); + } + + protected void 
registerDerivedNodeNameWithLogger(String nodeName) { + try { + LogConfigurator.setNodeName(nodeName); + } catch (Exception e) { + // nagh - just forget about it + } } } - NodeClientFactoryBean() { - } + NodeClientFactoryBean() {} public NodeClientFactoryBean(boolean local) { this.local = local; @@ -84,22 +94,18 @@ public class NodeClientFactoryBean implements FactoryBean, InitializingB @Override public void afterPropertiesSet() throws Exception { - nodeClient = (NodeClient) new TestNode( - Settings.builder().put(loadConfig()) - .put("transport.type", "netty4") - .put("http.type", "netty4") - .put("path.home", this.pathHome) - .put("path.data", this.pathData) - .put("cluster.name", this.clusterName) - .put("node.max_local_storage_nodes", 100) - .build(), asList(Netty4Plugin.class)).start().client(); + nodeClient = (NodeClient) new TestNode(Settings.builder().put(loadConfig()).put("transport.type", "netty4") + .put("http.type", "netty4").put("path.home", this.pathHome).put("path.data", this.pathData) + .put("cluster.name", this.clusterName).put("node.max_local_storage_nodes", 100).build(), + asList(Netty4Plugin.class)).start().client(); } private Settings loadConfig() throws IOException { if (!StringUtils.isEmpty(pathConfiguration)) { InputStream stream = getClass().getClassLoader().getResourceAsStream(pathConfiguration); if (stream != null) { - return Settings.builder().loadFromStream(pathConfiguration, getClass().getClassLoader().getResourceAsStream(pathConfiguration), false).build(); + return Settings.builder().loadFromStream(pathConfiguration, + getClass().getClassLoader().getResourceAsStream(pathConfiguration), false).build(); } logger.error(String.format("Unable to read node configuration from file [%s]", pathConfiguration)); } diff --git a/src/main/java/org/springframework/data/elasticsearch/client/reactive/DefaultReactiveElasticsearchClient.java b/src/main/java/org/springframework/data/elasticsearch/client/reactive/DefaultReactiveElasticsearchClient.java new 
file mode 100644 index 000000000..c10d57b49 --- /dev/null +++ b/src/main/java/org/springframework/data/elasticsearch/client/reactive/DefaultReactiveElasticsearchClient.java @@ -0,0 +1,518 @@ +/* + * Copyright 2018 the original author or authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.springframework.data.elasticsearch.client.reactive; + +import org.springframework.data.elasticsearch.client.ElasticsearchHost; +import org.springframework.data.elasticsearch.client.NoReachableHostException; +import reactor.core.publisher.Flux; +import reactor.core.publisher.Mono; + +import java.io.IOException; +import java.lang.reflect.Method; +import java.net.ConnectException; +import java.nio.charset.StandardCharsets; +import java.util.Collection; +import java.util.List; +import java.util.function.Function; + +import org.apache.http.util.EntityUtils; +import org.elasticsearch.ElasticsearchStatusException; +import org.elasticsearch.action.ActionRequest; +import org.elasticsearch.action.ActionResponse; +import org.elasticsearch.action.delete.DeleteRequest; +import org.elasticsearch.action.delete.DeleteResponse; +import org.elasticsearch.action.get.GetRequest; +import org.elasticsearch.action.get.GetResponse; +import org.elasticsearch.action.get.MultiGetRequest; +import org.elasticsearch.action.get.MultiGetResponse; +import org.elasticsearch.action.index.IndexRequest; +import org.elasticsearch.action.index.IndexResponse; +import 
org.elasticsearch.action.main.MainRequest; +import org.elasticsearch.action.main.MainResponse; +import org.elasticsearch.action.search.SearchRequest; +import org.elasticsearch.action.search.SearchResponse; +import org.elasticsearch.action.update.UpdateRequest; +import org.elasticsearch.action.update.UpdateResponse; +import org.elasticsearch.client.Request; +import org.elasticsearch.common.xcontent.DeprecationHandler; +import org.elasticsearch.common.xcontent.NamedXContentRegistry; +import org.elasticsearch.common.xcontent.XContentParser; +import org.elasticsearch.common.xcontent.XContentType; +import org.elasticsearch.index.get.GetResult; +import org.elasticsearch.rest.BytesRestResponse; +import org.elasticsearch.rest.RestStatus; +import org.elasticsearch.search.SearchHit; +import org.reactivestreams.Publisher; +import org.springframework.core.ParameterizedTypeReference; +import org.springframework.dao.DataAccessResourceFailureException; +import org.springframework.data.elasticsearch.client.reactive.HostProvider.VerificationMode; +import org.springframework.data.elasticsearch.client.util.RequestConverters; +import org.springframework.http.HttpHeaders; +import org.springframework.http.HttpMethod; +import org.springframework.http.HttpStatus; +import org.springframework.http.MediaType; +import org.springframework.http.ResponseCookie; +import org.springframework.http.ResponseEntity; +import org.springframework.http.client.reactive.ClientHttpResponse; +import org.springframework.http.codec.HttpMessageReader; +import org.springframework.util.Assert; +import org.springframework.util.MultiValueMap; +import org.springframework.util.ReflectionUtils; +import org.springframework.util.StreamUtils; +import org.springframework.web.client.HttpClientErrorException; +import org.springframework.web.reactive.function.BodyExtractor; +import org.springframework.web.reactive.function.BodyExtractors; +import org.springframework.web.reactive.function.client.ClientResponse; +import 
org.springframework.web.reactive.function.client.ExchangeStrategies; +import org.springframework.web.reactive.function.client.WebClient; +import org.springframework.web.reactive.function.client.WebClient.RequestBodySpec; +import org.springframework.web.reactive.function.client.WebClientException; + +/** + * A {@link WebClient} based {@link ReactiveElasticsearchClient} that connects to an Elasticsearch cluster through HTTP. + * + * @author Christoph Strobl + * @since 4.0 + */ +public class DefaultReactiveElasticsearchClient implements ReactiveElasticsearchClient { + + private final HostProvider hostProvider; + + /** + * Create a new {@link DefaultReactiveElasticsearchClient} using the given hostProvider to obtain server connections. + * + * @param hostProvider must not be {@literal null}. + */ + public DefaultReactiveElasticsearchClient(HostProvider hostProvider) { + this.hostProvider = hostProvider; + } + + /** + * Create a new {@link DefaultReactiveElasticsearchClient} aware of the given nodes in the cluster.
+ * NOTE If the cluster requires authentication be sure to provide the according {@link HttpHeaders} + * correctly. + * + * @param headers Use {@link HttpHeaders} to provide eg. authentication data. Must not be {@literal null}. + * @param hosts must not be {@literal null} nor empty! + * @return new instance of {@link DefaultReactiveElasticsearchClient}. + */ + public static ReactiveElasticsearchClient create(HttpHeaders headers, String... hosts) { + + Assert.notEmpty(hosts, "Elasticsearch Cluster needs to consist of at least one host"); + + HostProvider hostProvider = HostProvider.provider(hosts); + return new DefaultReactiveElasticsearchClient( + headers.isEmpty() ? hostProvider : hostProvider.withDefaultHeaders(headers)); + } + + /* + * (non-Javadoc) + * @see org.springframework.data.elasticsearch.client.reactive.ReactiveElasticsearchClient#ping(org.springframework.http.HttpHeaders) + */ + @Override + public Mono ping(HttpHeaders headers) { + + return sendRequest(new MainRequest(), RequestCreator.ping(), RawActionResponse.class, headers) // + .map(response -> response.statusCode().is2xxSuccessful()) // + .onErrorResume(NoReachableHostException.class, error -> Mono.just(false)).next(); + } + + /* + * (non-Javadoc) + * @see org.springframework.data.elasticsearch.client.reactive.ReactiveElasticsearchClient#info(org.springframework.http.HttpHeaders) + */ + @Override + public Mono info(HttpHeaders headers) { + + return sendRequest(new MainRequest(), RequestCreator.info(), MainResponse.class, headers) // + .next(); + } + + /* + * (non-Javadoc) + * @see org.springframework.data.elasticsearch.client.reactive.ReactiveElasticsearchClient#get(org.springframework.http.HttpHeaderss, org.elasticsearch.action.get.GetRequest) + */ + @Override + public Mono get(HttpHeaders headers, GetRequest getRequest) { + + return sendRequest(getRequest, RequestCreator.get(), GetResponse.class, headers) // + .filter(GetResponse::isExists) // + 
.map(DefaultReactiveElasticsearchClient::getResponseToGetResult) // + .next(); + } + + /* + * (non-Javadoc) + * @see org.springframework.data.elasticsearch.client.reactive.ReactiveElasticsearchClient#multiGet(org.springframework.http.HttpHeaders, org.elasticsearch.action.get.MultiGetRequest) + */ + @Override + public Flux multiGet(HttpHeaders headers, MultiGetRequest multiGetRequest) { + + return sendRequest(multiGetRequest, RequestCreator.multiGet(), MultiGetResponse.class, headers) + .map(MultiGetResponse::getResponses) // + .flatMap(Flux::fromArray) // + .filter(it -> !it.isFailed() && it.getResponse().isExists()) // + .map(it -> DefaultReactiveElasticsearchClient.getResponseToGetResult(it.getResponse())); + } + + /* + * (non-Javadoc) + * @see org.springframework.data.elasticsearch.client.reactive.ReactiveElasticsearchClient#exists(org.springframework.http.HttpHeaders, org.elasticsearch.action.get.GetRequest) + */ + @Override + public Mono exists(HttpHeaders headers, GetRequest getRequest) { + + return sendRequest(getRequest, RequestCreator.exists(), RawActionResponse.class, headers) // + .map(response -> { + + if (response.statusCode().is2xxSuccessful()) { + return true; + } + + if (response.statusCode().is5xxServerError()) { + + throw new HttpClientErrorException(response.statusCode(), String.format( + "Exists request (%s) returned error code %s.", getRequest.toString(), response.statusCode().value())); + } + + return false; + }) // + .next(); + } + + /* + * (non-Javadoc) + * @see org.springframework.data.elasticsearch.client.reactive.ReactiveElasticsearchClient#ping(org.springframework.http.HttpHeaders, org.elasticsearch.action.index.IndexRequest) + */ + @Override + public Mono index(HttpHeaders headers, IndexRequest indexRequest) { + return sendRequest(indexRequest, RequestCreator.index(), IndexResponse.class, headers).publishNext(); + } + + /* + * (non-Javadoc) + * @see 
org.springframework.data.elasticsearch.client.reactive.ReactiveElasticsearchClient#ping(org.springframework.http.HttpHeaders, org.elasticsearch.action.update.UpdateRequest) + */ + @Override + public Mono update(HttpHeaders headers, UpdateRequest updateRequest) { + return sendRequest(updateRequest, RequestCreator.update(), UpdateResponse.class, headers).publishNext(); + } + + /* + * (non-Javadoc) + * @see org.springframework.data.elasticsearch.client.reactive.ReactiveElasticsearchClient#ping(org.springframework.http.HttpHeaders, org.elasticsearch.action.delete.DeleteRequest) + */ + @Override + public Mono delete(HttpHeaders headers, DeleteRequest deleteRequest) { + return sendRequest(deleteRequest, RequestCreator.delete(), DeleteResponse.class, headers).publishNext(); + } + + /* + * (non-Javadoc) + * @see org.springframework.data.elasticsearch.client.reactive.ReactiveElasticsearchClient#ping(org.springframework.http.HttpHeaders, org.elasticsearch.action.search.SearchRequest) + */ + @Override + public Flux search(HttpHeaders headers, SearchRequest searchRequest) { + + return sendRequest(searchRequest, RequestCreator.search(), SearchResponse.class, headers) // + .map(SearchResponse::getHits) // + .flatMap(Flux::fromIterable); + } + + /* + * (non-Javadoc) + * @see org.springframework.data.elasticsearch.client.reactive.ReactiveElasticsearchClient#ping(org.springframework.data.elasticsearch.client.reactive.ReactiveElasticsearchClient.ReactiveElasticsearchClientCallback) + */ + @Override + public Mono execute(ReactiveElasticsearchClientCallback callback) { + + return this.hostProvider.getActive(VerificationMode.LAZY) // + .flatMap(it -> callback.doWithClient(it)) // + .onErrorResume(throwable -> { + + if (throwable instanceof ConnectException) { + + return hostProvider.getActive(VerificationMode.FORCE) // + .flatMap(webClient -> callback.doWithClient(webClient)); + } + + return Mono.error(throwable); + }); + } + + @Override + public Mono status() { + + return 
hostProvider.clusterInfo() // + .map(it -> new ClientStatus(it.getNodes())); + } + + // --> Private Response helpers + + private static GetResult getResponseToGetResult(GetResponse response) { + + return new GetResult(response.getIndex(), response.getType(), response.getId(), response.getVersion(), + response.isExists(), response.getSourceAsBytesRef(), response.getFields()); + } + + // --> + + private Flux sendRequest(Req request, + Function converter, Class responseType, HttpHeaders headers) { + return sendRequest(converter.apply(request), responseType, headers); + } + + private Flux sendRequest(Request request, Class responseType, + HttpHeaders headers) { + + return execute(webClient -> sendRequest(webClient, request, headers)) + .flatMapMany(response -> readResponseBody(request, response, responseType)); + } + + private Mono sendRequest(WebClient webClient, Request request, HttpHeaders headers) { + + RequestBodySpec requestBodySpec = webClient.method(HttpMethod.valueOf(request.getMethod().toUpperCase())) // + .uri(request.getEndpoint(), request.getParameters()) // + .headers(theHeaders -> theHeaders.addAll(headers)); + + if (request.getEntity() != null) { + + requestBodySpec.contentType(MediaType.valueOf(request.getEntity().getContentType().getValue())); + requestBodySpec.body(bodyExtractor(request), String.class); + } + + return requestBodySpec // + .exchange() // + .onErrorReturn(ConnectException.class, ClientResponse.create(HttpStatus.SERVICE_UNAVAILABLE).build()); + } + + private Publisher bodyExtractor(Request request) { + + return Mono.fromSupplier(() -> { + + try { + return EntityUtils.toString(request.getEntity()); + } catch (IOException e) { + throw new RequestBodyEncodingException("Error encoding request", e); + } + }); + } + + private Publisher readResponseBody(Request request, ClientResponse response, Class responseType) { + + if (RawActionResponse.class.equals(responseType)) { + return Mono.just((T) new RawActionResponse(response)); + } + + if 
(response.statusCode().is5xxServerError()) { + + throw new HttpClientErrorException(response.statusCode(), + String.format("%s request to %s returned error code %s.", request.getMethod(), request.getEndpoint(), + response.statusCode().value())); + } + + return response.body(BodyExtractors.toDataBuffers()).flatMap(it -> { + try { + + String content = StreamUtils.copyToString(it.asInputStream(true), StandardCharsets.UTF_8); + + try { + + XContentParser contentParser = XContentType + .fromMediaTypeOrFormat( + response.headers().contentType().map(MediaType::toString).orElse(XContentType.JSON.mediaType())) + .xContent() + .createParser(NamedXContentRegistry.EMPTY, DeprecationHandler.THROW_UNSUPPORTED_OPERATION, content); + + Method fromXContent = ReflectionUtils.findMethod(responseType, "fromXContent", XContentParser.class); + return Mono.just((T) ReflectionUtils.invokeMethod(fromXContent, responseType, contentParser)); + } catch (Exception parseFailure) { + + try { + + XContentParser errorParser = XContentType + .fromMediaTypeOrFormat( + response.headers().contentType().map(MediaType::toString).orElse(XContentType.JSON.mediaType())) + .xContent() + .createParser(NamedXContentRegistry.EMPTY, DeprecationHandler.THROW_UNSUPPORTED_OPERATION, content); + + // return Mono.error to avoid ElasticsearchStatusException to be caught by outer catch. + return Mono.error(BytesRestResponse.errorFromXContent(errorParser)); + + } catch (Exception errorParseFailure) { + + // return Mono.error to avoid ElasticsearchStatusException to be caught by outer catch. 
+ return Mono.error(new ElasticsearchStatusException("Unable to parse response body", + RestStatus.fromCode(response.statusCode().value()))); + } + } + } catch (IOException e) { + throw new DataAccessResourceFailureException("Error parsing XContent.", e); + } + }); + } + + static class RequestCreator { + + static Function search() { + return RequestConverters::search; + } + + static Function index() { + return RequestConverters::index; + } + + static Function get() { + return RequestConverters::get; + } + + static Function ping() { + return (request) -> RequestConverters.ping(); + } + + static Function info() { + return (request) -> RequestConverters.info(); + } + + static Function multiGet() { + return RequestConverters::multiGet; + } + + static Function exists() { + return RequestConverters::exists; + } + + static Function update() { + return RequestConverters::update; + } + + static Function delete() { + return RequestConverters::delete; + } + } + + public static class RequestBodyEncodingException extends WebClientException { + + RequestBodyEncodingException(String msg, Throwable ex) { + super(msg, ex); + } + } + + static class RawActionResponse extends ActionResponse implements ClientResponse { + + final ClientResponse delegate; + + RawActionResponse(ClientResponse delegate) { + this.delegate = delegate; + } + + public HttpStatus statusCode() { + return delegate.statusCode(); + } + + public int rawStatusCode() { + return delegate.rawStatusCode(); + } + + public Headers headers() { + return delegate.headers(); + } + + public MultiValueMap cookies() { + return delegate.cookies(); + } + + public ExchangeStrategies strategies() { + return delegate.strategies(); + } + + public T body(BodyExtractor extractor) { + return delegate.body(extractor); + } + + public Mono bodyToMono(Class elementClass) { + return delegate.bodyToMono(elementClass); + } + + public Mono bodyToMono(ParameterizedTypeReference typeReference) { + return delegate.bodyToMono(typeReference); + } + + 
public Flux bodyToFlux(Class elementClass) { + return delegate.bodyToFlux(elementClass); + } + + public Flux bodyToFlux(ParameterizedTypeReference typeReference) { + return delegate.bodyToFlux(typeReference); + } + + public Mono> toEntity(Class bodyType) { + return delegate.toEntity(bodyType); + } + + public Mono> toEntity(ParameterizedTypeReference typeReference) { + return delegate.toEntity(typeReference); + } + + public Mono>> toEntityList(Class elementType) { + return delegate.toEntityList(elementType); + } + + public Mono>> toEntityList(ParameterizedTypeReference typeReference) { + return delegate.toEntityList(typeReference); + } + + public static Builder from(ClientResponse other) { + return ClientResponse.from(other); + } + + public static Builder create(HttpStatus statusCode) { + return ClientResponse.create(statusCode); + } + + public static Builder create(HttpStatus statusCode, ExchangeStrategies strategies) { + return ClientResponse.create(statusCode, strategies); + } + + public static Builder create(HttpStatus statusCode, List> messageReaders) { + return ClientResponse.create(statusCode, messageReaders); + } + } + + /** + * Reactive client {@link ReactiveElasticsearchClient.Status} implementation. 
+ * + * @author Christoph Strobl + */ + class ClientStatus implements Status { + + private final Collection connectedHosts; + + ClientStatus(Collection connectedHosts) { + this.connectedHosts = connectedHosts; + } + + /* + * (non-Javadoc) + * @see org.springframework.data.elasticsearch.client.reactive.ReactiveElasticsearchClient.Status#hosts() + */ + @Override + public Collection hosts() { + return connectedHosts; + } + } +} diff --git a/src/main/java/org/springframework/data/elasticsearch/client/reactive/HostProvider.java b/src/main/java/org/springframework/data/elasticsearch/client/reactive/HostProvider.java new file mode 100644 index 000000000..ae4ddbca3 --- /dev/null +++ b/src/main/java/org/springframework/data/elasticsearch/client/reactive/HostProvider.java @@ -0,0 +1,184 @@ +/* + * Copyright 2018 the original author or authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.springframework.data.elasticsearch.client.reactive; + +import org.springframework.data.elasticsearch.client.NoReachableHostException; +import reactor.core.publisher.Mono; + +import java.util.Collections; +import java.util.Set; +import java.util.function.Consumer; + +import org.springframework.data.elasticsearch.client.ElasticsearchHost; +import org.springframework.http.HttpHeaders; +import org.springframework.util.Assert; +import org.springframework.web.reactive.function.client.WebClient; + +/** + * Infrastructure helper class aware of hosts within the cluster and the health of those allowing easy selection of + * active ones. + * + * @author Christoph Strobl + * @since 4.0 + */ +public interface HostProvider { + + /** + * Lookup an active host in {@link VerificationMode#LAZY lazy} mode utilizing cached {@link ElasticsearchHost}. + * + * @return the {@link Mono} emitting the active host or {@link Mono#error(Throwable) an error} if none found. + */ + default Mono lookupActiveHost() { + return lookupActiveHost(VerificationMode.LAZY); + } + + /** + * Lookup an active host in using the given {@link VerificationMode}. + * + * @param verificationMode + * @return the {@link Mono} emitting the active host or {@link Mono#error(Throwable) an error} + * ({@link NoReachableHostException}) if none found. + */ + Mono lookupActiveHost(VerificationMode verificationMode); + + /** + * Get the {@link WebClient} connecting to an active host utilizing cached {@link ElasticsearchHost}. + * + * @return the {@link Mono} emitting the client for an active host or {@link Mono#error(Throwable) an error} if none + * found. + */ + default Mono getActive() { + return getActive(VerificationMode.LAZY); + } + + /** + * Get the {@link WebClient} connecting to an active host. + * + * @param verificationMode must not be {@literal null}. + * @return the {@link Mono} emitting the client for an active host or {@link Mono#error(Throwable) an error} if none + * found. 
+ */ + default Mono getActive(VerificationMode verificationMode) { + return getActive(verificationMode, getDefaultHeaders()); + } + + /** + * Get the {@link WebClient} with default {@link HttpHeaders} connecting to an active host. + * + * @param verificationMode must not be {@literal null}. + * @param headers must not be {@literal null}. + * @return the {@link Mono} emitting the client for an active host or {@link Mono#error(Throwable) an error} if none + * found. + */ + default Mono getActive(VerificationMode verificationMode, HttpHeaders headers) { + return lookupActiveHost(verificationMode).map(host -> createWebClient(host, headers)); + } + + /** + * Get the {@link WebClient} with default {@link HttpHeaders} connecting to the given host. + * + * @param host must not be {@literal null}. + * @param headers must not be {@literal null}. + * @return + */ + default WebClient createWebClient(String host, HttpHeaders headers) { + return WebClient.builder().baseUrl(host).defaultHeaders(defaultHeaders -> defaultHeaders.putAll(headers)).build(); + } + + /** + * Obtain information about known cluster nodes. + * + * @return the {@link Mono} emitting {@link ClusterInformation} when available. + */ + Mono clusterInfo(); + + /** + * Obtain the {@link HttpHeaders} to be used by default. + * + * @return never {@literal null}. {@link HttpHeaders#EMPTY} by default. + */ + HttpHeaders getDefaultHeaders(); + + /** + * Create a new instance of {@link HostProvider} applying the given headers by default. + * + * @param headers must not be {@literal null}. + * @return new instance of {@link HostProvider}. + */ + HostProvider withDefaultHeaders(HttpHeaders headers); + + /** + * Create a new instance of {@link HostProvider} calling the given {@link Consumer} on error. + * + * @param errorListener must not be {@literal null}. + * @return new instance of {@link HostProvider}. 
+ */
+ HostProvider withErrorListener(Consumer errorListener);
+
+ /**
+ * Create a new {@link HostProvider} best suited for the given number of hosts.
+ *
+ * @param hosts must not be {@literal null} nor empty.
+ * @return new instance of {@link HostProvider}.
+ */
+ static HostProvider provider(String... hosts) {
+
+ Assert.notEmpty(hosts, "Please provide at least one host to connect to.");
+
+ if (hosts.length == 1) {
+ return new SingleNodeHostProvider(HttpHeaders.EMPTY, (err) -> {}, hosts[0]);
+ } else {
+ return new MultiNodeHostProvider(HttpHeaders.EMPTY, (err) -> {}, hosts);
+ }
+ }
+
+ /**
+ * @author Christoph Strobl
+ * @since 4.0
+ */
+ enum VerificationMode {
+
+ /**
+ * Actively check for cluster node health.
+ */
+ FORCE,
+
+ /**
+ * Use cached data for cluster node health.
+ */
+ LAZY
+ }
+
+ /**
+ * Value object accumulating information about an Elasticsearch cluster.
+ *
+ * @author Christoph Strobl
+ * @since 4.0
+ */
+ class ClusterInformation {
+
+ private final Set nodes;
+
+ public ClusterInformation(Set nodes) {
+ this.nodes = nodes;
+ }
+
+ public Set getNodes() {
+ return Collections.unmodifiableSet(nodes);
+ }
+ }
+}
diff --git a/src/main/java/org/springframework/data/elasticsearch/client/reactive/MultiNodeHostProvider.java b/src/main/java/org/springframework/data/elasticsearch/client/reactive/MultiNodeHostProvider.java
new file mode 100644
index 000000000..b9eaea53f
--- /dev/null
+++ b/src/main/java/org/springframework/data/elasticsearch/client/reactive/MultiNodeHostProvider.java
@@ -0,0 +1,152 @@
+/*
+ * Copyright 2018 the original author or authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.springframework.data.elasticsearch.client.reactive; + +import reactor.core.publisher.Flux; +import reactor.core.publisher.Mono; +import reactor.util.function.Tuple2; + +import java.util.ArrayList; +import java.util.Collection; +import java.util.Collections; +import java.util.LinkedHashSet; +import java.util.List; +import java.util.Map; +import java.util.concurrent.ConcurrentHashMap; +import java.util.function.Consumer; + +import org.springframework.data.elasticsearch.client.ElasticsearchHost; +import org.springframework.data.elasticsearch.client.ElasticsearchHost.State; +import org.springframework.data.elasticsearch.client.NoReachableHostException; +import org.springframework.http.HttpHeaders; +import org.springframework.lang.Nullable; +import org.springframework.web.reactive.function.client.ClientResponse; + +/** + * @author Christoph Strobl + * @since 4.0 + */ +class MultiNodeHostProvider implements HostProvider { + + private final HttpHeaders headers; + private final Consumer errorListener; + private final Map hosts; + + MultiNodeHostProvider(HttpHeaders headers, Consumer errorListener, String... 
hosts) { + + this.headers = headers; + this.errorListener = errorListener; + this.hosts = new ConcurrentHashMap<>(); + for (String host : hosts) { + this.hosts.put(host, new ElasticsearchHost(host, State.UNKNOWN)); + } + } + + public Mono clusterInfo() { + return nodes(null).map(this::updateNodeState).buffer(hosts.size()) + .then(Mono.just(new ClusterInformation(new LinkedHashSet<>(this.hosts.values())))); + } + + Collection getCachedHostState() { + return hosts.values(); + } + + @Override + public HttpHeaders getDefaultHeaders() { + return this.headers; + } + + @Override + public Mono lookupActiveHost(VerificationMode verificationMode) { + + if (VerificationMode.LAZY.equals(verificationMode)) { + for (ElasticsearchHost entry : hosts()) { + if (entry.isOnline()) { + return Mono.just(entry.getHost()); + } + } + } + + return findActiveHostInKnownActives() // + .switchIfEmpty(findActiveHostInUnresolved()) // + .switchIfEmpty(findActiveHostInDead()) // + .switchIfEmpty(Mono.error(() -> new NoReachableHostException(new LinkedHashSet<>(getCachedHostState())))); + } + + @Override + public HostProvider withDefaultHeaders(HttpHeaders headers) { + return new MultiNodeHostProvider(headers, errorListener, hosts.keySet().toArray(new String[0])); + } + + @Override + public HostProvider withErrorListener(Consumer errorListener) { + return new MultiNodeHostProvider(headers, errorListener, hosts.keySet().toArray(new String[0])); + } + + private Mono findActiveHostInKnownActives() { + return findActiveForSate(State.ONLINE); + } + + private Mono findActiveHostInUnresolved() { + return findActiveForSate(State.UNKNOWN); + } + + private Mono findActiveHostInDead() { + return findActiveForSate(State.OFFLINE); + } + + private Mono findActiveForSate(State state) { + return nodes(state).map(this::updateNodeState).filter(ElasticsearchHost::isOnline).map(it -> it.getHost()).next(); + } + + private ElasticsearchHost updateNodeState(Tuple2 tuple2) { + + State state = 
tuple2.getT2().statusCode().isError() ? State.OFFLINE : State.ONLINE; + ElasticsearchHost elasticsearchHost = new ElasticsearchHost(tuple2.getT1(), state); + hosts.put(tuple2.getT1(), elasticsearchHost); + return elasticsearchHost; + } + + private Flux> nodes(@Nullable State state) { + + return Flux.fromIterable(hosts()) // + .filter(entry -> state != null ? entry.getState().equals(state) : true) // + .map(ElasticsearchHost::getHost) // + .flatMap(host -> { + + Mono exchange = createWebClient(host, headers) // + .head().uri("/").exchange().doOnError(throwable -> { + + hosts.put(host, new ElasticsearchHost(host, State.OFFLINE)); + errorListener.accept(throwable); + }); + + return Mono.just(host).zipWith(exchange); + }) // + .onErrorContinue((throwable, o) -> { + errorListener.accept(throwable); + }); + } + + private List hosts() { + + List hosts = new ArrayList<>(this.hosts.values()); + Collections.shuffle(hosts); + + return hosts; + } +} diff --git a/src/main/java/org/springframework/data/elasticsearch/client/reactive/ReactiveElasticsearchClient.java b/src/main/java/org/springframework/data/elasticsearch/client/reactive/ReactiveElasticsearchClient.java new file mode 100644 index 000000000..89c76273b --- /dev/null +++ b/src/main/java/org/springframework/data/elasticsearch/client/reactive/ReactiveElasticsearchClient.java @@ -0,0 +1,407 @@ +/* + * Copyright 2018 the original author or authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.springframework.data.elasticsearch.client.reactive; + +import reactor.core.publisher.Flux; +import reactor.core.publisher.Mono; + +import java.net.ConnectException; +import java.util.Collection; +import java.util.function.Consumer; + +import org.elasticsearch.action.delete.DeleteRequest; +import org.elasticsearch.action.delete.DeleteResponse; +import org.elasticsearch.action.get.GetRequest; +import org.elasticsearch.action.get.MultiGetRequest; +import org.elasticsearch.action.index.IndexRequest; +import org.elasticsearch.action.index.IndexResponse; +import org.elasticsearch.action.main.MainResponse; +import org.elasticsearch.action.search.SearchRequest; +import org.elasticsearch.action.update.UpdateRequest; +import org.elasticsearch.action.update.UpdateResponse; +import org.elasticsearch.index.get.GetResult; +import org.elasticsearch.search.SearchHit; +import org.springframework.data.elasticsearch.client.ElasticsearchHost; +import org.springframework.http.HttpHeaders; +import org.springframework.util.CollectionUtils; +import org.springframework.web.reactive.function.client.ClientResponse; +import org.springframework.web.reactive.function.client.WebClient; + +/** + * A reactive client to connect to Elasticsearch.
+ * + * @author Christoph Strobl + * @since 4.0 + */ +public interface ReactiveElasticsearchClient { + + /** + * Pings the remote Elasticsearch cluster and emits {@literal true} if the ping succeeded, {@literal false} otherwise. + * + * @return the {@link Mono} emitting the result of the ping attempt. + */ + default Mono ping() { + return ping(HttpHeaders.EMPTY); + } + + /** + * Pings the remote Elasticsearch cluster and emits {@literal true} if the ping succeeded, {@literal false} otherwise. + * + * @param headers Use {@link HttpHeaders} to provide eg. authentication data. Must not be {@literal null}. + * @return the {@link Mono} emitting the result of the ping attempt. + */ + Mono ping(HttpHeaders headers); + + /** + * Get the cluster info otherwise provided when sending an HTTP request to port 9200. + * + * @return the {@link Mono} emitting the result of the info request. + */ + default Mono info() { + return info(HttpHeaders.EMPTY); + } + + /** + * Get the cluster info otherwise provided when sending an HTTP request to port 9200. + * + * @param headers Use {@link HttpHeaders} to provide eg. authentication data. Must not be {@literal null}. + * @return the {@link Mono} emitting the result of the info request. + */ + Mono info(HttpHeaders headers); + + /** + * Execute the given {@link GetRequest} against the {@literal get} API to retrieve a document by id. + * + * @param getRequest must not be {@literal null}. + * @see Get API on + * elastic.co + * @return the {@link Mono} emitting the {@link GetResult result}. + */ + default Mono get(GetRequest getRequest) { + return get(HttpHeaders.EMPTY, getRequest); + } + + /** + * Execute a {@link GetRequest} against the {@literal get} API to retrieve a document by id. + * + * @param consumer never {@literal null}. + * @see Get API on + * elastic.co + * @return the {@link Mono} emitting the {@link GetResult result}. 
+ */ + default Mono get(Consumer consumer) { + + GetRequest request = new GetRequest(); + consumer.accept(request); + return get(request); + } + + /** + * Execute the given {@link GetRequest} against the {@literal get} API to retrieve a document by id. + * + * @param headers Use {@link HttpHeaders} to provide eg. authentication data. Must not be {@literal null}. + * @param getRequest must not be {@literal null}. + * @see Get API on + * elastic.co + * @return the {@link Mono} emitting the {@link GetResult result}. + */ + Mono get(HttpHeaders headers, GetRequest getRequest); + + /** + * Execute the given {@link MultiGetRequest} against the {@literal multi-get} API to retrieve multiple documents by + * id. + * + * @param multiGetRequest must not be {@literal null}. + * @see Multi Get API on + * elastic.co + * @return the {@link Flux} emitting the {@link GetResult result}. + */ + default Flux multiGet(MultiGetRequest multiGetRequest) { + return multiGet(HttpHeaders.EMPTY, multiGetRequest); + } + + /** + * Execute a {@link MultiGetRequest} against the {@literal multi-get} API to retrieve multiple documents by id. + * + * @param consumer never {@literal null}. + * @see Multi Get API on + * elastic.co + * @return the {@link Flux} emitting the {@link GetResult result}. + */ + default Flux multiGet(Consumer consumer) { + + MultiGetRequest request = new MultiGetRequest(); + consumer.accept(request); + return multiGet(request); + } + + /** + * Execute the given {@link MultiGetRequest} against the {@literal multi-get} API to retrieve multiple documents by + * id. + * + * @param headers Use {@link HttpHeaders} to provide eg. authentication data. Must not be {@literal null}. + * @param multiGetRequest must not be {@literal null}. + * @see Multi Get API on + * elastic.co + * @return the {@link Flux} emitting the {@link GetResult result}. + */ + Flux multiGet(HttpHeaders headers, MultiGetRequest multiGetRequest); + + /** + * Checks for the existence of a document. 
Emits {@literal true} if it exists, {@literal false} otherwise. + * + * @param getRequest must not be {@literal null}. + * @return the {@link Mono} emitting {@literal true} if it exists, {@literal false} otherwise. + */ + default Mono exists(GetRequest getRequest) { + return exists(HttpHeaders.EMPTY, getRequest); + } + + /** + * Checks for the existence of a document. Emits {@literal true} if it exists, {@literal false} otherwise. + * + * @param consumer never {@literal null}. + * @return the {@link Mono} emitting {@literal true} if it exists, {@literal false} otherwise. + */ + default Mono exists(Consumer consumer) { + + GetRequest request = new GetRequest(); + consumer.accept(request); + return exists(request); + } + + /** + * Checks for the existence of a document. Emits {@literal true} if it exists, {@literal false} otherwise. + * + * @param headers Use {@link HttpHeaders} to provide eg. authentication data. Must not be {@literal null}. + * @param getRequest must not be {@literal null}. + * @return the {@link Mono} emitting {@literal true} if it exists, {@literal false} otherwise. + */ + Mono exists(HttpHeaders headers, GetRequest getRequest); + + /** + * Execute the given {@link IndexRequest} against the {@literal index} API to index a document. + * + * @param indexRequest must not be {@literal null}. + * @see Index API on + * elastic.co + * @return the {@link Mono} emitting the {@link IndexResponse}. + */ + default Mono index(IndexRequest indexRequest) { + return index(HttpHeaders.EMPTY, indexRequest); + } + + /** + * Execute an {@link IndexRequest} against the {@literal index} API to index a document. + * + * @param consumer never {@literal null}. + * @see Index API on + * elastic.co + * @return the {@link Mono} emitting the {@link IndexResponse}. 
+ */ + default Mono index(Consumer consumer) { + + IndexRequest request = new IndexRequest(); + consumer.accept(request); + return index(request); + } + + /** + * Execute the given {@link IndexRequest} against the {@literal index} API to index a document. + * + * @param headers Use {@link HttpHeaders} to provide eg. authentication data. Must not be {@literal null}. + * @param indexRequest must not be {@literal null}. + * @see Index API on + * elastic.co + * @return the {@link Mono} emitting the {@link IndexResponse}. + */ + Mono index(HttpHeaders headers, IndexRequest indexRequest); + + /** + * Execute the given {@link UpdateRequest} against the {@literal update} API to alter a document. + * + * @param updateRequest must not be {@literal null}. + * @see Update API on + * elastic.co + * @return the {@link Mono} emitting the {@link UpdateResponse}. + */ + default Mono update(UpdateRequest updateRequest) { + return update(HttpHeaders.EMPTY, updateRequest); + } + + /** + * Execute an {@link UpdateRequest} against the {@literal update} API to alter a document. + * + * @param consumer never {@literal null}. + * @see Update API on + * elastic.co + * @return the {@link Mono} emitting the {@link UpdateResponse}. + */ + default Mono update(Consumer consumer) { + + UpdateRequest request = new UpdateRequest(); + consumer.accept(request); + return update(request); + } + + /** + * Execute the given {@link UpdateRequest} against the {@literal update} API to alter a document. + * + * @param headers Use {@link HttpHeaders} to provide eg. authentication data. Must not be {@literal null}. + * @param updateRequest must not be {@literal null}. + * @see Update API on + * elastic.co + * @return the {@link Mono} emitting the {@link UpdateResponse}. + */ + Mono update(HttpHeaders headers, UpdateRequest updateRequest); + + /** + * Execute the given {@link DeleteRequest} against the {@literal delete} API to remove a document. + * + * @param deleteRequest must not be {@literal null}. 
+ * @see Delete API on + * elastic.co + * @return the {@link Mono} emitting the {@link DeleteResponse}. + */ + default Mono delete(DeleteRequest deleteRequest) { + return delete(HttpHeaders.EMPTY, deleteRequest); + } + + /** + * Execute a {@link DeleteRequest} against the {@literal delete} API to remove a document. + * + * @param consumer never {@literal null}. + * @see Delete API on + * elastic.co + * @return the {@link Mono} emitting the {@link DeleteResponse}. + */ + default Mono delete(Consumer consumer) { + + DeleteRequest request = new DeleteRequest(); + consumer.accept(request); + return delete(request); + } + + /** + * Execute the given {@link DeleteRequest} against the {@literal delete} API to remove a document. + * + * @param headers Use {@link HttpHeaders} to provide eg. authentication data. Must not be {@literal null}. + * @param deleteRequest must not be {@literal null}. + * @see Delete API on + * elastic.co + * @return the {@link Mono} emitting the {@link DeleteResponse}. + */ + Mono delete(HttpHeaders headers, DeleteRequest deleteRequest); + + /** + * Execute the given {@link SearchRequest} against the {@literal search} API. + * + * @param searchRequest must not be {@literal null}. + * @see Search API on + * elastic.co + * @return the {@link Flux} emitting {@link SearchHit hits} one by one. + */ + default Flux search(SearchRequest searchRequest) { + return search(HttpHeaders.EMPTY, searchRequest); + } + + /** + * Execute a {@link SearchRequest} against the {@literal search} API. + * + * @param consumer never {@literal null}. + * @see Search API on + * elastic.co + * @return the {@link Flux} emitting {@link SearchHit hits} one by one. + */ + default Flux search(Consumer consumer) { + + SearchRequest request = new SearchRequest(); + consumer.accept(request); + return search(request); + } + + /** + * Execute the given {@link SearchRequest} against the {@literal search} API. + * + * @param headers Use {@link HttpHeaders} to provide eg. 
authentication data. Must not be {@literal null}. + * @param searchRequest must not be {@literal null}. + * @see Search API on + * elastic.co + * @return the {@link Flux} emitting {@link SearchHit hits} one by one. + */ + Flux search(HttpHeaders headers, SearchRequest searchRequest); + + /** + * Compose the actual command/s to run against Elasticsearch using the underlying {@link WebClient connection}. + * {@link #execute(ReactiveElasticsearchClientCallback) Execute} selects an active server from the available ones and + * retries operations that fail with a {@link ConnectException} on another node if the previously selected one becomes + * unavailable. + * + * @param callback the {@link ReactiveElasticsearchClientCallback callback} wielding the actual command to run. + * @return the {@link Mono} emitting the {@link ClientResponse} once subscribed. + */ + Mono execute(ReactiveElasticsearchClientCallback callback); + + /** + * Get the current client {@link Status}.
+ * NOTE the actual implementation might choose to actively check the current cluster state by pinging
+ * known nodes.
+ *
+ * @return the {@link Mono} emitting the current client {@link Status}.
+ */
+ Mono status();
+
+ /**
+ * Low level callback interface operating upon {@link WebClient} to send commands towards Elasticsearch.
+ *
+ * @author Christoph Strobl
+ * @since 4.0
+ */
+ interface ReactiveElasticsearchClientCallback {
+ Mono doWithClient(WebClient client);
+ }
+
+ /**
+ * Cumulative client {@link ElasticsearchHost} information.
+ *
+ * @author Christoph Strobl
+ * @since 4.0
+ */
+ interface Status {
+
+ /**
+ * Get the list of known hosts and their cached state.
+ *
+ * @return never {@literal null}.
+ */
+ Collection hosts();
+
+ /**
+ * @return {@literal true} if at least one host is available.
+ */
+ default boolean isOk() {
+
+ Collection hosts = hosts();
+
+ if (CollectionUtils.isEmpty(hosts)) {
+ return false;
+ }
+
+ return !hosts().stream().filter(it -> !it.isOnline()).findFirst().isPresent();
+ }
+ }
+}
diff --git a/src/main/java/org/springframework/data/elasticsearch/client/reactive/SingleNodeHostProvider.java b/src/main/java/org/springframework/data/elasticsearch/client/reactive/SingleNodeHostProvider.java
new file mode 100644
index 000000000..57aa2ee94
--- /dev/null
+++ b/src/main/java/org/springframework/data/elasticsearch/client/reactive/SingleNodeHostProvider.java
@@ -0,0 +1,107 @@
+/*
+ * Copyright 2018 the original author or authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.springframework.data.elasticsearch.client.reactive; + +import reactor.core.publisher.Mono; + +import java.util.Collections; +import java.util.function.Consumer; + +import org.springframework.data.elasticsearch.client.ElasticsearchHost; +import org.springframework.data.elasticsearch.client.ElasticsearchHost.State; +import org.springframework.data.elasticsearch.client.NoReachableHostException; +import org.springframework.http.HttpHeaders; + +/** + * @author Christoph Strobl + * @since 4.0 + */ +class SingleNodeHostProvider implements HostProvider { + + private final HttpHeaders headers; + private final Consumer errorListener; + private final String hostname; + private volatile ElasticsearchHost state; + + SingleNodeHostProvider(HttpHeaders headers, Consumer errorListener, String host) { + + this.headers = headers; + this.errorListener = errorListener; + this.hostname = host; + this.state = new ElasticsearchHost(hostname, State.UNKNOWN); + } + + @Override + public Mono clusterInfo() { + + return createWebClient(hostname, headers) // + .head().uri("/").exchange() // + .flatMap(it -> { + + if(it.statusCode().isError()) { + state = ElasticsearchHost.offline(hostname); + } else { + state = ElasticsearchHost.online(hostname); + } + return Mono.just(state); + }).onErrorResume(throwable -> { + + state = ElasticsearchHost.offline(hostname); + errorListener.accept(throwable); + return Mono.just(state); + }) // + .flatMap(it -> Mono.just(new ClusterInformation(Collections.singleton(it)))); + } + + @Override + public Mono lookupActiveHost(VerificationMode verificationMode) { + + if (VerificationMode.LAZY.equals(verificationMode) && state.isOnline()) { + return Mono.just(hostname); + } + + return clusterInfo().flatMap(it -> { + + ElasticsearchHost host = it.getNodes().iterator().next(); + if (host.isOnline()) { + return Mono.just(host.getHost()); + } + + 
return Mono.error(() -> new NoReachableHostException(Collections.singleton(host))); + }); + } + + @Override + public HttpHeaders getDefaultHeaders() { + return this.headers; + } + + @Override + public HostProvider withDefaultHeaders(HttpHeaders headers) { + return new SingleNodeHostProvider(headers, errorListener, hostname); + } + + @Override + public HostProvider withErrorListener(Consumer errorListener) { + return new SingleNodeHostProvider(headers, errorListener, hostname); + } + + ElasticsearchHost getCachedHostState() { + return state; + } + +} diff --git a/src/main/java/org/springframework/data/elasticsearch/client/util/RequestConverters.java b/src/main/java/org/springframework/data/elasticsearch/client/util/RequestConverters.java new file mode 100644 index 000000000..c2df9e927 --- /dev/null +++ b/src/main/java/org/springframework/data/elasticsearch/client/util/RequestConverters.java @@ -0,0 +1,1013 @@ +/* + * Copyright 2018 the original author or authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.springframework.data.elasticsearch.client.util; + +import java.io.ByteArrayOutputStream; +import java.io.IOException; +import java.net.URI; +import java.net.URISyntaxException; +import java.nio.charset.Charset; +import java.util.List; +import java.util.Locale; +import java.util.StringJoiner; + +import org.apache.http.HttpEntity; +import org.apache.http.entity.ByteArrayEntity; +import org.apache.http.entity.ContentType; +import org.apache.lucene.util.BytesRef; +import org.elasticsearch.action.DocWriteRequest; +import org.elasticsearch.action.admin.cluster.health.ClusterHealthRequest; +import org.elasticsearch.action.admin.cluster.storedscripts.DeleteStoredScriptRequest; +import org.elasticsearch.action.admin.cluster.storedscripts.GetStoredScriptRequest; +import org.elasticsearch.action.admin.cluster.storedscripts.PutStoredScriptRequest; +import org.elasticsearch.action.admin.indices.analyze.AnalyzeRequest; +import org.elasticsearch.action.bulk.BulkRequest; +import org.elasticsearch.action.delete.DeleteRequest; +import org.elasticsearch.action.explain.ExplainRequest; +import org.elasticsearch.action.fieldcaps.FieldCapabilitiesRequest; +import org.elasticsearch.action.get.GetRequest; +import org.elasticsearch.action.get.MultiGetRequest; +import org.elasticsearch.action.index.IndexRequest; +import org.elasticsearch.action.search.ClearScrollRequest; +import org.elasticsearch.action.search.MultiSearchRequest; +import org.elasticsearch.action.search.SearchRequest; +import org.elasticsearch.action.search.SearchScrollRequest; +import org.elasticsearch.action.support.ActiveShardCount; +import org.elasticsearch.action.support.IndicesOptions; +import org.elasticsearch.action.support.WriteRequest.RefreshPolicy; +import org.elasticsearch.action.update.UpdateRequest; +import org.elasticsearch.client.Request; +import org.elasticsearch.client.Requests; +import org.elasticsearch.client.RethrottleRequest; +import 
org.elasticsearch.cluster.health.ClusterHealthStatus; +import org.elasticsearch.common.Nullable; +import org.elasticsearch.common.Priority; +import org.elasticsearch.common.Strings; +import org.elasticsearch.common.SuppressForbidden; +import org.elasticsearch.common.bytes.BytesReference; +import org.elasticsearch.common.lucene.uid.Versions; +import org.elasticsearch.common.unit.TimeValue; +import org.elasticsearch.common.xcontent.DeprecationHandler; +import org.elasticsearch.common.xcontent.NamedXContentRegistry; +import org.elasticsearch.common.xcontent.ToXContent; +import org.elasticsearch.common.xcontent.XContent; +import org.elasticsearch.common.xcontent.XContentBuilder; +import org.elasticsearch.common.xcontent.XContentHelper; +import org.elasticsearch.common.xcontent.XContentParser; +import org.elasticsearch.common.xcontent.XContentType; +import org.elasticsearch.index.VersionType; +import org.elasticsearch.index.rankeval.RankEvalRequest; +import org.elasticsearch.index.reindex.AbstractBulkByScrollRequest; +import org.elasticsearch.index.reindex.DeleteByQueryRequest; +import org.elasticsearch.index.reindex.ReindexRequest; +import org.elasticsearch.index.reindex.UpdateByQueryRequest; +import org.elasticsearch.search.fetch.subphase.FetchSourceContext; +import org.elasticsearch.tasks.TaskId; +import org.springframework.data.elasticsearch.client.reactive.ReactiveElasticsearchClient; +import org.springframework.http.HttpMethod; + +/** + *

+ * Original implementation source {@link org.elasticsearch.client.RequestConverters} by {@literal Elasticsearch} + * (https://www.elastic.co) licensed under the Apache License, Version 2.0. + *

 * Modified for usage with {@link ReactiveElasticsearchClient}.
 *
 * @since 4.0
 */
public class RequestConverters {

	/** Default body content type used when a request does not dictate one itself. */
	private static final XContentType REQUEST_BODY_CONTENT_TYPE = XContentType.JSON;

	private RequestConverters() {
		// static utility methods only - no instances
	}

	/** Converts a {@link DeleteRequest} into a low-level {@code DELETE /{index}/{type}/{id}} {@link Request}. */
	public static Request delete(DeleteRequest deleteRequest) {
		String endpoint = endpoint(deleteRequest.index(), deleteRequest.type(), deleteRequest.id());
		Request request = new Request(HttpMethod.DELETE.name(), endpoint);

		Params parameters = new Params(request);
		parameters.withRouting(deleteRequest.routing());
		parameters.withTimeout(deleteRequest.timeout());
		parameters.withVersion(deleteRequest.version());
		parameters.withVersionType(deleteRequest.versionType());
		parameters.withRefreshPolicy(deleteRequest.getRefreshPolicy());
		parameters.withWaitForActiveShards(deleteRequest.waitForActiveShards());
		return request;
	}

	/** Creates a {@code GET /} request (cluster/root info). */
	public static Request info() {
		return new Request(HttpMethod.GET.name(), "/");
	}

	/**
	 * Converts a {@link BulkRequest} into a {@code POST /_bulk} {@link Request}, serializing each sub-request as
	 * newline-delimited metadata + source pairs in the Bulk API wire format.
	 *
	 * @throws IOException if serializing a sub-request fails
	 */
	public static Request bulk(BulkRequest bulkRequest) throws IOException {
		Request request = new Request(HttpMethod.POST.name(), "/_bulk");

		Params parameters = new Params(request);
		parameters.withTimeout(bulkRequest.timeout());
		parameters.withRefreshPolicy(bulkRequest.getRefreshPolicy());

		// parameters.withPipeline(bulkRequest.pipeline());
		// parameters.withRouting(bulkRequest.routing());

		// Bulk API only supports newline delimited JSON or Smile. Before executing
		// the bulk, we need to check that all requests have the same content-type
		// and this content-type is supported by the Bulk API.
		XContentType bulkContentType = null;
		for (int i = 0; i < bulkRequest.numberOfActions(); i++) {
			DocWriteRequest action = bulkRequest.requests().get(i);

			DocWriteRequest.OpType opType = action.opType();
			if (opType == DocWriteRequest.OpType.INDEX || opType == DocWriteRequest.OpType.CREATE) {
				bulkContentType = enforceSameContentType((IndexRequest) action, bulkContentType);

			} else if (opType == DocWriteRequest.OpType.UPDATE) {
				UpdateRequest updateRequest = (UpdateRequest) action;
				if (updateRequest.doc() != null) {
					bulkContentType = enforceSameContentType(updateRequest.doc(), bulkContentType);
				}
				if (updateRequest.upsertRequest() != null) {
					bulkContentType = enforceSameContentType(updateRequest.upsertRequest(), bulkContentType);
				}
			}
		}

		if (bulkContentType == null) {
			bulkContentType = XContentType.JSON;
		}

		final byte separator = bulkContentType.xContent().streamSeparator();
		final ContentType requestContentType = createContentType(bulkContentType);

		ByteArrayOutputStream content = new ByteArrayOutputStream();
		for (DocWriteRequest action : bulkRequest.requests()) {
			DocWriteRequest.OpType opType = action.opType();

			// first line of each pair: the action metadata object, e.g. {"index":{"_index":...,"_id":...}}
			try (XContentBuilder metadata = XContentBuilder.builder(bulkContentType.xContent())) {
				metadata.startObject();
				{
					metadata.startObject(opType.getLowercase());
					if (Strings.hasLength(action.index())) {
						metadata.field("_index", action.index());
					}
					if (Strings.hasLength(action.type())) {
						metadata.field("_type", action.type());
					}
					if (Strings.hasLength(action.id())) {
						metadata.field("_id", action.id());
					}
					if (Strings.hasLength(action.routing())) {
						metadata.field("routing", action.routing());
					}
					if (action.version() != Versions.MATCH_ANY) {
						metadata.field("version", action.version());
					}

					VersionType versionType = action.versionType();
					if (versionType != VersionType.INTERNAL) {
						if (versionType == VersionType.EXTERNAL) {
							metadata.field("version_type", "external");
						} else if (versionType == VersionType.EXTERNAL_GTE) {
							metadata.field("version_type", "external_gte");
						} else if (versionType == VersionType.FORCE) {
							metadata.field("version_type", "force");
						}
					}

					if (opType == DocWriteRequest.OpType.INDEX || opType == DocWriteRequest.OpType.CREATE) {
						IndexRequest indexRequest = (IndexRequest) action;
						if (Strings.hasLength(indexRequest.getPipeline())) {
							metadata.field("pipeline", indexRequest.getPipeline());
						}
					} else if (opType == DocWriteRequest.OpType.UPDATE) {
						UpdateRequest updateRequest = (UpdateRequest) action;
						if (updateRequest.retryOnConflict() > 0) {
							metadata.field("retry_on_conflict", updateRequest.retryOnConflict());
						}
						if (updateRequest.fetchSource() != null) {
							metadata.field("_source", updateRequest.fetchSource());
						}
					}
					metadata.endObject();
				}
				metadata.endObject();

				BytesRef metadataSource = BytesReference.bytes(metadata).toBytesRef();
				content.write(metadataSource.bytes, metadataSource.offset, metadataSource.length);
				content.write(separator);
			}

			// second line of each pair (index/create/update only): the document source,
			// re-encoded if needed so the whole body shares one content type
			BytesRef source = null;
			if (opType == DocWriteRequest.OpType.INDEX || opType == DocWriteRequest.OpType.CREATE) {
				IndexRequest indexRequest = (IndexRequest) action;
				BytesReference indexSource = indexRequest.source();
				XContentType indexXContentType = indexRequest.getContentType();

				try (XContentParser parser = XContentHelper.createParser(
						/*
						 * EMPTY and THROW are fine here because we just call
						 * copyCurrentStructure which doesn't touch the
						 * registry or deprecation.
						 */
						NamedXContentRegistry.EMPTY, DeprecationHandler.THROW_UNSUPPORTED_OPERATION, indexSource,
						indexXContentType)) {
					try (XContentBuilder builder = XContentBuilder.builder(bulkContentType.xContent())) {
						builder.copyCurrentStructure(parser);
						source = BytesReference.bytes(builder).toBytesRef();
					}
				}
			} else if (opType == DocWriteRequest.OpType.UPDATE) {
				source = XContentHelper.toXContent((UpdateRequest) action, bulkContentType, false).toBytesRef();
			}

			if (source != null) {
				content.write(source.bytes, source.offset, source.length);
				content.write(separator);
			}
		}
		request.setEntity(new ByteArrayEntity(content.toByteArray(), 0, content.size(), requestContentType));
		return request;
	}

	/** Converts a {@link GetRequest} into a {@code HEAD} document-exists {@link Request}. */
	public static Request exists(GetRequest getRequest) {
		return getStyleRequest(HttpMethod.HEAD.name(), getRequest);
	}

	/** Converts a {@link GetRequest} into a {@code GET /{index}/{type}/{id}} {@link Request}. */
	public static Request get(GetRequest getRequest) {
		return getStyleRequest(HttpMethod.GET.name(), getRequest);
	}

	// shared by exists() and get(): same endpoint and parameters, different HTTP method
	private static Request getStyleRequest(String method, GetRequest getRequest) {
		Request request = new Request(method, endpoint(getRequest.index(), getRequest.type(), getRequest.id()));

		Params parameters = new Params(request);
		parameters.withPreference(getRequest.preference());
		parameters.withRouting(getRequest.routing());
		parameters.withRefresh(getRequest.refresh());
		parameters.withRealtime(getRequest.realtime());
		parameters.withStoredFields(getRequest.storedFields());
		parameters.withVersion(getRequest.version());
		parameters.withVersionType(getRequest.versionType());
		parameters.withFetchSourceContext(getRequest.fetchSourceContext());

		return request;
	}

	/** Converts a {@link GetRequest} into a {@code HEAD .../_source} source-exists {@link Request}. */
	public static Request sourceExists(GetRequest getRequest) {
		Request request = new Request(HttpMethod.HEAD.name(),
				endpoint(getRequest.index(), getRequest.type(), getRequest.id(), "_source"));

		Params parameters = new Params(request);
		parameters.withPreference(getRequest.preference());
		parameters.withRouting(getRequest.routing());
		parameters.withRefresh(getRequest.refresh());
		parameters.withRealtime(getRequest.realtime());
		// Version params are not currently supported by the source exists API so are not passed
		return request;
	}

	/** Converts a {@link MultiGetRequest} into a {@code POST /_mget} {@link Request} with a JSON body. */
	public static Request multiGet(MultiGetRequest multiGetRequest) {
		Request request = new Request(HttpMethod.POST.name(), "/_mget");

		Params parameters = new Params(request);
		parameters.withPreference(multiGetRequest.preference());
		parameters.withRealtime(multiGetRequest.realtime());
		parameters.withRefresh(multiGetRequest.refresh());

		request.setEntity(createEntity(multiGetRequest, REQUEST_BODY_CONTENT_TYPE));
		return request;
	}

	/**
	 * Converts an {@link IndexRequest} into an index {@link Request}: {@code PUT} when an id is present,
	 * {@code POST} (auto-generated id) otherwise; {@code .../_create} is appended for op-type CREATE.
	 */
	public static Request index(IndexRequest indexRequest) {
		String method = Strings.hasLength(indexRequest.id()) ? HttpMethod.PUT.name() : HttpMethod.POST.name();
		boolean isCreate = (indexRequest.opType() == DocWriteRequest.OpType.CREATE);
		String endpoint = endpoint(indexRequest.index(), indexRequest.type(), indexRequest.id(),
				isCreate ? "_create" : null);
		Request request = new Request(method, endpoint);

		Params parameters = new Params(request);
		parameters.withRouting(indexRequest.routing());
		parameters.withTimeout(indexRequest.timeout());
		parameters.withVersion(indexRequest.version());
		parameters.withVersionType(indexRequest.versionType());
		parameters.withPipeline(indexRequest.getPipeline());
		parameters.withRefreshPolicy(indexRequest.getRefreshPolicy());
		parameters.withWaitForActiveShards(indexRequest.waitForActiveShards());

		// the document source is sent verbatim in its original content type
		BytesRef source = indexRequest.source().toBytesRef();
		ContentType contentType = createContentType(indexRequest.getContentType());
		request.setEntity(new ByteArrayEntity(source.bytes, source.offset, source.length, contentType));
		return request;
	}

	/** Creates a {@code HEAD /} ping {@link Request}. */
	public static Request ping() {
		return new Request(HttpMethod.HEAD.name(), "/");
	}

	/** Converts an {@link UpdateRequest} into a {@code POST .../_update} {@link Request}. */
	public static Request update(UpdateRequest updateRequest) {
		String endpoint = endpoint(updateRequest.index(), updateRequest.type(), updateRequest.id(), "_update");
		Request request = new Request(HttpMethod.POST.name(), endpoint);

		Params parameters = new Params(request);
		parameters.withRouting(updateRequest.routing());
		parameters.withTimeout(updateRequest.timeout());
		parameters.withRefreshPolicy(updateRequest.getRefreshPolicy());
		parameters.withWaitForActiveShards(updateRequest.waitForActiveShards());
		parameters.withDocAsUpsert(updateRequest.docAsUpsert());
		parameters.withFetchSourceContext(updateRequest.fetchSource());
		parameters.withRetryOnConflict(updateRequest.retryOnConflict());
		parameters.withVersion(updateRequest.version());
		parameters.withVersionType(updateRequest.versionType());

		// The Java API allows update requests with different content types
		// set for the partial document and the upsert document. This client
		// only accepts update requests that have the same content types set
		// for both doc and upsert.
		XContentType xContentType = null;
		if (updateRequest.doc() != null) {
			xContentType = updateRequest.doc().getContentType();
		}
		if (updateRequest.upsertRequest() != null) {
			XContentType upsertContentType = updateRequest.upsertRequest().getContentType();
			if ((xContentType != null) && (xContentType != upsertContentType)) {
				throw new IllegalStateException("Update request cannot have different content types for doc [" + xContentType
						+ "]" + " and upsert [" + upsertContentType + "] documents");
			} else {
				xContentType = upsertContentType;
			}
		}
		if (xContentType == null) {
			xContentType = Requests.INDEX_CONTENT_TYPE;
		}
		request.setEntity(createEntity(updateRequest, xContentType));
		return request;
	}

	/** Converts a {@link SearchRequest} into a {@code POST .../_search} {@link Request}. */
	public static Request search(SearchRequest searchRequest) {
		Request request = new Request(HttpMethod.POST.name(),
				endpoint(searchRequest.indices(), searchRequest.types(), "_search"));

		Params params = new Params(request);
		addSearchRequestParams(params, searchRequest);

		if (searchRequest.source() != null) {
			request.setEntity(createEntity(searchRequest.source(), REQUEST_BODY_CONTENT_TYPE));
		}
		return request;
	}

	// URL parameters shared by the search endpoints
	private static void addSearchRequestParams(Params params, SearchRequest searchRequest) {
		params.putParam("typed_keys", "true");
		params.withRouting(searchRequest.routing());
		params.withPreference(searchRequest.preference());
		params.withIndicesOptions(searchRequest.indicesOptions());
		params.putParam("search_type", searchRequest.searchType().name().toLowerCase(Locale.ROOT));
		if (searchRequest.requestCache() != null) {
			params.putParam("request_cache", Boolean.toString(searchRequest.requestCache()));
		}
		if (searchRequest.allowPartialSearchResults() != null) {
			params.putParam("allow_partial_search_results", Boolean.toString(searchRequest.allowPartialSearchResults()));
		}
		params.putParam("batched_reduce_size", Integer.toString(searchRequest.getBatchedReduceSize()));
		if (searchRequest.scroll() != null) {
			params.putParam("scroll", searchRequest.scroll().keepAlive());
		}
	}

	/** Converts a {@link SearchScrollRequest} into a {@code POST /_search/scroll} {@link Request}. */
	public static Request searchScroll(SearchScrollRequest searchScrollRequest) {
		Request request = new Request(HttpMethod.POST.name(), "/_search/scroll");
		request.setEntity(createEntity(searchScrollRequest, REQUEST_BODY_CONTENT_TYPE));
		return request;
	}

	/** Converts a {@link ClearScrollRequest} into a {@code DELETE /_search/scroll} {@link Request}. */
	public static Request clearScroll(ClearScrollRequest clearScrollRequest) {
		Request request = new Request(HttpMethod.DELETE.name(), "/_search/scroll");
		request.setEntity(createEntity(clearScrollRequest, REQUEST_BODY_CONTENT_TYPE));
		return request;
	}

	/**
	 * Converts a {@link MultiSearchRequest} into a {@code POST /_msearch} {@link Request} using the
	 * multi-line request body format.
	 *
	 * @throws IOException if serializing the request body fails
	 */
	public static Request multiSearch(MultiSearchRequest multiSearchRequest) throws IOException {
		Request request = new Request(HttpMethod.POST.name(), "/_msearch");

		Params params = new Params(request);
		params.putParam("typed_keys", "true");
		if (multiSearchRequest.maxConcurrentSearchRequests() != MultiSearchRequest.MAX_CONCURRENT_SEARCH_REQUESTS_DEFAULT) {
			params.putParam("max_concurrent_searches", Integer.toString(multiSearchRequest.maxConcurrentSearchRequests()));
		}

		XContent xContent = REQUEST_BODY_CONTENT_TYPE.xContent();
		byte[] source = MultiSearchRequest.writeMultiLineFormat(multiSearchRequest, xContent);
		request.setEntity(new ByteArrayEntity(source, createContentType(xContent.type())));
		return request;
	}

	/** Converts an {@link ExplainRequest} into a {@code GET .../_explain} {@link Request}. */
	public static Request explain(ExplainRequest explainRequest) throws IOException {
		Request request = new Request(HttpMethod.GET.name(),
				endpoint(explainRequest.index(), explainRequest.type(), explainRequest.id(), "_explain"));

		Params params = new Params(request);
		params.withStoredFields(explainRequest.storedFields());
		params.withFetchSourceContext(explainRequest.fetchSourceContext());
		params.withRouting(explainRequest.routing());
		params.withPreference(explainRequest.preference());
		request.setEntity(createEntity(explainRequest, REQUEST_BODY_CONTENT_TYPE));
		return request;
	}

	/** Converts a {@link FieldCapabilitiesRequest} into a {@code GET .../_field_caps} {@link Request}. */
	public static Request fieldCaps(FieldCapabilitiesRequest fieldCapabilitiesRequest) {
		Request request = new Request(HttpMethod.GET.name(), endpoint(fieldCapabilitiesRequest.indices(), "_field_caps"));

		Params params = new Params(request);
		params.withFields(fieldCapabilitiesRequest.fields());
		params.withIndicesOptions(fieldCapabilitiesRequest.indicesOptions());
		return request;
	}

	/** Converts a {@link RankEvalRequest} into a {@code GET .../_rank_eval} {@link Request}. */
	public static Request rankEval(RankEvalRequest rankEvalRequest) throws IOException {
		Request request = new Request(HttpMethod.GET.name(),
				endpoint(rankEvalRequest.indices(), Strings.EMPTY_ARRAY, "_rank_eval"));

		Params params = new Params(request);
		params.withIndicesOptions(rankEvalRequest.indicesOptions());

		request.setEntity(createEntity(rankEvalRequest.getRankEvalSpec(), REQUEST_BODY_CONTENT_TYPE));
		return request;
	}

	/** Converts a {@link ReindexRequest} into a blocking (wait_for_completion=true) {@code POST /_reindex}. */
	public static Request reindex(ReindexRequest reindexRequest) throws IOException {
		return prepareReindexRequest(reindexRequest, true);
	}

	/** Converts a {@link ReindexRequest} into a fire-and-forget (wait_for_completion=false) {@code POST /_reindex}. */
	static Request submitReindex(ReindexRequest reindexRequest) throws IOException {
		return prepareReindexRequest(reindexRequest, false);
	}

	private static Request prepareReindexRequest(ReindexRequest reindexRequest, boolean waitForCompletion)
			throws IOException {
		String endpoint = new EndpointBuilder().addPathPart("_reindex").build();
		Request request = new Request(HttpMethod.POST.name(), endpoint);
		Params params = new Params(request).withWaitForCompletion(waitForCompletion).withRefresh(reindexRequest.isRefresh())
				.withTimeout(reindexRequest.getTimeout()).withWaitForActiveShards(reindexRequest.getWaitForActiveShards())
				.withRequestsPerSecond(reindexRequest.getRequestsPerSecond());

		if (reindexRequest.getScrollTime() != null) {
			params.putParam("scroll", reindexRequest.getScrollTime());
		}
		request.setEntity(createEntity(reindexRequest, REQUEST_BODY_CONTENT_TYPE));
		return request;
	}

	/** Converts an {@link UpdateByQueryRequest} into a {@code POST .../_update_by_query} {@link Request}. */
	public static Request updateByQuery(UpdateByQueryRequest updateByQueryRequest) throws IOException {
		String endpoint = endpoint(updateByQueryRequest.indices(), updateByQueryRequest.getDocTypes(), "_update_by_query");
		Request request = new Request(HttpMethod.POST.name(), endpoint);
		Params params = new Params(request).withRouting(updateByQueryRequest.getRouting())
				.withPipeline(updateByQueryRequest.getPipeline()).withRefresh(updateByQueryRequest.isRefresh())
				.withTimeout(updateByQueryRequest.getTimeout())
				.withWaitForActiveShards(updateByQueryRequest.getWaitForActiveShards())
				.withRequestsPerSecond(updateByQueryRequest.getRequestsPerSecond())
				.withIndicesOptions(updateByQueryRequest.indicesOptions());
		if (updateByQueryRequest.isAbortOnVersionConflict() == false) {
			params.putParam("conflicts", "proceed");
		}
		// non-default scroll/size settings are sent explicitly, defaults are left to the server
		if (updateByQueryRequest.getBatchSize() != AbstractBulkByScrollRequest.DEFAULT_SCROLL_SIZE) {
			params.putParam("scroll_size", Integer.toString(updateByQueryRequest.getBatchSize()));
		}
		if (updateByQueryRequest.getScrollTime() != AbstractBulkByScrollRequest.DEFAULT_SCROLL_TIMEOUT) {
			params.putParam("scroll", updateByQueryRequest.getScrollTime());
		}
		if (updateByQueryRequest.getSize() > 0) {
			params.putParam("size", Integer.toString(updateByQueryRequest.getSize()));
		}
		request.setEntity(createEntity(updateByQueryRequest, REQUEST_BODY_CONTENT_TYPE));
		return request;
	}

	/** Converts a {@link DeleteByQueryRequest} into a {@code POST .../_delete_by_query} {@link Request}. */
	public static Request deleteByQuery(DeleteByQueryRequest deleteByQueryRequest) throws IOException {
		String endpoint = endpoint(deleteByQueryRequest.indices(), deleteByQueryRequest.getDocTypes(), "_delete_by_query");
		Request request = new Request(HttpMethod.POST.name(), endpoint);
		Params params = new Params(request).withRouting(deleteByQueryRequest.getRouting())
				.withRefresh(deleteByQueryRequest.isRefresh()).withTimeout(deleteByQueryRequest.getTimeout())
				.withWaitForActiveShards(deleteByQueryRequest.getWaitForActiveShards())
				.withRequestsPerSecond(deleteByQueryRequest.getRequestsPerSecond())
				.withIndicesOptions(deleteByQueryRequest.indicesOptions());
		if (deleteByQueryRequest.isAbortOnVersionConflict() == false) {
			params.putParam("conflicts", "proceed");
		}
		if (deleteByQueryRequest.getBatchSize() != AbstractBulkByScrollRequest.DEFAULT_SCROLL_SIZE) {
			params.putParam("scroll_size", Integer.toString(deleteByQueryRequest.getBatchSize()));
		}
		if (deleteByQueryRequest.getScrollTime() != AbstractBulkByScrollRequest.DEFAULT_SCROLL_TIMEOUT) {
			params.putParam("scroll", deleteByQueryRequest.getScrollTime());
		}
		if (deleteByQueryRequest.getSize() > 0) {
			params.putParam("size", Integer.toString(deleteByQueryRequest.getSize()));
		}
		request.setEntity(createEntity(deleteByQueryRequest, REQUEST_BODY_CONTENT_TYPE));
		return request;
	}

	/** Rethrottles a running reindex task ({@code POST /_reindex/{taskId}/_rethrottle}). */
	public static Request rethrottleReindex(RethrottleRequest rethrottleRequest) {
		return rethrottle(rethrottleRequest, "_reindex");
	}

	/** Rethrottles a running update-by-query task. */
	public static Request rethrottleUpdateByQuery(RethrottleRequest rethrottleRequest) {
		return rethrottle(rethrottleRequest, "_update_by_query");
	}

	/** Rethrottles a running delete-by-query task. */
	public static Request rethrottleDeleteByQuery(RethrottleRequest rethrottleRequest) {
		return rethrottle(rethrottleRequest, "_delete_by_query");
	}

	private static Request rethrottle(RethrottleRequest rethrottleRequest, String firstPathPart) {
		String endpoint = new EndpointBuilder().addPathPart(firstPathPart)
				.addPathPart(rethrottleRequest.getTaskId().toString()).addPathPart("_rethrottle").build();
		Request request = new Request(HttpMethod.POST.name(), endpoint);
		Params params = new Params(request).withRequestsPerSecond(rethrottleRequest.getRequestsPerSecond());
		// we set "group_by" to "none" because this is the response format we can parse back
		params.putParam("group_by", "none");
		return request;
	}

	/** Converts a {@link PutStoredScriptRequest} into a {@code POST /_scripts/{id}} {@link Request}. */
	public static Request putScript(PutStoredScriptRequest putStoredScriptRequest) throws IOException {
		String endpoint = new EndpointBuilder().addPathPartAsIs("_scripts").addPathPart(putStoredScriptRequest.id())
				.build();
		Request request = new Request(HttpMethod.POST.name(), endpoint);
		Params params = new Params(request);
		params.withTimeout(putStoredScriptRequest.timeout());
		params.withMasterTimeout(putStoredScriptRequest.masterNodeTimeout());
		if (Strings.hasText(putStoredScriptRequest.context())) {
			params.putParam("context", putStoredScriptRequest.context());
		}
		request.setEntity(createEntity(putStoredScriptRequest, REQUEST_BODY_CONTENT_TYPE));
		return request;
	}

	/** Converts an {@link AnalyzeRequest} into a {@code GET [/{index}]/_analyze} {@link Request}. */
	public static Request analyze(AnalyzeRequest request) throws IOException {
		EndpointBuilder builder = new EndpointBuilder();
		String index = request.index();
		if (index != null) {
			builder.addPathPart(index);
		}
		builder.addPathPartAsIs("_analyze");
		Request req = new Request(HttpMethod.GET.name(), builder.build());
		req.setEntity(createEntity(request, REQUEST_BODY_CONTENT_TYPE));
		return req;
	}

	// static Request termVectors(TermVectorsRequest request) throws IOException {
	// String endpoint = new EndpointBuilder().addPathPart(request.index(), request.type(), request.id())
	// .addPathPartAsIs("_termvectors").build();
	//
	// Request req = new Request(HttpMethod.GET.name(), endpoint);
	// Params params = new Params(req);
	// params.withRouting(request.routing());
	// params.withPreference(request.preference());
	// params.withFields(request.selectedFields().toArray(new String[0]));
	// params.withRealtime(request.realtime());
	//
	// req.setEntity(createEntity(request, REQUEST_BODY_CONTENT_TYPE));
	// return req;
	// }

	/** Converts a {@link GetStoredScriptRequest} into a {@code GET /_scripts/{id}} {@link Request}. */
	public static Request getScript(GetStoredScriptRequest getStoredScriptRequest) {
		String endpoint = new EndpointBuilder().addPathPartAsIs("_scripts").addPathPart(getStoredScriptRequest.id())
				.build();
		Request request = new Request(HttpMethod.GET.name(), endpoint);
		Params params = new Params(request);
		params.withMasterTimeout(getStoredScriptRequest.masterNodeTimeout());
		return request;
	}

	/** Converts a {@link DeleteStoredScriptRequest} into a {@code DELETE /_scripts/{id}} {@link Request}. */
	public static Request deleteScript(DeleteStoredScriptRequest deleteStoredScriptRequest) {
		String endpoint = new EndpointBuilder().addPathPartAsIs("_scripts").addPathPart(deleteStoredScriptRequest.id())
				.build();
		Request request = new Request(HttpMethod.DELETE.name(), endpoint);
		Params params = new Params(request);
		params.withTimeout(deleteStoredScriptRequest.timeout());
		params.withMasterTimeout(deleteStoredScriptRequest.masterNodeTimeout());
		return request;
	}

	/**
	 * Serializes the given {@link ToXContent} into an {@link HttpEntity} with a matching content type.
	 * {@link IOException}s are rethrown unchecked since callers build in-memory bodies only.
	 */
	static HttpEntity createEntity(ToXContent toXContent, XContentType xContentType) {

		try {
			BytesRef source = XContentHelper.toXContent(toXContent, xContentType, false).toBytesRef();
			return new ByteArrayEntity(source.bytes, source.offset, source.length, createContentType(xContentType));
		} catch (IOException e) {
			throw new RuntimeException(e);
		}
	}

	// endpoint(...) overloads: build URL-encoded paths, silently skipping null/empty parts

	static String endpoint(String index, String type, String id) {
		return new EndpointBuilder().addPathPart(index, type, id).build();
	}

	static String endpoint(String index, String type, String id, String endpoint) {
		return new EndpointBuilder().addPathPart(index, type, id).addPathPartAsIs(endpoint).build();
	}

	static String endpoint(String[] indices) {
		return new EndpointBuilder().addCommaSeparatedPathParts(indices).build();
	}

	static String endpoint(String[] indices, String endpoint) {
		return new EndpointBuilder().addCommaSeparatedPathParts(indices).addPathPartAsIs(endpoint).build();
	}

	static String endpoint(String[] indices, String[] types, String endpoint) {
		return new EndpointBuilder().addCommaSeparatedPathParts(indices).addCommaSeparatedPathParts(types)
				.addPathPartAsIs(endpoint).build();
	}

	static String endpoint(String[] indices, String endpoint, String[] suffixes) {
		return new EndpointBuilder().addCommaSeparatedPathParts(indices).addPathPartAsIs(endpoint)
				.addCommaSeparatedPathParts(suffixes).build();
	}

	static String endpoint(String[] indices, String endpoint, String type) {
		return new EndpointBuilder().addCommaSeparatedPathParts(indices).addPathPartAsIs(endpoint).addPathPart(type)
				.build();
	}

	/**
	 * Returns a {@link ContentType} from a given {@link XContentType}.
	 *
	 * @param xContentType the {@link XContentType}
	 * @return the {@link ContentType}
	 */
	@SuppressForbidden(reason = "Only allowed place to convert a XContentType to a ContentType")
	public static ContentType createContentType(final XContentType xContentType) {
		return ContentType.create(xContentType.mediaTypeWithoutParameters(), (Charset) null);
	}

	/**
	 * Utility class to help with common parameter names and patterns. Wraps a {@link Request} and adds the parameters to
	 * it directly.
	 */
	static class Params {
		private final Request request;

		Params(Request request) {
			this.request = request;
		}

		// adds the parameter only when the value is non-empty
		Params putParam(String name, String value) {
			if (Strings.hasLength(value)) {
				request.addParameter(name, value);
			}
			return this;
		}

		Params putParam(String key, TimeValue value) {
			if (value != null) {
				return putParam(key, value.getStringRep());
			}
			return this;
		}

		Params withDocAsUpsert(boolean docAsUpsert) {
			if (docAsUpsert) {
				return putParam("doc_as_upsert", Boolean.TRUE.toString());
			}
			return this;
		}

		Params withFetchSourceContext(FetchSourceContext fetchSourceContext) {
			if (fetchSourceContext != null) {
				if (fetchSourceContext.fetchSource() == false) {
					putParam("_source", Boolean.FALSE.toString());
				}
				if (fetchSourceContext.includes() != null && fetchSourceContext.includes().length > 0) {
					putParam("_source_includes", String.join(",", fetchSourceContext.includes()));
				}
				if (fetchSourceContext.excludes() != null && fetchSourceContext.excludes().length > 0) {
					putParam("_source_excludes", String.join(",", fetchSourceContext.excludes()));
				}
			}
			return this;
		}

		Params withFields(String[] fields) {
			if (fields != null && fields.length > 0) {
				return putParam("fields", String.join(",", fields));
			}
			return this;
		}

		Params withMasterTimeout(TimeValue masterTimeout) {
			return putParam("master_timeout", masterTimeout);
		}

		Params withPipeline(String pipeline) {
			return putParam("pipeline", pipeline);
		}

		Params withPreference(String preference) {
			return putParam("preference", preference);
		}

		Params withRealtime(boolean realtime) {
			// realtime defaults to true server-side, only the opt-out is sent
			if (realtime == false) {
				return putParam("realtime", Boolean.FALSE.toString());
			}
			return this;
		}

		Params withRefresh(boolean refresh) {
			if (refresh) {
				return withRefreshPolicy(RefreshPolicy.IMMEDIATE);
			}
			return this;
		}

		Params withRefreshPolicy(RefreshPolicy refreshPolicy) {
			if (refreshPolicy != RefreshPolicy.NONE) {
				return putParam("refresh", refreshPolicy.getValue());
			}
			return this;
		}

		Params withRequestsPerSecond(float requestsPerSecond) {
			// the default in AbstractBulkByScrollRequest is Float.POSITIVE_INFINITY,
			// but we don't want to add that to the URL parameters, instead we use -1
			if (Float.isFinite(requestsPerSecond)) {
				return putParam("requests_per_second", Float.toString(requestsPerSecond));
			} else {
				return putParam("requests_per_second", "-1");
			}
		}

		Params withRetryOnConflict(int retryOnConflict) {
			if (retryOnConflict > 0) {
				return putParam("retry_on_conflict", String.valueOf(retryOnConflict));
			}
			return this;
		}

		Params withRouting(String routing) {
			return putParam("routing", routing);
		}

		Params withStoredFields(String[] storedFields) {
			if (storedFields != null && storedFields.length > 0) {
				return putParam("stored_fields", String.join(",", storedFields));
			}
			return this;
		}

		Params withTimeout(TimeValue timeout) {
			return putParam("timeout", timeout);
		}

		Params withVersion(long version) {
			if (version != Versions.MATCH_ANY) {
				return putParam("version", Long.toString(version));
			}
			return this;
		}

		Params withVersionType(VersionType versionType) {
			if (versionType != VersionType.INTERNAL) {
				return putParam("version_type", versionType.name().toLowerCase(Locale.ROOT));
			}
			return this;
		}

		Params withWaitForActiveShards(ActiveShardCount activeShardCount) {
			return withWaitForActiveShards(activeShardCount, ActiveShardCount.DEFAULT);
		}

		Params withWaitForActiveShards(ActiveShardCount activeShardCount, ActiveShardCount defaultActiveShardCount) {
			if (activeShardCount != null && activeShardCount != defaultActiveShardCount) {
				return putParam("wait_for_active_shards", activeShardCount.toString().toLowerCase(Locale.ROOT));
			}
			return this;
		}

		Params withIndicesOptions(IndicesOptions indicesOptions) {
			withIgnoreUnavailable(indicesOptions.ignoreUnavailable());
			putParam("allow_no_indices", Boolean.toString(indicesOptions.allowNoIndices()));
			String expandWildcards;
			if (indicesOptions.expandWildcardsOpen() == false && indicesOptions.expandWildcardsClosed() == false) {
				expandWildcards = "none";
			} else {
				StringJoiner joiner = new StringJoiner(",");
				if (indicesOptions.expandWildcardsOpen()) {
					joiner.add("open");
				}
				if (indicesOptions.expandWildcardsClosed()) {
					joiner.add("closed");
				}
				expandWildcards = joiner.toString();
			}
			putParam("expand_wildcards", expandWildcards);
			return this;
		}

		Params withIgnoreUnavailable(boolean ignoreUnavailable) {
			// Always explicitly place the ignore_unavailable value.
			putParam("ignore_unavailable", Boolean.toString(ignoreUnavailable));
			return this;
		}

		Params withHuman(boolean human) {
			if (human) {
				putParam("human", Boolean.toString(human));
			}
			return this;
		}

		Params withLocal(boolean local) {
			if (local) {
				putParam("local", Boolean.toString(local));
			}
			return this;
		}

		Params withIncludeDefaults(boolean includeDefaults) {
			if (includeDefaults) {
				return putParam("include_defaults", Boolean.TRUE.toString());
			}
			return this;
		}

		Params withPreserveExisting(boolean preserveExisting) {
			if (preserveExisting) {
				return putParam("preserve_existing", Boolean.TRUE.toString());
			}
			return this;
		}

		Params withDetailed(boolean detailed) {
			if (detailed) {
				return putParam("detailed", Boolean.TRUE.toString());
			}
			return this;
		}

		Params withWaitForCompletion(Boolean waitForCompletion) {
			return putParam("wait_for_completion", waitForCompletion.toString());
		}

		Params withNodes(String[] nodes) {
			if (nodes != null && nodes.length > 0) {
				return putParam("nodes", String.join(",", nodes));
			}
			return this;
		}

		Params withActions(String[] actions) {
			if (actions != null && actions.length > 0) {
				return putParam("actions", String.join(",", actions));
			}
			return this;
		}

		Params withTaskId(TaskId taskId) {
			if (taskId != null && taskId.isSet()) {
				return putParam("task_id", taskId.toString());
			}
			return this;
		}

		Params withParentTaskId(TaskId parentTaskId) {
			if (parentTaskId != null && parentTaskId.isSet()) {
				return putParam("parent_task_id", parentTaskId.toString());
			}
			return this;
		}

		Params withVerify(boolean verify) {
			if (verify) {
				return putParam("verify", Boolean.TRUE.toString());
			}
			return this;
		}

		Params withWaitForStatus(ClusterHealthStatus status) {
			if (status != null) {
				return putParam("wait_for_status", status.name().toLowerCase(Locale.ROOT));
			}
			return this;
		}

		Params withWaitForNoRelocatingShards(boolean waitNoRelocatingShards) {
			if (waitNoRelocatingShards) {
				return putParam("wait_for_no_relocating_shards", Boolean.TRUE.toString());
			}
			return this;
		}

		Params withWaitForNoInitializingShards(boolean waitNoInitShards) {
			if (waitNoInitShards) {
				return putParam("wait_for_no_initializing_shards", Boolean.TRUE.toString());
			}
			return this;
		}

		Params withWaitForNodes(String waitForNodes) {
			return putParam("wait_for_nodes", waitForNodes);
		}

		Params withLevel(ClusterHealthRequest.Level level) {
			return putParam("level", level.name().toLowerCase(Locale.ROOT));
		}

		Params withWaitForEvents(Priority waitForEvents) {
			if (waitForEvents != null) {
				return putParam("wait_for_events", waitForEvents.name().toLowerCase(Locale.ROOT));
			}
			return this;
		}
	}

	/**
	 * Ensure that the {@link IndexRequest}'s content type is supported by the Bulk API and that it conforms to the
	 * current {@link BulkRequest}'s content type (if it's known at the time this method gets called).
+ * + * @return the {@link IndexRequest}'s content type + */ + static XContentType enforceSameContentType(IndexRequest indexRequest, @Nullable XContentType xContentType) { + XContentType requestContentType = indexRequest.getContentType(); + if (requestContentType != XContentType.JSON && requestContentType != XContentType.SMILE) { + throw new IllegalArgumentException("Unsupported content-type found for request with content-type [" + + requestContentType + "], only JSON and SMILE are supported"); + } + if (xContentType == null) { + return requestContentType; + } + if (requestContentType != xContentType) { + throw new IllegalArgumentException("Mismatching content-type found for request with content-type [" + + requestContentType + "], previous requests have content-type [" + xContentType + "]"); + } + return xContentType; + } + + /** + * Utility class to build request's endpoint given its parts as strings + */ + static class EndpointBuilder { + + private final StringJoiner joiner = new StringJoiner("/", "/", ""); + + EndpointBuilder addPathPart(String... parts) { + for (String part : parts) { + if (Strings.hasLength(part)) { + joiner.add(encodePart(part)); + } + } + return this; + } + + EndpointBuilder addCommaSeparatedPathParts(String[] parts) { + addPathPart(String.join(",", parts)); + return this; + } + + EndpointBuilder addCommaSeparatedPathParts(List parts) { + addPathPart(String.join(",", parts)); + return this; + } + + EndpointBuilder addPathPartAsIs(String... parts) { + for (String part : parts) { + if (Strings.hasLength(part)) { + joiner.add(part); + } + } + return this; + } + + String build() { + return joiner.toString(); + } + + private static String encodePart(String pathPart) { + try { + // encode each part (e.g. 
index, type and id) separately before merging them into the path + // we prepend "/" to the path part to make this path absolute, otherwise there can be issues with + // paths that start with `-` or contain `:` + URI uri = new URI(null, null, null, -1, "/" + pathPart, null, null); + // manually encode any slash that each part may contain + return uri.getRawPath().substring(1).replaceAll("/", "%2F"); + } catch (URISyntaxException e) { + throw new IllegalArgumentException("Path part [" + pathPart + "] couldn't be encoded", e); + } + } + } +} diff --git a/src/main/java/org/springframework/data/elasticsearch/core/ElasticsearchExceptionTranslator.java b/src/main/java/org/springframework/data/elasticsearch/core/ElasticsearchExceptionTranslator.java new file mode 100644 index 000000000..333f62fe5 --- /dev/null +++ b/src/main/java/org/springframework/data/elasticsearch/core/ElasticsearchExceptionTranslator.java @@ -0,0 +1,38 @@ +/* + * Copyright 2018 the original author or authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
 */

package org.springframework.data.elasticsearch.core;

import org.elasticsearch.ElasticsearchException;
import org.springframework.dao.DataAccessException;
import org.springframework.dao.support.PersistenceExceptionTranslator;

/**
 * Translates exceptions thrown by the Elasticsearch client into Spring's {@link DataAccessException}
 * hierarchy. Translation is not implemented yet — the {@link ElasticsearchException} branch below is a
 * stub, so every exception currently falls through untranslated.
 *
 * @author Christoph Strobl
 * @since 4.0
 */
public class ElasticsearchExceptionTranslator implements PersistenceExceptionTranslator {

	/**
	 * Translate the given runtime exception if possible.
	 * <p>
	 * Currently always returns {@literal null}, which by the {@link PersistenceExceptionTranslator}
	 * contract signals the caller that the exception could not be translated and should be rethrown as-is
	 * (see {@code ReactiveElasticsearchTemplate#translateException}, which falls back to the original
	 * throwable on a {@literal null} result).
	 *
	 * @param ex the exception raised by the Elasticsearch driver; may be any {@link RuntimeException}.
	 * @return the translated {@link DataAccessException}, or {@literal null} if no translation applies.
	 */
	@Override
	public DataAccessException translateExceptionIfPossible(RuntimeException ex) {

		if (ex instanceof ElasticsearchException) {
			// TODO: exception translation — map ElasticsearchException details (status code, error type)
			// onto a concrete DataAccessException subclass instead of falling through to null.
		}

		return null;
	}
}
diff --git a/src/main/java/org/springframework/data/elasticsearch/core/ReactiveElasticsearchTemplate.java b/src/main/java/org/springframework/data/elasticsearch/core/ReactiveElasticsearchTemplate.java new file mode 100644 index 000000000..cfa9de718 --- /dev/null +++ b/src/main/java/org/springframework/data/elasticsearch/core/ReactiveElasticsearchTemplate.java @@ -0,0 +1,349 @@
/*
 * Copyright 2018 the original author or authors.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
+ */ + +package org.springframework.data.elasticsearch.core; + +import static org.elasticsearch.index.VersionType.*; + +import org.elasticsearch.action.support.WriteRequest.RefreshPolicy; +import reactor.core.publisher.Flux; +import reactor.core.publisher.Mono; + +import java.io.IOException; +import java.util.ArrayList; +import java.util.Collections; +import java.util.List; + +import org.elasticsearch.action.get.GetRequest; +import org.elasticsearch.action.index.IndexRequest; +import org.elasticsearch.action.index.IndexResponse; +import org.elasticsearch.action.search.SearchRequest; +import org.elasticsearch.client.Requests; +import org.elasticsearch.index.get.GetResult; +import org.elasticsearch.index.query.QueryBuilder; +import org.elasticsearch.index.query.QueryBuilders; +import org.elasticsearch.search.builder.SearchSourceBuilder; +import org.elasticsearch.search.sort.FieldSortBuilder; +import org.elasticsearch.search.sort.SortBuilders; +import org.elasticsearch.search.sort.SortOrder; +import org.reactivestreams.Publisher; +import org.springframework.data.domain.Sort; +import org.springframework.data.elasticsearch.client.reactive.ReactiveElasticsearchClient; +import org.springframework.data.elasticsearch.core.convert.ElasticsearchConverter; +import org.springframework.data.elasticsearch.core.convert.MappingElasticsearchConverter; +import org.springframework.data.elasticsearch.core.mapping.ElasticsearchPersistentEntity; +import org.springframework.data.elasticsearch.core.mapping.SimpleElasticsearchMappingContext; +import org.springframework.data.elasticsearch.core.query.CriteriaQuery; +import org.springframework.data.elasticsearch.core.query.Query; +import org.springframework.data.mapping.PersistentPropertyAccessor; +import org.springframework.lang.Nullable; +import org.springframework.util.StringUtils; +import org.springframework.web.client.HttpClientErrorException; + +/** + * @author Christoph Strobl + * @since 4.0 + */ +public class 
ReactiveElasticsearchTemplate { + + private final ReactiveElasticsearchClient client; + private final ElasticsearchConverter converter; + private final DefaultResultMapper mapper; + private final ElasticsearchExceptionTranslator exceptionTranslator; + + public ReactiveElasticsearchTemplate(ReactiveElasticsearchClient client) { + this(client, new MappingElasticsearchConverter(new SimpleElasticsearchMappingContext())); + } + + public ReactiveElasticsearchTemplate(ReactiveElasticsearchClient client, ElasticsearchConverter converter) { + + this.client = client; + this.converter = converter; + this.mapper = new DefaultResultMapper(converter.getMappingContext()); + this.exceptionTranslator = new ElasticsearchExceptionTranslator(); + } + + public Mono index(T entity) { + return index(entity, null); + } + + public Mono index(T entity, String index) { + return index(entity, index, null); + } + + /** + * Add the given entity to the index. + * + * @param entity + * @param index + * @param type + * @param + * @return + */ + public Mono index(T entity, String index, String type) { + + ElasticsearchPersistentEntity persistentEntity = lookupPersistentEntity(entity.getClass()); + return doIndex(entity, persistentEntity, index, type) // + .map(it -> { + + // TODO: update id if necessary! + // it.getId() + // it.getVersion() + + return entity; + }); + } + + public Mono get(String id, Class resultType) { + return get(id, resultType, null); + } + + public Mono get(String id, Class resultType, @Nullable String index) { + return get(id, resultType, index, null); + } + + /** + * Fetch the entity with given id. + * + * @param id must not be {@literal null}. + * @param resultType must not be {@literal null}. + * @param index + * @param type + * @param + * @return the {@link Mono} emitting the entity or signalling completion if none found. 
+ */ + public Mono get(String id, Class resultType, @Nullable String index, @Nullable String type) { + + ElasticsearchPersistentEntity persistentEntity = lookupPersistentEntity(resultType); + GetRequest request = new GetRequest(persistentEntity.getIndexName(), persistentEntity.getIndexType(), id); + + return goGet(id, persistentEntity, index, type).map(it -> mapper.mapEntity(it.sourceAsString(), resultType)); + } + + /** + * Search the index for entities matching the given {@link CriteriaQuery query}. + * + * @param query must not be {@literal null}. + * @param resultType must not be {@literal null}. + * @param + * @return + */ + public Flux query(CriteriaQuery query, Class resultType) { + + ElasticsearchPersistentEntity entity = lookupPersistentEntity(resultType); + + SearchRequest request = new SearchRequest(indices(query, entity)); + request.types(indexTypes(query, entity)); + + SearchSourceBuilder searchSourceBuilder = new SearchSourceBuilder(); + searchSourceBuilder.query(mappedQuery(query, entity)); + // TODO: request.source().postFilter(elasticsearchFilter); -- filter query + + searchSourceBuilder.version(entity.hasVersionProperty()); // This has been true by default before + searchSourceBuilder.trackScores(query.getTrackScores()); + + if (query.getSourceFilter() != null) { + searchSourceBuilder.fetchSource(query.getSourceFilter().getIncludes(), query.getSourceFilter().getExcludes()); + } + + if (query.getPageable().isPaged()) { + + long offset = query.getPageable().getOffset(); + if (offset > Integer.MAX_VALUE) { + throw new IllegalArgumentException(String.format("Offset must not be more than %s", Integer.MAX_VALUE)); + } + + searchSourceBuilder.from((int) offset); + searchSourceBuilder.size(query.getPageable().getPageSize()); + } + + if (query.getIndicesOptions() != null) { + request.indicesOptions(query.getIndicesOptions()); + } + + sort(query, entity).forEach(searchSourceBuilder::sort); + + if (query.getMinScore() > 0) { + 
searchSourceBuilder.minScore(query.getMinScore()); + } + request.source(searchSourceBuilder); + + return Flux.from( + execute(client -> client.search(request).map(it -> mapper.mapEntity(it.getSourceAsString(), resultType)))); + } + + /** + * Execute within a {@link ClientCallback} managing resources and translating errors. + * + * @param callback must not be {@literal null}. + * @param + * @return the {@link Publisher} emitting results. + */ + public Publisher execute(ClientCallback> callback) { + return Flux.from(callback.doWithClient(this.client)).onErrorMap(this::translateException); + } + + // Customization Hooks + + protected Mono goGet(String id, ElasticsearchPersistentEntity entity, @Nullable String index, + @Nullable String type) { + + String indexToUse = indexName(index, entity); + String typeToUse = typeName(type, entity); + + return doGet(new GetRequest(indexToUse, typeToUse, id)); + } + + protected Mono doGet(GetRequest request) { + + return Mono.from(execute(client -> client.get(request))) // + .onErrorResume((it) -> { + + if (it instanceof HttpClientErrorException) { + return ((HttpClientErrorException) it).getRawStatusCode() == 404; + } + return false; + + }, (it) -> Mono.empty()); + } + + protected Mono doIndex(Object value, ElasticsearchPersistentEntity entity, @Nullable String index, + @Nullable String type) { + + PersistentPropertyAccessor propertyAccessor = entity.getPropertyAccessor(value); + Object id = propertyAccessor.getProperty(entity.getIdProperty()); + + String indexToUse = indexName(index, entity); + String typeToUse = typeName(type, entity); + + IndexRequest request = id != null ? 
new IndexRequest(indexToUse, typeToUse, id.toString()) + : new IndexRequest(indexToUse, typeToUse); + + try { + request.source(mapper.getEntityMapper().mapToString(value), Requests.INDEX_CONTENT_TYPE); + } catch (IOException e) { + throw new RuntimeException(e); + } + + if (entity.hasVersionProperty()) { + + Object version = propertyAccessor.getProperty(entity.getVersionProperty()); + if (version != null) { + request.version(((Number) version).longValue()); + request.versionType(EXTERNAL); + } + } + + if (entity.getParentIdProperty() != null) { + + Object parentId = propertyAccessor.getProperty(entity.getParentIdProperty()); + if (parentId != null) { + request.parent(parentId.toString()); + } + } + + return doIndex(request.setRefreshPolicy(RefreshPolicy.IMMEDIATE)); + } + + protected Mono doIndex(IndexRequest request) { + return Mono.from(execute(client -> client.index(request))); + } + + // private helpers + + private static String indexName(@Nullable String index, ElasticsearchPersistentEntity entity) { + return StringUtils.isEmpty(index) ? entity.getIndexName() : index; + } + + private static String typeName(@Nullable String type, ElasticsearchPersistentEntity entity) { + return StringUtils.isEmpty(type) ? 
entity.getIndexType() : type; + } + + private static String[] indices(CriteriaQuery query, ElasticsearchPersistentEntity entity) { + + if (query.getIndices().isEmpty()) { + return new String[] { entity.getIndexName() }; + } + + return query.getIndices().toArray(new String[0]); + } + + private static String[] indexTypes(CriteriaQuery query, ElasticsearchPersistentEntity entity) { + + if (query.getTypes().isEmpty()) { + return new String[] { entity.getIndexType() }; + } + + return query.getTypes().toArray(new String[0]); + } + + private List sort(Query query, ElasticsearchPersistentEntity entity) { + + if (query.getSort() == null || query.getSort().isUnsorted()) { + return Collections.emptyList(); + } + + List mappedSort = new ArrayList<>(); + for (Sort.Order order : query.getSort()) { + + FieldSortBuilder sort = SortBuilders.fieldSort(entity.getPersistentProperty(order.getProperty()).getFieldName()) + .order(order.getDirection().isDescending() ? SortOrder.DESC : SortOrder.ASC); + + if (order.getNullHandling() == Sort.NullHandling.NULLS_FIRST) { + sort.missing("_first"); + } else if (order.getNullHandling() == Sort.NullHandling.NULLS_LAST) { + sort.missing("_last"); + } + + mappedSort.add(sort); + } + + return mappedSort; + } + + private QueryBuilder mappedQuery(CriteriaQuery query, ElasticsearchPersistentEntity entity) { + + // TODO: we need to actually map the fields to the according field names! + QueryBuilder elasticsearchQuery = new CriteriaQueryProcessor().createQueryFromCriteria(query.getCriteria()); + return elasticsearchQuery != null ? 
elasticsearchQuery : QueryBuilders.matchAllQuery(); + } + + private QueryBuilder mappedFilterQuery(CriteriaQuery query, ElasticsearchPersistentEntity entity) { + + // TODO: this is actually strange in the RestTemplate:L378 - need to chack + return null; + } + + private ElasticsearchPersistentEntity lookupPersistentEntity(Class type) { + return converter.getMappingContext().getPersistentEntity(type); + } + + private Throwable translateException(Throwable throwable) { + + if (!(throwable instanceof RuntimeException)) { + return throwable; + } + + RuntimeException ex = exceptionTranslator.translateExceptionIfPossible((RuntimeException) throwable); + return ex != null ? ex : throwable; + } + + // Additional types + public interface ClientCallback { + + T doWithClient(ReactiveElasticsearchClient client); + } +} diff --git a/src/test/java/org/springframework/data/elasticsearch/TestUtils.java b/src/test/java/org/springframework/data/elasticsearch/TestUtils.java new file mode 100644 index 000000000..0280a510a --- /dev/null +++ b/src/test/java/org/springframework/data/elasticsearch/TestUtils.java @@ -0,0 +1,63 @@ +/* + * Copyright 2018 the original author or authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
 */

package org.springframework.data.elasticsearch;

import lombok.SneakyThrows;

import org.elasticsearch.ElasticsearchStatusException;
import org.elasticsearch.action.admin.indices.delete.DeleteIndexRequest;
import org.elasticsearch.client.RequestOptions;
import org.elasticsearch.client.RestHighLevelClient;
import org.springframework.data.elasticsearch.client.ElasticsearchClients;
import org.springframework.data.elasticsearch.client.reactive.ReactiveElasticsearchClient;
import org.springframework.util.ObjectUtils;

/**
 * Test helpers: builds clients connected to a local Elasticsearch node and provides best-effort
 * index cleanup for integration tests.
 *
 * @author Christoph Strobl
 * @currentRead Fool's Fate - Robin Hobb
 */
public final class TestUtils {

	// Utility class — not meant to be instantiated.
	private TestUtils() {}

	/**
	 * @return a new blocking {@link RestHighLevelClient} connected to localhost. Caller is responsible
	 *         for closing it.
	 */
	public static RestHighLevelClient restHighLevelClient() {
		return ElasticsearchClients.createClient().connectedToLocalhost().rest();
	}

	/**
	 * @return a new {@link ReactiveElasticsearchClient} connected to localhost.
	 */
	public static ReactiveElasticsearchClient reactiveClient() {
		return ElasticsearchClients.createClient().connectedToLocalhost().reactive();
	}

	/**
	 * Delete the given indices, silently skipping ones that cannot be deleted (e.g. because they do
	 * not exist — the {@link ElasticsearchStatusException} is deliberately swallowed so cleanup stays
	 * best-effort). {@code @SneakyThrows} rethrows the checked {@link java.io.IOException} that closing
	 * the client may raise.
	 *
	 * @param indexes index names to remove; a {@literal null} or empty array is a no-op.
	 */
	@SneakyThrows
	public static void deleteIndex(String... indexes) {

		if (ObjectUtils.isEmpty(indexes)) {
			return;
		}

		// try-with-resources: the client (and its underlying connections) is closed after cleanup.
		try (RestHighLevelClient client = restHighLevelClient()) {
			for (String index : indexes) {

				try {
					client.indices().delete(new DeleteIndexRequest(index), RequestOptions.DEFAULT);
				} catch (ElasticsearchStatusException ex) {
					// just ignore it
				}
			}
		}
	}
}
diff --git a/src/test/java/org/springframework/data/elasticsearch/client/reactive/MultiNodeHostProviderUnitTests.java b/src/test/java/org/springframework/data/elasticsearch/client/reactive/MultiNodeHostProviderUnitTests.java new file mode 100644 index 000000000..5cba3982c --- /dev/null +++ b/src/test/java/org/springframework/data/elasticsearch/client/reactive/MultiNodeHostProviderUnitTests.java @@ -0,0 +1,141 @@
/*
 * Copyright 2018 the original author or authors.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.springframework.data.elasticsearch.client.reactive; + +import static org.assertj.core.api.Assertions.*; +import static org.mockito.Mockito.*; + +import reactor.core.publisher.Mono; +import reactor.test.StepVerifier; + +import org.junit.Before; +import org.junit.Test; +import org.springframework.data.elasticsearch.client.reactive.HostProvider.VerificationMode; +import org.springframework.data.elasticsearch.client.ElasticsearchHost; +import org.springframework.data.elasticsearch.client.ElasticsearchHost.State; +import org.springframework.data.elasticsearch.client.reactive.ReactiveMockClientTestsUtils.MockDelegatingElasticsearchHostProvider; +import org.springframework.data.elasticsearch.client.reactive.ReactiveMockClientTestsUtils.WebClientProvider.Receive; +import org.springframework.web.reactive.function.client.ClientResponse; + +/** + * @author Christoph Strobl + * @currentRead Golden Fool - Robin Hobb + */ +public class MultiNodeHostProviderUnitTests { + + static final String HOST_1 = ":9200"; + static final String HOST_2 = ":9201"; + static final String HOST_3 = ":9202"; + + MockDelegatingElasticsearchHostProvider mock; + MultiNodeHostProvider provider; + + @Before + public void setUp() { + + mock = ReactiveMockClientTestsUtils.multi(HOST_1, HOST_2, HOST_3); + provider = mock.getDelegate(); + } + + @Test // DATAES-488 + public void refreshHostStateShouldUpdateNodeStateCorrectly() { + + mock.when(HOST_1).receive(Receive::error); + mock.when(HOST_2).receive(Receive::ok); + mock.when(HOST_3).receive(Receive::ok); + + 
provider.clusterInfo().as(StepVerifier::create).expectNextCount(1).verifyComplete(); + + assertThat(provider.getCachedHostState()).extracting(ElasticsearchHost::getState).containsExactly(State.OFFLINE, + State.ONLINE, State.ONLINE); + } + + @Test // DATAES-488 + public void getActiveReturnsFirstActiveHost() { + + mock.when(HOST_1).receive(Receive::error); + mock.when(HOST_2).receive(Receive::ok); + mock.when(HOST_3).receive(Receive::error); + + provider.getActive().as(StepVerifier::create).expectNext(mock.client(HOST_2)).verifyComplete(); + } + + @Test // DATAES-488 + public void getActiveErrorsWhenNoActiveHostFound() { + + mock.when(HOST_1).receive(Receive::error); + mock.when(HOST_2).receive(Receive::error); + mock.when(HOST_3).receive(Receive::error); + + provider.getActive().as(StepVerifier::create).expectError(IllegalStateException.class); + } + + @Test // DATAES-488 + public void lazyModeDoesNotResolveHostsTwice() { + + mock.when(HOST_1).receive(Receive::error); + mock.when(HOST_2).receive(Receive::ok); + mock.when(HOST_3).receive(Receive::error); + + provider.clusterInfo().as(StepVerifier::create).expectNextCount(1).verifyComplete(); + + provider.getActive(VerificationMode.LAZY).as(StepVerifier::create).expectNext(mock.client(HOST_2)).verifyComplete(); + + verify(mock.client(":9201")).head(); + } + + @Test // DATAES-488 + public void alwaysModeDoesNotResolveHostsTwice() { + + mock.when(HOST_1).receive(Receive::error); + mock.when(HOST_2).receive(Receive::ok); + mock.when(HOST_3).receive(Receive::error); + + provider.clusterInfo().as(StepVerifier::create).expectNextCount(1).verifyComplete(); + + provider.getActive(VerificationMode.FORCE).as(StepVerifier::create).expectNext(mock.client(HOST_2)) + .verifyComplete(); + + verify(mock.client(HOST_2), times(2)).head(); + } + + @Test // DATAES-488 + public void triesDeadHostsIfNoActiveFound() { + + mock.when(HOST_1).receive(Receive::error); + mock.when(HOST_2).get(requestHeadersUriSpec -> { + + ClientResponse 
response1 = mock(ClientResponse.class); + Receive.error(response1); + + ClientResponse response2 = mock(ClientResponse.class); + Receive.ok(response2); + + when(requestHeadersUriSpec.exchange()).thenReturn(Mono.just(response1), Mono.just(response2)); + }); + + mock.when(HOST_3).receive(Receive::error); + + provider.clusterInfo().as(StepVerifier::create).expectNextCount(1).verifyComplete(); + assertThat(provider.getCachedHostState()).extracting(ElasticsearchHost::getState).containsExactly(State.OFFLINE, + State.OFFLINE, State.OFFLINE); + + provider.getActive().as(StepVerifier::create).expectNext(mock.client(HOST_2)).verifyComplete(); + + verify(mock.client(HOST_2), times(2)).head(); + } +} diff --git a/src/test/java/org/springframework/data/elasticsearch/client/reactive/ReactiveElasticsearchClientTests.java b/src/test/java/org/springframework/data/elasticsearch/client/reactive/ReactiveElasticsearchClientTests.java new file mode 100644 index 000000000..daf34fd3c --- /dev/null +++ b/src/test/java/org/springframework/data/elasticsearch/client/reactive/ReactiveElasticsearchClientTests.java @@ -0,0 +1,467 @@ +/* + * Copyright 2018 the original author or authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.springframework.data.elasticsearch.client.reactive; + +import static org.assertj.core.api.Assertions.*; + +import reactor.test.StepVerifier; + +import java.io.IOException; +import java.util.Collections; +import java.util.LinkedHashMap; +import java.util.Map; +import java.util.UUID; + +import org.elasticsearch.ElasticsearchStatusException; +import org.elasticsearch.Version; +import org.elasticsearch.action.admin.indices.create.CreateIndexRequest; +import org.elasticsearch.action.delete.DeleteRequest; +import org.elasticsearch.action.get.GetRequest; +import org.elasticsearch.action.get.MultiGetRequest; +import org.elasticsearch.action.index.IndexRequest; +import org.elasticsearch.action.search.SearchRequest; +import org.elasticsearch.action.support.WriteRequest.RefreshPolicy; +import org.elasticsearch.action.update.UpdateRequest; +import org.elasticsearch.client.RestHighLevelClient; +import org.elasticsearch.index.query.QueryBuilders; +import org.elasticsearch.rest.RestStatus; +import org.elasticsearch.search.builder.SearchSourceBuilder; +import org.junit.After; +import org.junit.Before; +import org.junit.Test; +import org.springframework.data.elasticsearch.TestUtils; +import org.springframework.http.HttpHeaders; +import org.springframework.lang.Nullable; + +/** + * @author Christoph Strobl + * @currentRead Fool's Fate - Robin Hobb + */ +public class ReactiveElasticsearchClientTests { + + static final String INDEX_I = "idx-1-reactive-client-tests"; + static final String INDEX_II = "idx-2-reactive-client-tests"; + + static final String TYPE_I = "doc-type-1"; + static final String TYPE_II = "doc-type-2"; + + static final Map DOC_SOURCE; + + RestHighLevelClient syncClient; + ReactiveElasticsearchClient client; + + static { + + Map source = new LinkedHashMap<>(); + source.put("firstname", "chade"); + source.put("lastname", "fallstar"); + + DOC_SOURCE = Collections.unmodifiableMap(source); + } + + @Before + public void setUp() { + + syncClient = 
TestUtils.restHighLevelClient(); + client = TestUtils.reactiveClient(); + } + + @After + public void after() throws IOException { + + TestUtils.deleteIndex(INDEX_I, INDEX_II); + + syncClient.close(); + } + + @Test // DATAES-488 + public void pingForActiveHostShouldReturnTrue() { + client.ping().as(StepVerifier::create) // + .expectNext(true) // + .verifyComplete(); + } + + @Test // DATAES-488 + public void pingForUnknownHostShouldReturnFalse() { + + DefaultReactiveElasticsearchClient.create(HttpHeaders.EMPTY, "http://localhost:4711").ping() // + .as(StepVerifier::create) // + .expectNext(false) // + .verifyComplete(); + } + + @Test // DATAES-488 + public void infoShouldReturnClusterInformation() { + + client.info().as(StepVerifier::create) // + .consumeNextWith(it -> { + + assertThat(it.isAvailable()).isTrue(); + assertThat(it.getVersion()).isGreaterThanOrEqualTo(Version.CURRENT); + }) // + .verifyComplete(); + } + + @Test // DATAES-488 + public void getShouldFetchDocumentById() { + + String id = addSourceDocument().ofType(TYPE_I).to(INDEX_I); + + client.get(new GetRequest(INDEX_I, TYPE_I, id)) // + .as(StepVerifier::create) // + .consumeNextWith(it -> { + + assertThat(it.getId()).isEqualTo(id); + assertThat(it.getSource()).containsAllEntriesOf(DOC_SOURCE); + }) // + .verifyComplete(); + } + + @Test // DATAES-488 + public void getShouldCompleteForNonExistingDocuments() { + + addSourceDocument().ofType(TYPE_I).to(INDEX_I); + + String id = "this-one-does-not-exist"; + client.get(new GetRequest(INDEX_I, TYPE_I, id)) // + .as(StepVerifier::create) // + .verifyComplete(); + } + + @Test // DATAES-488 + public void getShouldCompleteForNonExistingType() { + + String id = addSourceDocument().ofType(TYPE_I).to(INDEX_I); + + client.get(new GetRequest(INDEX_I, "fantasy-books", id)) // + .as(StepVerifier::create) // + .verifyComplete(); + } + + @Test // DATAES-488 + public void multiGetShouldReturnAllDocumentsFromSameCollection() { + + String id1 = 
addSourceDocument().ofType(TYPE_I).to(INDEX_I); + String id2 = addSourceDocument().ofType(TYPE_I).to(INDEX_I); + + MultiGetRequest request = new MultiGetRequest() // + .add(INDEX_I, TYPE_I, id1) // + .add(INDEX_I, TYPE_I, id2); + + client.multiGet(request) // + .as(StepVerifier::create) // + .consumeNextWith(it -> { + assertThat(it.getId()).isEqualTo(id1); + }) // + .consumeNextWith(it -> { + assertThat(it.getId()).isEqualTo(id2); + }) // + .verifyComplete(); + } + + @Test // DATAES-488 + public void multiGetShouldReturnAllExistingDocumentsFromSameCollection() { + + String id1 = addSourceDocument().ofType(TYPE_I).to(INDEX_I); + addSourceDocument().ofType(TYPE_I).to(INDEX_I); + + MultiGetRequest request = new MultiGetRequest() // + .add(INDEX_I, TYPE_I, id1) // + .add(INDEX_I, TYPE_I, "this-one-does-not-exist"); + + client.multiGet(request) // + .as(StepVerifier::create) // + .consumeNextWith(it -> { + assertThat(it.getId()).isEqualTo(id1); + }) // + .verifyComplete(); + } + + @Test // DATAES-488 + public void multiGetShouldSkipNonExistingDocuments() { + + String id1 = addSourceDocument().ofType(TYPE_I).to(INDEX_I); + String id2 = addSourceDocument().ofType(TYPE_I).to(INDEX_I); + + MultiGetRequest request = new MultiGetRequest() // + .add(INDEX_I, TYPE_I, id1) // + .add(INDEX_I, TYPE_I, "this-one-does-not-exist") // + .add(INDEX_I, TYPE_I, id2); // + + client.multiGet(request) // + .as(StepVerifier::create) // + .consumeNextWith(it -> { + assertThat(it.getId()).isEqualTo(id1); + }) // + .consumeNextWith(it -> { + assertThat(it.getId()).isEqualTo(id2); + }) // + .verifyComplete(); + } + + @Test // DATAES-488 + public void multiGetShouldCompleteIfNothingFound() { + + String id1 = addSourceDocument().ofType(TYPE_I).to(INDEX_I); + String id2 = addSourceDocument().ofType(TYPE_I).to(INDEX_I); + + client.multiGet(new MultiGetRequest().add(INDEX_II, TYPE_I, id1).add(INDEX_II, TYPE_I, id2)) // + .as(StepVerifier::create) // + .verifyComplete(); + } + + @Test // DATAES-488 + 
public void multiGetShouldReturnAllExistingDocumentsFromDifferentCollection() { + + String id1 = addSourceDocument().ofType(TYPE_I).to(INDEX_I); + String id2 = addSourceDocument().ofType(TYPE_II).to(INDEX_II); + + MultiGetRequest request = new MultiGetRequest() // + .add(INDEX_I, TYPE_I, id1) // + .add(INDEX_II, TYPE_II, id2); + + client.multiGet(request) // + .as(StepVerifier::create) // + .consumeNextWith(it -> { + assertThat(it.getId()).isEqualTo(id1); + }) // + .consumeNextWith(it -> { + assertThat(it.getId()).isEqualTo(id2); + }) // + .verifyComplete(); + } + + @Test // DATAES-488 + public void existsReturnsTrueForExistingDocuments() { + + String id = addSourceDocument().ofType(TYPE_I).to(INDEX_I); + + client.exists(new GetRequest(INDEX_I, TYPE_I, id)) // + .as(StepVerifier::create) // + .expectNext(true)// + .verifyComplete(); + } + + @Test // DATAES-488 + public void existsReturnsFalseForNonExistingDocuments() { + + String id = addSourceDocument().ofType(TYPE_I).to(INDEX_I); + + client.exists(new GetRequest(INDEX_II, TYPE_I, id)) // + .as(StepVerifier::create) // + .expectNext(false)// + .verifyComplete(); + } + + @Test // DATAES-488 + public void indexShouldAddDocument() { + + IndexRequest request = indexRequest(DOC_SOURCE, INDEX_I, TYPE_I); + + client.index(request) // + .as(StepVerifier::create) // + .consumeNextWith(it -> { + + assertThat(it.status()).isEqualTo(RestStatus.CREATED); + assertThat(it.getId()).isEqualTo(request.id()); + })// + .verifyComplete(); + } + + @Test // DATAES-488 + public void indexShouldErrorForExistingDocuments() { + + String id = addSourceDocument().ofType(TYPE_I).to(INDEX_I); + + IndexRequest request = indexRequest(DOC_SOURCE, INDEX_I, TYPE_I)// + .id(id); + + client.index(request) // + .as(StepVerifier::create) // + .consumeErrorWith(error -> { + assertThat(error).isInstanceOf(ElasticsearchStatusException.class); + }) // + .verify(); + } + + @Test // DATAES-488 + public void 
updateShouldUpsertNonExistingDocumentWhenUsedWithUpsert() { + + String id = UUID.randomUUID().toString(); + UpdateRequest request = new UpdateRequest(INDEX_I, TYPE_I, id) // + .doc(DOC_SOURCE) // + .docAsUpsert(true); + + client.update(request) // + .as(StepVerifier::create) // + .consumeNextWith(it -> { + + assertThat(it.status()).isEqualTo(RestStatus.CREATED); + assertThat(it.getId()).isEqualTo(id); + }) // + .verifyComplete(); + } + + @Test // DATAES-488 + public void updateShouldUpdateExistingDocument() { + + String id = addSourceDocument().ofType(TYPE_I).to(INDEX_I); + + UpdateRequest request = new UpdateRequest(INDEX_I, TYPE_I, id) // + .doc(Collections.singletonMap("dutiful", "farseer")); + + client.update(request) // + .as(StepVerifier::create) // + .consumeNextWith(it -> { + + assertThat(it.status()).isEqualTo(RestStatus.OK); + assertThat(it.getId()).isEqualTo(id); + assertThat(it.getVersion()).isEqualTo(2); + }) // + .verifyComplete(); + } + + @Test // DATAES-488 + public void updateShouldErrorNonExistingDocumentWhenNotUpserted() { + + String id = UUID.randomUUID().toString(); + UpdateRequest request = new UpdateRequest(INDEX_I, TYPE_I, id) // + .doc(DOC_SOURCE); + + client.update(request) // + .as(StepVerifier::create) // + .consumeErrorWith(error -> { + assertThat(error).isInstanceOf(ElasticsearchStatusException.class); + }) // + .verify(); + } + + @Test // DATAES-488 + public void deleteShouldRemoveExistingDocument() { + + String id = addSourceDocument().ofType(TYPE_I).to(INDEX_I); + + DeleteRequest request = new DeleteRequest(INDEX_I, TYPE_I, id); + + client.delete(request) // + .as(StepVerifier::create) // + .consumeNextWith(it -> { + assertThat(it.status()).isEqualTo(RestStatus.OK); + }) // + .verifyComplete(); + } + + @Test // DATAES-488 + public void deleteShouldReturnNotFoundForNonExistingDocument() { + + addSourceDocument().ofType(TYPE_I).to(INDEX_I); + + DeleteRequest request = new DeleteRequest(INDEX_I, TYPE_I, "this-one-does-not-exist"); + + 
client.delete(request) // + .as(StepVerifier::create) // + .consumeNextWith(it -> { + assertThat(it.status()).isEqualTo(RestStatus.NOT_FOUND); + }) // + .verifyComplete(); + } + + @Test // DATAES-488 + public void searchShouldFindExistingDocuments() { + + addSourceDocument().ofType(TYPE_I).to(INDEX_I); + addSourceDocument().ofType(TYPE_I).to(INDEX_I); + + SearchRequest request = new SearchRequest(INDEX_I).types(TYPE_I) // + .source(new SearchSourceBuilder().query(QueryBuilders.matchAllQuery())); + + client.search(request) // + .as(StepVerifier::create) // + .expectNextCount(2) // + .verifyComplete(); + } + + @Test // DATAES-488 + public void searchShouldCompleteIfNothingFound() throws IOException { + + syncClient.indices().create(new CreateIndexRequest(INDEX_I)); + + SearchRequest request = new SearchRequest(INDEX_I).types(TYPE_I) // + .source(new SearchSourceBuilder().query(QueryBuilders.matchAllQuery())); + + client.search(request) // + .as(StepVerifier::create) // + .verifyComplete(); + } + + AddToIndexOfType addSourceDocument() { + return add(DOC_SOURCE); + } + + AddToIndexOfType add(Map source) { + return new AddDocument(source); + } + + IndexRequest indexRequest(Map source, String index, String type) { + + return new IndexRequest(index, type) // + .id(UUID.randomUUID().toString()) // + .source(source) // + .setRefreshPolicy(RefreshPolicy.IMMEDIATE) // + .create(true); + } + + String doIndex(Map source, String index, String type) { + + try { + return syncClient.index(indexRequest(source, index, type)).getId(); + } catch (IOException e) { + throw new RuntimeException(e); + } + } + + interface AddToIndexOfType extends AddToIndex { + AddToIndex ofType(String type); + } + + interface AddToIndex { + String to(String index); + } + + class AddDocument implements AddToIndexOfType { + + Map source; + @Nullable String type; + + AddDocument(Map source) { + this.source = source; + } + + @Override + public AddToIndex ofType(String type) { + + this.type = type; + return 
this; + } + + @Override + public String to(String index) { + return doIndex(new LinkedHashMap(source), index, type); + } + } + +} diff --git a/src/test/java/org/springframework/data/elasticsearch/client/reactive/ReactiveElasticsearchClientUnitTests.java b/src/test/java/org/springframework/data/elasticsearch/client/reactive/ReactiveElasticsearchClientUnitTests.java new file mode 100644 index 000000000..0729a11f6 --- /dev/null +++ b/src/test/java/org/springframework/data/elasticsearch/client/reactive/ReactiveElasticsearchClientUnitTests.java @@ -0,0 +1,554 @@ +/* + * Copyright 2018 the original author or authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.springframework.data.elasticsearch.client.reactive; + +import static org.assertj.core.api.Assertions.*; +import static org.mockito.ArgumentMatchers.any; +import static org.mockito.ArgumentMatchers.eq; +import static org.mockito.Mockito.*; +import static org.springframework.data.elasticsearch.client.reactive.ReactiveMockClientTestsUtils.WebClientProvider.Receive.*; + +import reactor.test.StepVerifier; + +import java.util.Collections; +import java.util.Map; + +import org.elasticsearch.ElasticsearchStatusException; +import org.elasticsearch.action.DocWriteResponse.Result; +import org.elasticsearch.action.delete.DeleteRequest; +import org.elasticsearch.action.get.GetRequest; +import org.elasticsearch.action.get.MultiGetRequest; +import org.elasticsearch.action.index.IndexRequest; +import org.elasticsearch.action.search.SearchRequest; +import org.elasticsearch.action.update.UpdateRequest; +import org.elasticsearch.common.xcontent.XContentType; +import org.junit.Before; +import org.junit.Test; +import org.reactivestreams.Publisher; +import org.springframework.data.elasticsearch.client.reactive.ReactiveMockClientTestsUtils.MockDelegatingElasticsearchHostProvider; +import org.springframework.data.elasticsearch.client.reactive.ReactiveMockClientTestsUtils.WebClientProvider.Receive; +import org.springframework.http.HttpMethod; +import org.springframework.http.HttpStatus; +import org.springframework.http.MediaType; + +/** + * @author Christoph Strobl + * @currentRead Golden Fool - Robin Hobb + */ +public class ReactiveElasticsearchClientUnitTests { + + static final String HOST = ":9200"; + + MockDelegatingElasticsearchHostProvider hostProvider; + ReactiveElasticsearchClient client; + + @Before + public void setUp() { + + hostProvider = ReactiveMockClientTestsUtils.provider(HOST).withActiveDefaultHost(HOST); + client = new DefaultReactiveElasticsearchClient(hostProvider); + } + + // --> PING + + @Test + public void pingShouldHitMainEndpoint() { + + 
hostProvider.when(HOST) // + .receive(Receive::ok); + + client.ping() // + .then() // + .as(StepVerifier::create) // + .verifyComplete(); + + hostProvider.when(HOST).exchange(requestBodyUriSpec -> { + verify(requestBodyUriSpec).uri(eq("/"), any(Map.class)); + }); + } + + @Test // DATAES-488 + public void pingShouldReturnTrueOnHttp200() { + + hostProvider.when(HOST) // + .receive(Receive::ok); + + client.ping() // + .as(StepVerifier::create) // + .expectNext(true) // + .verifyComplete(); + } + + @Test // DATAES-488 + public void pingShouldReturnFalseOnNonHttp200() { + + hostProvider.when(HOST) // + .receive(Receive::error); + + client.ping() // + .as(StepVerifier::create) // + .expectNext(false) // + .verifyComplete(); + } + + // --> INFO + + @Test + public void infoShouldHitMainEndpoint() { + + hostProvider.when(HOST) // + .receiveInfo(); + + client.info() // + .then() // + .as(StepVerifier::create) // + .verifyComplete(); + + hostProvider.when(HOST).exchange(requestBodyUriSpec -> { + verify(requestBodyUriSpec).uri(eq("/"), any(Map.class)); + }); + } + + @Test // DATAES-488 + public void infoShouldReturnResponseCorrectly() { + + hostProvider.when(HOST) // + .receiveInfo(); + + client.info() // + .as(StepVerifier::create) // + .consumeNextWith(mainResponse -> {}) // + .verifyComplete(); + } + + // --> GET + + @Test // DATAES-488 + public void getShouldHitGetEndpoint() { + + hostProvider.when(HOST).receive(clientResponse -> { + when(clientResponse.statusCode()).thenReturn(HttpStatus.ACCEPTED, HttpStatus.NOT_FOUND); + }); + + hostProvider.when(HOST) // + .receiveGetByIdNotFound(); + + client.get(new GetRequest("twitter").id("1")) // + .then() // + .as(StepVerifier::create) // + .verifyComplete(); + + verify(hostProvider.client(HOST)).method(HttpMethod.GET); + hostProvider.when(HOST).exchange(requestBodyUriSpec -> { + verify(requestBodyUriSpec).uri(eq("/twitter/_all/1"), any(Map.class)); + }); + } + + @Test // DATAES-488 + public void getShouldReturnExistingDocument() 
{ + + hostProvider.when(HOST) // + .receiveGetById(); + + client.get(new GetRequest("twitter").id("1")) // + .as(StepVerifier::create) // + .consumeNextWith(result -> { + + assertThat(result.isExists()).isTrue(); + assertThat(result.getIndex()).isEqualTo("twitter"); + assertThat(result.getId()).isEqualTo("1"); + assertThat(result.getSource()) // + .containsEntry("user", "kimchy") // + .containsEntry("message", "Trying out Elasticsearch, so far so good?") // + .containsKey("post_date"); + }) // + .verifyComplete(); + } + + @Test // DATAES-488 + public void getShouldReturnEmptyForNonExisting() { + + hostProvider.when(HOST) // + .receiveGetByIdNotFound(); + + client.get(new GetRequest("twitter").id("1")) // + .as(StepVerifier::create) // + .verifyComplete(); + } + + // --> MGET + + @Test // DATAES-488 + public void multiGetShouldHitMGetEndpoint() { + + hostProvider.when(HOST) // + .receiveJsonFromFile("multi-get-ok-2-hits"); + + client.multiGet(new MultiGetRequest().add("twitter", "_doc", "1").add("twitter", "_doc", "2")) // + .then() // + .as(StepVerifier::create) // + .verifyComplete(); + + verify(hostProvider.client(HOST)).method(HttpMethod.POST); + + hostProvider.when(HOST).exchange(requestBodyUriSpec -> { + + verify(requestBodyUriSpec).uri(eq("/_mget"), any(Map.class)); + verify(requestBodyUriSpec).body(any(Publisher.class), any(Class.class)); + }); + } + + @Test // DATAES-488 + public void multiGetShouldReturnExistingDocuments() { + + hostProvider.when(HOST) // + .receiveJsonFromFile("multi-get-ok-2-hits"); + + client.multiGet(new MultiGetRequest().add("twitter", "_doc", "1").add("twitter", "_doc", "2")) // + .as(StepVerifier::create) // + .consumeNextWith(result -> { + + assertThat(result.isExists()).isTrue(); + assertThat(result.getIndex()).isEqualTo("twitter"); + assertThat(result.getId()).isEqualTo("1"); + assertThat(result.getSource()) // + .containsEntry("user", "kimchy") // + .containsEntry("message", "Trying out Elasticsearch, so far so good?") // + 
.containsKey("post_date"); + }) // + .consumeNextWith(result -> { + + assertThat(result.isExists()).isTrue(); + assertThat(result.getIndex()).isEqualTo("twitter"); + assertThat(result.getId()).isEqualTo("2"); + assertThat(result.getSource()) // + .containsEntry("user", "kimchy") // + .containsEntry("message", "Another tweet, will it be indexed?") // + .containsKey("post_date"); + }) // + .verifyComplete(); + } + + @Test // DATAES-488 + public void multiGetShouldWorkForNonExistingDocuments() { + + hostProvider.when(HOST) // + .receiveJsonFromFile("multi-get-ok-2-hits-1-unavailable"); + + client.multiGet(new MultiGetRequest().add("twitter", "_doc", "1").add("twitter", "_doc", "2")) // + .as(StepVerifier::create) // + .consumeNextWith(result -> { + + assertThat(result.isExists()).isTrue(); + assertThat(result.getIndex()).isEqualTo("twitter"); + assertThat(result.getId()).isEqualTo("1"); + assertThat(result.getSource()) // + .containsEntry("user", "kimchy") // + .containsEntry("message", "Trying out Elasticsearch, so far so good?") // + .containsKey("post_date"); + }) // + .consumeNextWith(result -> { + + assertThat(result.isExists()).isTrue(); + assertThat(result.getIndex()).isEqualTo("twitter"); + assertThat(result.getId()).isEqualTo("3"); + assertThat(result.getSource()) // + .containsEntry("user", "elastic") // + .containsEntry("message", "Building the site, should be kewl") // + .containsKey("post_date"); + }) // + .verifyComplete(); + } + + // --> EXISTS + + @Test // DATAES-488 + public void existsShouldHitGetEndpoint() { + + hostProvider.when(HOST) // + .receiveGetById(); + + client.exists(new GetRequest("twitter").id("1")) // + .then() // + .as(StepVerifier::create) // + .verifyComplete(); + + verify(hostProvider.client(HOST)).method(HttpMethod.HEAD); + + hostProvider.when(HOST).exchange(requestBodyUriSpec -> { + verify(requestBodyUriSpec).uri(eq("/twitter/_all/1"), any(Map.class)); + }); + } + + @Test // DATAES-488 + public void 
existsShouldReturnTrueIfExists() { + + hostProvider.when(HOST) // + .receiveGetById(); + + client.exists(new GetRequest("twitter").id("1")) // + .as(StepVerifier::create) // + .expectNext(true).verifyComplete(); + } + + @Test // DATAES-488 + public void existsShouldReturnFalseIfNotExists() { + + hostProvider.when(HOST) // + .receiveGetByIdNotFound(); + + client.exists(new GetRequest("twitter").id("1")) // + .as(StepVerifier::create) // + .expectNext(false).verifyComplete(); + } + + // --> INDEX + + @Test // DATAES-488 + public void indexNewShouldHitCreateEndpoint() { + + hostProvider.when(HOST) // + .receiveIndexCreated(); + + client.index(new IndexRequest("twitter").id("10").create(true).source(" { foo : \"bar\" }", XContentType.JSON)) + .then() // + .as(StepVerifier::create) // + .verifyComplete(); + + verify(hostProvider.client(HOST)).method(HttpMethod.PUT); + hostProvider.when(HOST).exchange(requestBodyUriSpec -> { + + verify(requestBodyUriSpec).uri(eq("/twitter/10/_create"), any(Map.class)); + verify(requestBodyUriSpec).contentType(MediaType.APPLICATION_JSON); + }); + } + + @Test // DATAES-488 + public void indexExistingShouldHitEndpointCorrectly() { + + hostProvider.when(HOST) // + .receiveIndexUpdated(); + + client.index(new IndexRequest("twitter").id("10").source(" { foo : \"bar\" }", XContentType.JSON)).then() // + .as(StepVerifier::create) // + .verifyComplete(); + + verify(hostProvider.client(HOST)).method(HttpMethod.PUT); + hostProvider.when(HOST).exchange(requestBodyUriSpec -> { + + verify(requestBodyUriSpec).uri(eq("/twitter/10"), any(Map.class)); + verify(requestBodyUriSpec).contentType(MediaType.APPLICATION_JSON); + }); + } + + @Test // DATAES-488 + public void indexShouldReturnCreatedWhenNewDocumentIndexed() { + + hostProvider.when(HOST) // + .receiveIndexCreated(); + + client.index(new IndexRequest("twitter").id("10").create(true).source(" { foo : \"bar\" }", XContentType.JSON)) + .as(StepVerifier::create) // + .consumeNextWith(response -> { + + 
assertThat(response.getId()).isEqualTo("10"); + assertThat(response.getIndex()).isEqualTo("twitter"); + assertThat(response.getResult()).isEqualTo(Result.CREATED); + }) // + .verifyComplete(); + } + + @Test // DATAES-488 + public void indexShouldReturnUpdatedWhenExistingDocumentIndexed() { + + hostProvider.when(HOST) // + .receiveIndexUpdated(); + + client.index(new IndexRequest("twitter").id("1").source(" { foo : \"bar\" }", XContentType.JSON)) + .as(StepVerifier::create) // + .consumeNextWith(response -> { + + assertThat(response.getId()).isEqualTo("1"); + assertThat(response.getIndex()).isEqualTo("twitter"); + assertThat(response.getResult()).isEqualTo(Result.UPDATED); + }) // + .verifyComplete(); + } + + // --> UPDATE + + @Test // DATAES-488 + public void updateShouldHitEndpointCorrectly() { + + hostProvider.when(HOST) // + .receiveUpdateOk(); + + client.update(new UpdateRequest("twitter", "doc", "1").doc(Collections.singletonMap("user", "cstrobl"))).then() // + .as(StepVerifier::create) // + .verifyComplete(); + + verify(hostProvider.client(HOST)).method(HttpMethod.POST); + hostProvider.when(HOST).exchange(requestBodyUriSpec -> { + + verify(requestBodyUriSpec).uri(eq("/twitter/doc/1/_update"), any(Map.class)); + verify(requestBodyUriSpec).contentType(MediaType.APPLICATION_JSON); + }); + } + + @Test // DATAES-488 + public void updateShouldEmitResponseCorrectly() { + + hostProvider.when(HOST) // + .receiveUpdateOk(); + + client.update(new UpdateRequest("twitter", "doc", "1").doc(Collections.singletonMap("user", "cstrobl"))) + .as(StepVerifier::create) // + .consumeNextWith(updateResponse -> { + + assertThat(updateResponse.getResult()).isEqualTo(Result.UPDATED); + assertThat(updateResponse.getVersion()).isEqualTo(2); + assertThat(updateResponse.getId()).isEqualTo("1"); + assertThat(updateResponse.getIndex()).isEqualTo("twitter"); + }) // + .verifyComplete(); + } + + @Test // DATAES-488 + public void updateShouldEmitErrorWhenNotFound() { + + 
hostProvider.when(HOST) // + .updateFail(); + + client.update(new UpdateRequest("twitter", "doc", "1").doc(Collections.singletonMap("user", "cstrobl"))) + .as(StepVerifier::create) // + .expectError(ElasticsearchStatusException.class) // + .verify(); + } + + // --> DELETE + + @Test // DATAES-488 + public void deleteShouldHitEndpointCorrectly() { + + hostProvider.when(HOST) // + .receiveDeleteOk(); + + client.delete(new DeleteRequest("twitter", "doc", "1")).then() // + .as(StepVerifier::create) // + .verifyComplete(); + + verify(hostProvider.client(HOST)).method(HttpMethod.DELETE); + hostProvider.when(HOST).exchange(requestBodyUriSpec -> { + verify(requestBodyUriSpec).uri(eq("/twitter/doc/1"), any(Map.class)); + }); + } + + @Test // DATAES-488 + public void deleteShouldEmitResponseCorrectly() { + + hostProvider.when(HOST) // + .receiveDeleteOk(); + + client.delete(new DeleteRequest("twitter", "doc", "1")) // + .as(StepVerifier::create) // + .consumeNextWith(deleteResponse -> { + + assertThat(deleteResponse.getResult()).isEqualTo(Result.DELETED); + assertThat(deleteResponse.getVersion()).isEqualTo(1); + assertThat(deleteResponse.getId()).isEqualTo("1"); + assertThat(deleteResponse.getIndex()).isEqualTo("twitter"); + }) // + .verifyComplete(); + } + + // --> SEARCH + + @Test // DATAES-488 + public void searchShouldHitSearchEndpoint() { + + hostProvider.when(HOST) // + .receiveSearchOk(); + + client.search(new SearchRequest("twitter")).as(StepVerifier::create).verifyComplete(); + + verify(hostProvider.client(HOST)).method(HttpMethod.POST); + hostProvider.when(HOST).exchange(requestBodyUriSpec -> { + verify(requestBodyUriSpec).uri(eq("/twitter/_search"), any(Map.class)); + }); + } + + @Test // DATAES-488 + public void searchShouldReturnSingleResultCorrectly() { + + hostProvider.when(HOST) // + .receive(Receive::json) // + .body(fromPath("search-ok-single-hit")); + + client.search(new SearchRequest("twitter")) // + .as(StepVerifier::create) // + .consumeNextWith(hit -> { 
+ + assertThat(hit.getId()).isEqualTo("2"); + assertThat(hit.getIndex()).isEqualTo("twitter"); + assertThat(hit.getSourceAsMap()) // + .containsEntry("user", "kimchy") // + .containsEntry("message", "Another tweet, will it be indexed?") // + .containsKey("post_date"); + }).verifyComplete(); + } + + @Test // DATAES-488 + public void searchShouldReturnMultipleResultsCorrectly() { + + hostProvider.when(HOST) // + .receive(Receive::json) // + .body(fromPath("search-ok-multiple-hits")); + + client.search(new SearchRequest("twitter")) // + .as(StepVerifier::create) // + .consumeNextWith(hit -> { + + assertThat(hit.getId()).isEqualTo("2"); + assertThat(hit.getIndex()).isEqualTo("twitter"); + assertThat(hit.getSourceAsMap()) // + .containsEntry("user", "kimchy") // + .containsEntry("message", "Another tweet, will it be indexed?") // + .containsKey("post_date"); + }) // + .consumeNextWith(hit -> { + + assertThat(hit.getId()).isEqualTo("1"); + assertThat(hit.getIndex()).isEqualTo("twitter"); + assertThat(hit.getSourceAsMap()) // + .containsEntry("user", "kimchy") // + .containsEntry("message", "Trying out Elasticsearch, so far so good?") // + .containsKey("post_date"); + }).verifyComplete(); + } + + @Test // DATAES-488 + public void searchShouldReturnEmptyFluxIfNothingFound() { + + hostProvider.when(HOST) // + .receiveSearchOk(); + + client.search(new SearchRequest("twitter")) // + .as(StepVerifier::create) // + .verifyComplete(); + } + +} diff --git a/src/test/java/org/springframework/data/elasticsearch/client/reactive/ReactiveMockClientTestsUtils.java b/src/test/java/org/springframework/data/elasticsearch/client/reactive/ReactiveMockClientTestsUtils.java new file mode 100644 index 000000000..96e649999 --- /dev/null +++ b/src/test/java/org/springframework/data/elasticsearch/client/reactive/ReactiveMockClientTestsUtils.java @@ -0,0 +1,432 @@ +/* + * Copyright 2018 the original author or authors. 
+ * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.springframework.data.elasticsearch.client.reactive; + +import static org.mockito.Mockito.*; + +import reactor.core.publisher.Flux; +import reactor.core.publisher.Mono; + +import java.io.IOException; +import java.nio.charset.StandardCharsets; +import java.util.Collections; +import java.util.LinkedHashMap; +import java.util.List; +import java.util.Map; +import java.util.Optional; +import java.util.concurrent.CopyOnWriteArrayList; +import java.util.function.Consumer; +import java.util.function.Supplier; + +import org.mockito.Mockito; +import org.springframework.core.io.ClassPathResource; +import org.springframework.core.io.Resource; +import org.springframework.core.io.buffer.DataBuffer; +import org.springframework.core.io.buffer.DefaultDataBufferFactory; +import org.springframework.data.elasticsearch.client.ElasticsearchHost; +import org.springframework.data.elasticsearch.client.reactive.ReactiveMockClientTestsUtils.WebClientProvider.Send; +import org.springframework.http.HttpHeaders; +import org.springframework.http.HttpStatus; +import org.springframework.http.MediaType; +import org.springframework.lang.Nullable; +import org.springframework.util.StreamUtils; +import org.springframework.util.StringUtils; +import org.springframework.web.reactive.function.client.ClientResponse; +import org.springframework.web.reactive.function.client.WebClient; +import 
org.springframework.web.reactive.function.client.WebClient.RequestBodyUriSpec; +import org.springframework.web.reactive.function.client.WebClient.RequestHeadersUriSpec; + +/** + * @author Christoph Strobl + * @since 2018/10 + */ +public class ReactiveMockClientTestsUtils { + + public static MockDelegatingElasticsearchHostProvider single(String host) { + return provider(host); + } + + public static MockDelegatingElasticsearchHostProvider multi(String... hosts) { + return provider(hosts); + } + + public static MockDelegatingElasticsearchHostProvider provider(String... hosts) { + + WebClientProvider clientProvider = new WebClientProvider(); + ErrorCollector errorCollector = new ErrorCollector(); + HostProvider delegate = null; + + if (hosts.length == 1) { + + delegate = new SingleNodeHostProvider(HttpHeaders.EMPTY, errorCollector, hosts[0]) { + @Override // hook in there to modify result + public WebClient createWebClient(String host, HttpHeaders headers) { + return clientProvider.get(host); + } + }; + } else { + + delegate = new MultiNodeHostProvider(HttpHeaders.EMPTY, errorCollector, hosts) { + @Override // hook in there to modify result + public WebClient createWebClient(String host, HttpHeaders headers) { + return clientProvider.get(host); + } + }; + } + + return new MockDelegatingElasticsearchHostProvider(HttpHeaders.EMPTY, clientProvider, errorCollector, delegate, + null); + + } + + public static class ErrorCollector implements Consumer { + + List errors = new CopyOnWriteArrayList<>(); + + @Override + public void accept(Throwable throwable) { + errors.add(throwable); + } + + List captured() { + return Collections.unmodifiableList(errors); + } + } + + public static class MockDelegatingElasticsearchHostProvider implements HostProvider { + + private final T delegate; + private final WebClientProvider clientProvider; + private final ErrorCollector errorCollector; + private @Nullable String activeDefaultHost; + + public 
MockDelegatingElasticsearchHostProvider(HttpHeaders httpHeaders, WebClientProvider clientProvider, + ErrorCollector errorCollector, T delegate, String activeDefaultHost) { + + this.errorCollector = errorCollector; + this.clientProvider = clientProvider; + this.delegate = delegate; + this.activeDefaultHost = activeDefaultHost; + } + + public Mono lookupActiveHost() { + return delegate.lookupActiveHost(); + } + + public Mono lookupActiveHost(VerificationMode verificationMode) { + + if (StringUtils.hasText(activeDefaultHost)) { + return Mono.just(activeDefaultHost); + } + + return delegate.lookupActiveHost(verificationMode); + } + + public Mono getActive() { + return delegate.getActive(); + } + + public Mono getActive(VerificationMode verificationMode) { + return delegate.getActive(verificationMode); + } + + public Mono getActive(VerificationMode verificationMode, HttpHeaders headers) { + return delegate.getActive(verificationMode, headers); + } + + public WebClient createWebClient(String host, HttpHeaders headers) { + return delegate.createWebClient(host, headers); + } + + @Override + public Mono clusterInfo() { + + if (StringUtils.hasText(activeDefaultHost)) { + return Mono.just(new ClusterInformation(Collections.singleton(ElasticsearchHost.online(activeDefaultHost)))); + } + + return delegate.clusterInfo(); + } + + @Override + public HttpHeaders getDefaultHeaders() { + return delegate.getDefaultHeaders(); + } + + @Override + public HostProvider withDefaultHeaders(HttpHeaders headers) { + throw new UnsupportedOperationException(); + } + + public Send when(String host) { + return clientProvider.when(host); + } + + public WebClient client(String host) { + return clientProvider.when(host).client(); + } + + public List errors() { + return errorCollector.captured(); + } + + public T getDelegate() { + return delegate; + } + + @Override + public HostProvider withErrorListener(Consumer errorListener) { + throw new UnsupportedOperationException(); + } + + public 
MockDelegatingElasticsearchHostProvider withActiveDefaultHost(String host) { + return new MockDelegatingElasticsearchHostProvider(HttpHeaders.EMPTY, clientProvider, errorCollector, delegate, + host); + } + } + + public static class WebClientProvider { + + private final Object lock = new Object(); + + private Map clientMap; + private Map headersUriSpecMap; + private Map bodyUriSpecMap; + private Map responseMap; + + public WebClientProvider() { + + this.clientMap = new LinkedHashMap<>(); + this.headersUriSpecMap = new LinkedHashMap<>(); + this.bodyUriSpecMap = new LinkedHashMap<>(); + this.responseMap = new LinkedHashMap<>(); + } + + public WebClient get(String host) { + + synchronized (lock) { + + return clientMap.computeIfAbsent(host, key -> { + + WebClient webClient = mock(WebClient.class); + + RequestHeadersUriSpec headersUriSpec = mock(RequestHeadersUriSpec.class); + Mockito.when(webClient.get()).thenReturn(headersUriSpec); + Mockito.when(webClient.head()).thenReturn(headersUriSpec); + + Mockito.when(headersUriSpec.uri(any(String.class))).thenReturn(headersUriSpec); + Mockito.when(headersUriSpec.uri(any(), any(Map.class))).thenReturn(headersUriSpec); + Mockito.when(headersUriSpec.headers(any(Consumer.class))).thenReturn(headersUriSpec); + + RequestBodyUriSpec bodyUriSpec = mock(RequestBodyUriSpec.class); + Mockito.when(webClient.method(any())).thenReturn(bodyUriSpec); + Mockito.when(bodyUriSpec.body(any())).thenReturn(headersUriSpec); + Mockito.when(bodyUriSpec.uri(any(), any(Map.class))).thenReturn(bodyUriSpec); + Mockito.when(bodyUriSpec.headers(any(Consumer.class))).thenReturn(bodyUriSpec); + + ClientResponse response = mock(ClientResponse.class); + Mockito.when(headersUriSpec.exchange()).thenReturn(Mono.just(response)); + Mockito.when(bodyUriSpec.exchange()).thenReturn(Mono.just(response)); + Mockito.when(response.statusCode()).thenReturn(HttpStatus.ACCEPTED); + + headersUriSpecMap.putIfAbsent(host, headersUriSpec); + bodyUriSpecMap.putIfAbsent(host, 
bodyUriSpec); + responseMap.putIfAbsent(host, response); + + return webClient; + }); + } + } + + public Send when(String host) { + return new CallbackImpl(get(host), headersUriSpecMap.get(host), bodyUriSpecMap.get(host), responseMap.get(host)); + } + + public interface Client { + WebClient client(); + } + + public interface Send extends Receive, Client { + + Receive get(Consumer headerSpec); + + Receive exchange(Consumer bodySpec); + + default Receive receiveJsonFromFile(String file) { + + return receive(Receive::json) // + .body(Receive.fromPath(file)); + } + + default Receive receiveInfo() { + + return receiveJsonFromFile("info") // + .receive(Receive::ok); + + } + + default Receive receiveIndexCreated() { + + return receiveJsonFromFile("index-ok-created") // + .receive(Receive::ok); + } + + default Receive receiveIndexUpdated() { + + return receiveJsonFromFile("index-ok-updated") // + .receive(Receive::ok); + } + + default Receive receiveSearchOk() { + + return receiveJsonFromFile("search-ok-no-hits") // + .receive(Receive::ok); + } + + default Receive receiveGetByIdNotFound() { + + return receiveJsonFromFile("get-by-id-no-hit") // + .receive(response -> { + Mockito.when(response.statusCode()).thenReturn(HttpStatus.ACCEPTED, HttpStatus.NOT_FOUND); + }); + } + + default Receive receiveGetById() { + + return receiveJsonFromFile("get-by-id-ok") // + .receive(Receive::ok); + } + + default Receive receiveUpdateOk() { + + return receiveJsonFromFile("update-ok-updated") // + .receive(Receive::ok); + } + + default Receive receiveDeleteOk() { + + return receiveJsonFromFile("update-ok-deleted") // + .receive(Receive::ok); + } + + default Receive updateFail() { + + return receiveJsonFromFile("update-error-not-found") // + .receive(response -> { + Mockito.when(response.statusCode()).thenReturn(HttpStatus.ACCEPTED, HttpStatus.NOT_FOUND); + }); + } + + } + + public interface Receive { + + Receive receive(Consumer response); + + default Receive body(String json) { + return 
body(() -> json.getBytes(StandardCharsets.UTF_8)); + } + + default Receive body(Supplier json) { + return body(new DefaultDataBufferFactory().wrap(json.get())); + } + + default Receive body(Resource resource) { + + return body(() -> { + try { + return StreamUtils.copyToByteArray(resource.getInputStream()); + } catch (IOException e) { + throw new RuntimeException(e); + } + }); + } + + default Receive body(DataBuffer dataBuffer) { + return receive(response -> Mockito.when(response.body(any())).thenReturn(Flux.just(dataBuffer))); + } + + static void ok(ClientResponse response) { + Mockito.when(response.statusCode()).thenReturn(HttpStatus.ACCEPTED); + } + + static void error(ClientResponse response) { + Mockito.when(response.statusCode()).thenReturn(HttpStatus.INTERNAL_SERVER_ERROR); + } + + static void notFound(ClientResponse response) { + Mockito.when(response.statusCode()).thenReturn(HttpStatus.NOT_FOUND); + } + + static void json(ClientResponse response) { + + ClientResponse.Headers headers = Mockito.mock(ClientResponse.Headers.class); + Mockito.when(headers.contentType()).thenReturn(Optional.of(MediaType.APPLICATION_JSON)); + + Mockito.when(response.headers()).thenReturn(headers); + } + + static Resource fromPath(String filename) { + return new ClassPathResource("/org/springframework/data/elasticsearch/client/" + filename + ".json"); + } + } + + class CallbackImpl implements Send, Receive { + + WebClient client; + RequestHeadersUriSpec headersUriSpec; + RequestBodyUriSpec bodyUriSpec; + ClientResponse responseDelegate; + + public CallbackImpl(WebClient client, RequestHeadersUriSpec headersUriSpec, RequestBodyUriSpec bodyUriSpec, + ClientResponse responseDelegate) { + + this.client = client; + this.headersUriSpec = headersUriSpec; + this.bodyUriSpec = bodyUriSpec; + this.responseDelegate = responseDelegate; + } + + @Override + public Receive get(Consumer uriSpec) { + + uriSpec.accept(headersUriSpec); + return this; + } + + @Override + public Receive 
exchange(Consumer bodySpec) { + + bodySpec.accept(this.bodyUriSpec); + return this; + } + + @Override + public Receive receive(Consumer response) { + + response.accept(responseDelegate); + return this; + } + + @Override + public WebClient client() { + return client; + } + + } + } +} diff --git a/src/test/java/org/springframework/data/elasticsearch/client/reactive/SingleNodeHostProviderUnitTests.java b/src/test/java/org/springframework/data/elasticsearch/client/reactive/SingleNodeHostProviderUnitTests.java new file mode 100644 index 000000000..81f6c4488 --- /dev/null +++ b/src/test/java/org/springframework/data/elasticsearch/client/reactive/SingleNodeHostProviderUnitTests.java @@ -0,0 +1,76 @@ +/* + * Copyright 2018 the original author or authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.springframework.data.elasticsearch.client.reactive; + +import static org.assertj.core.api.Assertions.assertThat; + +import org.springframework.data.elasticsearch.client.NoReachableHostException; +import reactor.test.StepVerifier; + +import org.junit.Before; +import org.junit.Test; +import org.springframework.data.elasticsearch.client.ElasticsearchHost; +import org.springframework.data.elasticsearch.client.ElasticsearchHost.State; +import org.springframework.data.elasticsearch.client.reactive.ReactiveMockClientTestsUtils.MockDelegatingElasticsearchHostProvider; +import org.springframework.data.elasticsearch.client.reactive.ReactiveMockClientTestsUtils.WebClientProvider.Receive; + +/** + * @author Christoph Strobl + * @currentRead Golden Fool - Robin Hobb + */ +public class SingleNodeHostProviderUnitTests { + + static final String HOST_1 = ":9200"; + + MockDelegatingElasticsearchHostProvider mock; + SingleNodeHostProvider provider; + + @Before + public void setUp() { + + mock = ReactiveMockClientTestsUtils.single(HOST_1); + provider = mock.getDelegate(); + } + + @Test // DATAES-488 + public void refreshHostStateShouldUpdateNodeStateCorrectly() { + + mock.when(HOST_1).receive(Receive::error); + + provider.clusterInfo().as(StepVerifier::create).expectNextCount(1).verifyComplete(); + + assertThat(provider.getCachedHostState()).extracting(ElasticsearchHost::getState).isEqualTo(State.OFFLINE); + } + + @Test // DATAES-488 + public void getActiveReturnsFirstActiveHost() { + + mock.when(HOST_1).receive(Receive::ok); + + provider.clusterInfo().as(StepVerifier::create).expectNextCount(1).verifyComplete(); + + assertThat(provider.getCachedHostState()).extracting(ElasticsearchHost::getState).isEqualTo(State.ONLINE); + } + + @Test // DATAES-488 + public void getActiveErrorsWhenNoActiveHostFound() { + + mock.when(HOST_1).receive(Receive::error); + + provider.getActive().as(StepVerifier::create).expectError(NoReachableHostException.class); + } +} diff --git 
a/src/test/java/org/springframework/data/elasticsearch/core/ReactiveElasticsearchTemplateTests.java b/src/test/java/org/springframework/data/elasticsearch/core/ReactiveElasticsearchTemplateTests.java new file mode 100644 index 000000000..f5109d788 --- /dev/null +++ b/src/test/java/org/springframework/data/elasticsearch/core/ReactiveElasticsearchTemplateTests.java @@ -0,0 +1,148 @@ +/* + * Copyright 2018 the original author or authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.springframework.data.elasticsearch.core; + +import static org.apache.commons.lang.RandomStringUtils.*; +import static org.assertj.core.api.Assertions.*; + +import reactor.test.StepVerifier; + +import java.util.List; + +import org.junit.Before; +import org.junit.Test; +import org.springframework.data.elasticsearch.TestUtils; +import org.springframework.data.elasticsearch.core.query.Criteria; +import org.springframework.data.elasticsearch.core.query.CriteriaQuery; +import org.springframework.data.elasticsearch.core.query.IndexQuery; +import org.springframework.data.elasticsearch.core.query.IndexQueryBuilder; +import org.springframework.data.elasticsearch.entities.SampleEntity; + +/** + * @author Christoph Strobl + * @currentRead Golden Fool - Robin Hobb + */ +public class ReactiveElasticsearchTemplateTests { + + private ElasticsearchRestTemplate restTemplate; + private ReactiveElasticsearchTemplate template; + + @Before + public void setUp() { + + restTemplate = new ElasticsearchRestTemplate(TestUtils.restHighLevelClient()); + + TestUtils.deleteIndex("test-index-sample"); + + restTemplate.createIndex(SampleEntity.class); + restTemplate.putMapping(SampleEntity.class); + restTemplate.refresh(SampleEntity.class); + + template = new ReactiveElasticsearchTemplate(TestUtils.reactiveClient()); + } + + @Test // DATAES-488 + public void indexWithIdShouldWork() { + + String documentId = randomNumeric(5); + SampleEntity sampleEntity = SampleEntity.builder().id(documentId).message("foo bar") + .version(System.currentTimeMillis()).build(); + + template.index(sampleEntity).as(StepVerifier::create).expectNextCount(1).verifyComplete(); + + restTemplate.refresh(SampleEntity.class); + + List result = restTemplate + .queryForList(new CriteriaQuery(Criteria.where("message").is(sampleEntity.getMessage())), SampleEntity.class); + assertThat(result).hasSize(1); + } + + @Test // DATAES-488 + public void getShouldReturnEntity() { + + String documentId = randomNumeric(5); 
+ SampleEntity sampleEntity = SampleEntity.builder().id(documentId).message("some message") + .version(System.currentTimeMillis()).build(); + + IndexQuery indexQuery = getIndexQuery(sampleEntity); + restTemplate.index(indexQuery); + restTemplate.refresh(SampleEntity.class); + + template.get(documentId, SampleEntity.class) // + .as(StepVerifier::create) // + .expectNext(sampleEntity) // + .verifyComplete(); + } + + @Test // DATAES-488 + public void getForNothing() { + + String documentId = randomNumeric(5); + SampleEntity sampleEntity = SampleEntity.builder().id(documentId).message("some message") + .version(System.currentTimeMillis()).build(); + + IndexQuery indexQuery = getIndexQuery(sampleEntity); + restTemplate.index(indexQuery); + restTemplate.refresh(SampleEntity.class); + + template.get("foo", SampleEntity.class) // + .as(StepVerifier::create) // + .verifyComplete(); + } + + @Test // DATAES-488 + public void findShouldApplyCriteria() { + + String documentId = randomNumeric(5); + SampleEntity sampleEntity = SampleEntity.builder().id(documentId).message("some message") + .version(System.currentTimeMillis()).build(); + + IndexQuery indexQuery = getIndexQuery(sampleEntity); + restTemplate.index(indexQuery); + restTemplate.refresh(SampleEntity.class); + + CriteriaQuery criteriaQuery = new CriteriaQuery(Criteria.where("message").is("some message")); + + template.query(criteriaQuery, SampleEntity.class) // + .as(StepVerifier::create) // + .expectNext(sampleEntity) // + .verifyComplete(); + } + + @Test // DATAES-488 + public void findShouldReturnEmptyFluxIfNothingFound() { + + String documentId = randomNumeric(5); + SampleEntity sampleEntity = SampleEntity.builder().id(documentId).message("some message") + .version(System.currentTimeMillis()).build(); + + IndexQuery indexQuery = getIndexQuery(sampleEntity); + restTemplate.index(indexQuery); + restTemplate.refresh(SampleEntity.class); + + CriteriaQuery criteriaQuery = new 
CriteriaQuery(Criteria.where("message").is("foo")); + + template.query(criteriaQuery, SampleEntity.class) // + .as(StepVerifier::create) // + .verifyComplete(); + } + + private IndexQuery getIndexQuery(SampleEntity sampleEntity) { + return new IndexQueryBuilder().withId(sampleEntity.getId()).withObject(sampleEntity) + .withVersion(sampleEntity.getVersion()).build(); + } +} diff --git a/src/test/java/org/springframework/data/elasticsearch/repositories/synonym/SynonymRepositoryTests.java b/src/test/java/org/springframework/data/elasticsearch/repositories/synonym/SynonymRepositoryTests.java index 9ff9b72c2..18bbc2091 100644 --- a/src/test/java/org/springframework/data/elasticsearch/repositories/synonym/SynonymRepositoryTests.java +++ b/src/test/java/org/springframework/data/elasticsearch/repositories/synonym/SynonymRepositoryTests.java @@ -59,7 +59,7 @@ public class SynonymRepositoryTests { public void shouldDo() { //given SynonymEntity entry1 = new SynonymEntity(); - entry1.setText("Elizabeth is the English queen"); + entry1.setText("Elizabeth is the english queen"); SynonymEntity entry2 = new SynonymEntity(); entry2.setText("Other text"); diff --git a/src/test/resources/org/springframework/data/elasticsearch/client/get-by-id-no-hit.json b/src/test/resources/org/springframework/data/elasticsearch/client/get-by-id-no-hit.json new file mode 100644 index 000000000..f7fe0da6b --- /dev/null +++ b/src/test/resources/org/springframework/data/elasticsearch/client/get-by-id-no-hit.json @@ -0,0 +1,6 @@ +{ + "_index" : "twitter", + "_type" : "doc", + "_id" : "5", + "found" : false +} \ No newline at end of file diff --git a/src/test/resources/org/springframework/data/elasticsearch/client/get-by-id-ok.json b/src/test/resources/org/springframework/data/elasticsearch/client/get-by-id-ok.json new file mode 100644 index 000000000..36a32b30e --- /dev/null +++ b/src/test/resources/org/springframework/data/elasticsearch/client/get-by-id-ok.json @@ -0,0 +1,12 @@ +{ + "_index" : 
"twitter", + "_type" : "doc", + "_id" : "1", + "_version" : 1, + "found" : true, + "_source" : { + "user" : "kimchy", + "post_date" : "2009-11-15T13:12:00", + "message" : "Trying out Elasticsearch, so far so good?" + } +} \ No newline at end of file diff --git a/src/test/resources/org/springframework/data/elasticsearch/client/index-ok-created.json b/src/test/resources/org/springframework/data/elasticsearch/client/index-ok-created.json new file mode 100644 index 000000000..bbce92c0d --- /dev/null +++ b/src/test/resources/org/springframework/data/elasticsearch/client/index-ok-created.json @@ -0,0 +1,14 @@ +{ + "_index": "twitter", + "_type": "doc", + "_id": "10", + "_version": 1, + "result": "created", + "_shards": { + "total": 2, + "successful": 1, + "failed": 0 + }, + "_seq_no": 0, + "_primary_term": 1 +} diff --git a/src/test/resources/org/springframework/data/elasticsearch/client/index-ok-updated.json b/src/test/resources/org/springframework/data/elasticsearch/client/index-ok-updated.json new file mode 100644 index 000000000..22ded05af --- /dev/null +++ b/src/test/resources/org/springframework/data/elasticsearch/client/index-ok-updated.json @@ -0,0 +1,14 @@ +{ + "_index" : "twitter", + "_type" : "doc", + "_id" : "1", + "_version" : 2, + "result" : "updated", + "_shards" : { + "total" : 2, + "successful" : 1, + "failed" : 0 + }, + "_seq_no" : 1, + "_primary_term" : 1 +} \ No newline at end of file diff --git a/src/test/resources/org/springframework/data/elasticsearch/client/info.json b/src/test/resources/org/springframework/data/elasticsearch/client/info.json new file mode 100644 index 000000000..a008f4ac2 --- /dev/null +++ b/src/test/resources/org/springframework/data/elasticsearch/client/info.json @@ -0,0 +1,17 @@ +{ + "cluster_name": "elasticsearch", + "cluster_uuid": "r1tpSEemQZiSVJbfAqOLjg", + "name": "T14BIoj", + "tagline": "You Know, for Search", + "version": { + "build_date": "2018-08-17T23:18:47.308994Z", + "build_flavor": "default", + "build_hash": 
"595516e", + "build_snapshot": false, + "build_type": "tar", + "lucene_version": "7.4.0", + "minimum_index_compatibility_version": "5.0.0", + "minimum_wire_compatibility_version": "5.6.0", + "number": "6.4.0" + } +} \ No newline at end of file diff --git a/src/test/resources/org/springframework/data/elasticsearch/client/multi-get-ok-2-hits-1-unavailable.json b/src/test/resources/org/springframework/data/elasticsearch/client/multi-get-ok-2-hits-1-unavailable.json new file mode 100644 index 000000000..cd912c4ee --- /dev/null +++ b/src/test/resources/org/springframework/data/elasticsearch/client/multi-get-ok-2-hits-1-unavailable.json @@ -0,0 +1,34 @@ +{ + "docs": [ + { + "_index": "twitter", + "_type": "doc", + "_id": "1", + "_version": 1, + "found": true, + "_source": { + "user": "kimchy", + "post_date": "2009-11-15T13:12:00", + "message": "Trying out Elasticsearch, so far so good?" + } + }, + { + "_index": "twitter", + "_type": "_doc", + "_id": "2", + "found": false + }, + { + "_index": "twitter", + "_type": "doc", + "_id": "3", + "_version": 1, + "found": true, + "_source": { + "user": "elastic", + "post_date": "2010-01-15T01:46:38", + "message": "Building the site, should be kewl" + } + } + ] +} \ No newline at end of file diff --git a/src/test/resources/org/springframework/data/elasticsearch/client/multi-get-ok-2-hits.json b/src/test/resources/org/springframework/data/elasticsearch/client/multi-get-ok-2-hits.json new file mode 100644 index 000000000..809d670ef --- /dev/null +++ b/src/test/resources/org/springframework/data/elasticsearch/client/multi-get-ok-2-hits.json @@ -0,0 +1,28 @@ +{ + "docs": [ + { + "_index": "twitter", + "_type": "doc", + "_id": "1", + "_version": 1, + "found": true, + "_source": { + "user": "kimchy", + "post_date": "2009-11-15T13:12:00", + "message": "Trying out Elasticsearch, so far so good?" 
+ } + }, + { + "_index": "twitter", + "_type": "doc", + "_id": "2", + "_version": 1, + "found": true, + "_source": { + "user": "kimchy", + "post_date": "2009-11-15T14:12:12", + "message": "Another tweet, will it be indexed?" + } + } + ] +} \ No newline at end of file diff --git a/src/test/resources/org/springframework/data/elasticsearch/client/search-ok-multiple-hits.json b/src/test/resources/org/springframework/data/elasticsearch/client/search-ok-multiple-hits.json new file mode 100644 index 000000000..6cb824540 --- /dev/null +++ b/src/test/resources/org/springframework/data/elasticsearch/client/search-ok-multiple-hits.json @@ -0,0 +1,38 @@ +{ + "took" : 52, + "timed_out" : false, + "_shards" : { + "total" : 5, + "successful" : 5, + "skipped" : 0, + "failed" : 0 + }, + "hits" : { + "total" : 2, + "max_score" : 0.2876821, + "hits" : [ + { + "_index" : "twitter", + "_type" : "doc", + "_id" : "2", + "_score" : 0.2876821, + "_source" : { + "user" : "kimchy", + "post_date" : "2009-11-15T14:12:12", + "message" : "Another tweet, will it be indexed?" + } + }, + { + "_index" : "twitter", + "_type" : "doc", + "_id" : "1", + "_score" : 0.2876821, + "_source" : { + "user" : "kimchy", + "post_date" : "2009-11-15T13:12:00", + "message" : "Trying out Elasticsearch, so far so good?" 
+ } + } + ] + } +} \ No newline at end of file diff --git a/src/test/resources/org/springframework/data/elasticsearch/client/search-ok-no-hits.json b/src/test/resources/org/springframework/data/elasticsearch/client/search-ok-no-hits.json new file mode 100644 index 000000000..f2dccecd6 --- /dev/null +++ b/src/test/resources/org/springframework/data/elasticsearch/client/search-ok-no-hits.json @@ -0,0 +1,15 @@ +{ + "took" : 226, + "timed_out" : false, + "_shards" : { + "total" : 5, + "successful" : 5, + "skipped" : 0, + "failed" : 0 + }, + "hits" : { + "total" : 0, + "max_score" : null, + "hits" : [ ] + } +} \ No newline at end of file diff --git a/src/test/resources/org/springframework/data/elasticsearch/client/search-ok-single-hit.json b/src/test/resources/org/springframework/data/elasticsearch/client/search-ok-single-hit.json new file mode 100644 index 000000000..abf1be138 --- /dev/null +++ b/src/test/resources/org/springframework/data/elasticsearch/client/search-ok-single-hit.json @@ -0,0 +1,27 @@ +{ + "took" : 52, + "timed_out" : false, + "_shards" : { + "total" : 5, + "successful" : 5, + "skipped" : 0, + "failed" : 0 + }, + "hits" : { + "total" : 1, + "max_score" : 0.2876821, + "hits" : [ + { + "_index" : "twitter", + "_type" : "doc", + "_id" : "2", + "_score" : 0.2876821, + "_source" : { + "user" : "kimchy", + "post_date" : "2009-11-15T14:12:12", + "message" : "Another tweet, will it be indexed?" 
+ } + } + ] + } +} \ No newline at end of file diff --git a/src/test/resources/org/springframework/data/elasticsearch/client/update-error-not-found.json b/src/test/resources/org/springframework/data/elasticsearch/client/update-error-not-found.json new file mode 100644 index 000000000..d86719888 --- /dev/null +++ b/src/test/resources/org/springframework/data/elasticsearch/client/update-error-not-found.json @@ -0,0 +1,19 @@ +{ + "error": { + "index": "twitter", + "index_uuid": "C91lAFXcRR6GTpYv6QIJFQ", + "reason": "[doc][101]: document missing", + "root_cause": [ + { + "index": "twitter", + "index_uuid": "C91lAFXcRR6GTpYv6QIJFQ", + "reason": "[doc][101]: document missing", + "shard": "1", + "type": "document_missing_exception" + } + ], + "shard": "1", + "type": "document_missing_exception" + }, + "status": 404 +} \ No newline at end of file diff --git a/src/test/resources/org/springframework/data/elasticsearch/client/update-ok-deleted.json b/src/test/resources/org/springframework/data/elasticsearch/client/update-ok-deleted.json new file mode 100644 index 000000000..f850ffc1a --- /dev/null +++ b/src/test/resources/org/springframework/data/elasticsearch/client/update-ok-deleted.json @@ -0,0 +1,14 @@ +{ + "_id": "1", + "_index": "twitter", + "_primary_term": 4, + "_seq_no": 2, + "_shards": { + "failed": 0, + "successful": 1, + "total": 2 + }, + "_type": "doc", + "_version": 1, + "result": "deleted" +} \ No newline at end of file diff --git a/src/test/resources/org/springframework/data/elasticsearch/client/update-ok-updated.json b/src/test/resources/org/springframework/data/elasticsearch/client/update-ok-updated.json new file mode 100644 index 000000000..c028ded2f --- /dev/null +++ b/src/test/resources/org/springframework/data/elasticsearch/client/update-ok-updated.json @@ -0,0 +1,14 @@ +{ + "_index": "twitter", + "_type": "doc", + "_id": "1", + "_version": 2, + "result": "updated", + "_shards": { + "total": 2, + "successful": 1, + "failed": 0 + }, + "_seq_no": 2, + 
"_primary_term": 4 +} \ No newline at end of file diff --git a/src/test/resources/synonyms/settings.json b/src/test/resources/synonyms/settings.json index d53189fb5..1587fc6b6 100644 --- a/src/test/resources/synonyms/settings.json +++ b/src/test/resources/synonyms/settings.json @@ -14,13 +14,13 @@ "filter": { "synonym_filter": { "type": "synonym", + "lenient" : true, "synonyms": [ "british,english", "queen,monarch" - ], - "ignore_case": "true" + ] } } } } -} \ No newline at end of file +} diff --git a/src/test/resources/test-home-dir/modules/analysis-common/analysis-common-6.5.0.jar b/src/test/resources/test-home-dir/modules/analysis-common/analysis-common-6.5.0.jar new file mode 100644 index 000000000..1419c9a07 Binary files /dev/null and b/src/test/resources/test-home-dir/modules/analysis-common/analysis-common-6.5.0.jar differ diff --git a/src/test/resources/test-home-dir/modules/analysis-common/plugin-descriptor.properties b/src/test/resources/test-home-dir/modules/analysis-common/plugin-descriptor.properties new file mode 100644 index 000000000..a6a0ae9dd --- /dev/null +++ b/src/test/resources/test-home-dir/modules/analysis-common/plugin-descriptor.properties @@ -0,0 +1,45 @@ +# Elasticsearch plugin descriptor file +# This file must exist as 'plugin-descriptor.properties' inside a plugin. +# +### example plugin for "foo" +# +# foo.zip <-- zip file for the plugin, with this structure: +# |____ .jar <-- classes, resources, dependencies +# |____ .jar <-- any number of jars +# |____ plugin-descriptor.properties <-- example contents below: +# +# classname=foo.bar.BazPlugin +# description=My cool plugin +# version=6.0 +# elasticsearch.version=6.0 +# java.version=1.8 +# +### mandatory elements for all plugins: +# +# 'description': simple summary of the plugin +description=Adds "built in" analyzers to Elasticsearch. 
+# +# 'version': plugin's version +version=6.5.0 +# +# 'name': the plugin name +name=analysis-common +# +# 'classname': the name of the class to load, fully-qualified. +classname=org.elasticsearch.analysis.common.CommonAnalysisPlugin +# +# 'java.version': version of java the code is built against +# use the system property java.specification.version +# version string must be a sequence of nonnegative decimal integers +# separated by "."'s and may have leading zeros +java.version=1.8 +# +# 'elasticsearch.version': version of elasticsearch compiled against +elasticsearch.version=6.5.0 +### optional elements for plugins: +# +# 'extended.plugins': other plugins this plugin extends through SPI +extended.plugins=lang-painless +# +# 'has.native.controller': whether or not the plugin has a native controller +has.native.controller=false diff --git a/src/test/resources/test-home-dir/modules/ingest-common/elasticsearch-dissect-6.5.0.jar b/src/test/resources/test-home-dir/modules/ingest-common/elasticsearch-dissect-6.5.0.jar new file mode 100644 index 000000000..faae21381 Binary files /dev/null and b/src/test/resources/test-home-dir/modules/ingest-common/elasticsearch-dissect-6.5.0.jar differ diff --git a/src/test/resources/test-home-dir/modules/ingest-common/elasticsearch-grok-6.5.0.jar b/src/test/resources/test-home-dir/modules/ingest-common/elasticsearch-grok-6.5.0.jar new file mode 100644 index 000000000..e7f166be4 Binary files /dev/null and b/src/test/resources/test-home-dir/modules/ingest-common/elasticsearch-grok-6.5.0.jar differ diff --git a/src/test/resources/test-home-dir/modules/ingest-common/ingest-common-6.5.0.jar b/src/test/resources/test-home-dir/modules/ingest-common/ingest-common-6.5.0.jar new file mode 100644 index 000000000..5e7c81652 Binary files /dev/null and b/src/test/resources/test-home-dir/modules/ingest-common/ingest-common-6.5.0.jar differ diff --git a/src/test/resources/test-home-dir/modules/ingest-common/jcodings-1.0.12.jar 
b/src/test/resources/test-home-dir/modules/ingest-common/jcodings-1.0.12.jar new file mode 100644 index 000000000..5493b50b4 Binary files /dev/null and b/src/test/resources/test-home-dir/modules/ingest-common/jcodings-1.0.12.jar differ diff --git a/src/test/resources/test-home-dir/modules/ingest-common/joni-2.1.6.jar b/src/test/resources/test-home-dir/modules/ingest-common/joni-2.1.6.jar new file mode 100644 index 000000000..7ec4d1507 Binary files /dev/null and b/src/test/resources/test-home-dir/modules/ingest-common/joni-2.1.6.jar differ diff --git a/src/test/resources/test-home-dir/modules/ingest-common/plugin-descriptor.properties b/src/test/resources/test-home-dir/modules/ingest-common/plugin-descriptor.properties new file mode 100644 index 000000000..8de780c98 --- /dev/null +++ b/src/test/resources/test-home-dir/modules/ingest-common/plugin-descriptor.properties @@ -0,0 +1,45 @@ +# Elasticsearch plugin descriptor file +# This file must exist as 'plugin-descriptor.properties' inside a plugin. +# +### example plugin for "foo" +# +# foo.zip <-- zip file for the plugin, with this structure: +# |____ .jar <-- classes, resources, dependencies +# |____ .jar <-- any number of jars +# |____ plugin-descriptor.properties <-- example contents below: +# +# classname=foo.bar.BazPlugin +# description=My cool plugin +# version=6.0 +# elasticsearch.version=6.0 +# java.version=1.8 +# +### mandatory elements for all plugins: +# +# 'description': simple summary of the plugin +description=Module for ingest processors that do not require additional security permissions or have large dependencies and resources +# +# 'version': plugin's version +version=6.5.0 +# +# 'name': the plugin name +name=ingest-common +# +# 'classname': the name of the class to load, fully-qualified. 
+classname=org.elasticsearch.ingest.common.IngestCommonPlugin +# +# 'java.version': version of java the code is built against +# use the system property java.specification.version +# version string must be a sequence of nonnegative decimal integers +# separated by "."'s and may have leading zeros +java.version=1.8 +# +# 'elasticsearch.version': version of elasticsearch compiled against +elasticsearch.version=6.5.0 +### optional elements for plugins: +# +# 'extended.plugins': other plugins this plugin extends through SPI +extended.plugins=lang-painless +# +# 'has.native.controller': whether or not the plugin has a native controller +has.native.controller=false diff --git a/src/test/resources/test-home-dir/modules/lang-expression/lang-expression-6.2.2.jar b/src/test/resources/test-home-dir/modules/lang-expression/lang-expression-6.2.2.jar deleted file mode 100644 index 521648ddf..000000000 Binary files a/src/test/resources/test-home-dir/modules/lang-expression/lang-expression-6.2.2.jar and /dev/null differ diff --git a/src/test/resources/test-home-dir/modules/lang-expression/lang-expression-6.5.0.jar b/src/test/resources/test-home-dir/modules/lang-expression/lang-expression-6.5.0.jar new file mode 100644 index 000000000..32571b0a0 Binary files /dev/null and b/src/test/resources/test-home-dir/modules/lang-expression/lang-expression-6.5.0.jar differ diff --git a/src/test/resources/test-home-dir/modules/lang-expression/lucene-expressions-7.2.1.jar b/src/test/resources/test-home-dir/modules/lang-expression/lucene-expressions-7.5.0.jar similarity index 82% rename from src/test/resources/test-home-dir/modules/lang-expression/lucene-expressions-7.2.1.jar rename to src/test/resources/test-home-dir/modules/lang-expression/lucene-expressions-7.5.0.jar index a0e0b7be3..d2b8033c3 100644 Binary files a/src/test/resources/test-home-dir/modules/lang-expression/lucene-expressions-7.2.1.jar and b/src/test/resources/test-home-dir/modules/lang-expression/lucene-expressions-7.5.0.jar 
differ diff --git a/src/test/resources/test-home-dir/modules/lang-expression/plugin-descriptor.properties b/src/test/resources/test-home-dir/modules/lang-expression/plugin-descriptor.properties index fdae6037d..2ff0578d3 100644 --- a/src/test/resources/test-home-dir/modules/lang-expression/plugin-descriptor.properties +++ b/src/test/resources/test-home-dir/modules/lang-expression/plugin-descriptor.properties @@ -1,20 +1,18 @@ # Elasticsearch plugin descriptor file -# This file must exist as 'plugin-descriptor.properties' in a folder named `elasticsearch` -# inside all plugins. +# This file must exist as 'plugin-descriptor.properties' inside a plugin. # ### example plugin for "foo" # # foo.zip <-- zip file for the plugin, with this structure: -#|____elasticsearch/ -#| |____ .jar <-- classes, resources, dependencies -#| |____ .jar <-- any number of jars -#| |____ plugin-descriptor.properties <-- example contents below: +# |____ .jar <-- classes, resources, dependencies +# |____ .jar <-- any number of jars +# |____ plugin-descriptor.properties <-- example contents below: # # classname=foo.bar.BazPlugin # description=My cool plugin -# version=2.0 -# elasticsearch.version=2.0 -# java.version=1.7 +# version=6.0 +# elasticsearch.version=6.0 +# java.version=1.8 # ### mandatory elements for all plugins: # @@ -22,7 +20,7 @@ description=Lucene expressions integration for Elasticsearch # # 'version': plugin's version -version=6.2.2 +version=6.5.0 # # 'name': the plugin name name=lang-expression @@ -37,7 +35,7 @@ classname=org.elasticsearch.script.expression.ExpressionPlugin java.version=1.8 # # 'elasticsearch.version': version of elasticsearch compiled against -elasticsearch.version=6.3.0 +elasticsearch.version=6.5.0 ### optional elements for plugins: # # 'extended.plugins': other plugins this plugin extends through SPI @@ -45,6 +43,3 @@ extended.plugins= # # 'has.native.controller': whether or not the plugin has a native controller has.native.controller=false -# -# 
'requires.keystore': whether or not the plugin needs the elasticsearch keystore be created -#requires.keystore=false diff --git a/src/test/resources/test-home-dir/modules/lang-painless/antlr4-runtime-4.5.3.jar b/src/test/resources/test-home-dir/modules/lang-painless/antlr4-runtime-4.5.3.jar new file mode 100644 index 000000000..44353757e Binary files /dev/null and b/src/test/resources/test-home-dir/modules/lang-painless/antlr4-runtime-4.5.3.jar differ diff --git a/src/test/resources/test-home-dir/modules/lang-painless/asm-debug-all-5.1.jar b/src/test/resources/test-home-dir/modules/lang-painless/asm-debug-all-5.1.jar new file mode 100644 index 000000000..34b7bfada Binary files /dev/null and b/src/test/resources/test-home-dir/modules/lang-painless/asm-debug-all-5.1.jar differ diff --git a/src/test/resources/test-home-dir/modules/lang-painless/elasticsearch-scripting-painless-spi-6.5.0.jar b/src/test/resources/test-home-dir/modules/lang-painless/elasticsearch-scripting-painless-spi-6.5.0.jar new file mode 100644 index 000000000..61af88ee8 Binary files /dev/null and b/src/test/resources/test-home-dir/modules/lang-painless/elasticsearch-scripting-painless-spi-6.5.0.jar differ diff --git a/src/test/resources/test-home-dir/modules/lang-painless/lang-painless-6.5.0.jar b/src/test/resources/test-home-dir/modules/lang-painless/lang-painless-6.5.0.jar new file mode 100644 index 000000000..328eb54af Binary files /dev/null and b/src/test/resources/test-home-dir/modules/lang-painless/lang-painless-6.5.0.jar differ diff --git a/src/test/resources/test-home-dir/modules/lang-painless/plugin-descriptor.properties b/src/test/resources/test-home-dir/modules/lang-painless/plugin-descriptor.properties new file mode 100644 index 000000000..1a7b244c8 --- /dev/null +++ b/src/test/resources/test-home-dir/modules/lang-painless/plugin-descriptor.properties @@ -0,0 +1,45 @@ +# Elasticsearch plugin descriptor file +# This file must exist as 'plugin-descriptor.properties' inside a plugin. 
+# +### example plugin for "foo" +# +# foo.zip <-- zip file for the plugin, with this structure: +# |____ .jar <-- classes, resources, dependencies +# |____ .jar <-- any number of jars +# |____ plugin-descriptor.properties <-- example contents below: +# +# classname=foo.bar.BazPlugin +# description=My cool plugin +# version=6.0 +# elasticsearch.version=6.0 +# java.version=1.8 +# +### mandatory elements for all plugins: +# +# 'description': simple summary of the plugin +description=An easy, safe and fast scripting language for Elasticsearch +# +# 'version': plugin's version +version=6.5.0 +# +# 'name': the plugin name +name=lang-painless +# +# 'classname': the name of the class to load, fully-qualified. +classname=org.elasticsearch.painless.PainlessPlugin +# +# 'java.version': version of java the code is built against +# use the system property java.specification.version +# version string must be a sequence of nonnegative decimal integers +# separated by "."'s and may have leading zeros +java.version=1.8 +# +# 'elasticsearch.version': version of elasticsearch compiled against +elasticsearch.version=6.5.0 +### optional elements for plugins: +# +# 'extended.plugins': other plugins this plugin extends through SPI +extended.plugins= +# +# 'has.native.controller': whether or not the plugin has a native controller +has.native.controller=false diff --git a/src/test/resources/test-home-dir/modules/lang-painless/plugin-security.policy b/src/test/resources/test-home-dir/modules/lang-painless/plugin-security.policy new file mode 100644 index 000000000..b383c6da3 --- /dev/null +++ b/src/test/resources/test-home-dir/modules/lang-painless/plugin-security.policy @@ -0,0 +1,26 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. 
Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +grant { + // needed to generate runtime classes + permission java.lang.RuntimePermission "createClassLoader"; + + // needed to find the classloader to load whitelisted classes from + permission java.lang.RuntimePermission "getClassLoader"; +}; diff --git a/src/test/resources/test-home-dir/modules/mapper-extras/mapper-extras-6.5.0.jar b/src/test/resources/test-home-dir/modules/mapper-extras/mapper-extras-6.5.0.jar new file mode 100644 index 000000000..b73ba7bec Binary files /dev/null and b/src/test/resources/test-home-dir/modules/mapper-extras/mapper-extras-6.5.0.jar differ diff --git a/src/test/resources/test-home-dir/modules/mapper-extras/plugin-descriptor.properties b/src/test/resources/test-home-dir/modules/mapper-extras/plugin-descriptor.properties new file mode 100644 index 000000000..9270f9b18 --- /dev/null +++ b/src/test/resources/test-home-dir/modules/mapper-extras/plugin-descriptor.properties @@ -0,0 +1,45 @@ +# Elasticsearch plugin descriptor file +# This file must exist as 'plugin-descriptor.properties' inside a plugin. 
+# +### example plugin for "foo" +# +# foo.zip <-- zip file for the plugin, with this structure: +# |____ .jar <-- classes, resources, dependencies +# |____ .jar <-- any number of jars +# |____ plugin-descriptor.properties <-- example contents below: +# +# classname=foo.bar.BazPlugin +# description=My cool plugin +# version=6.0 +# elasticsearch.version=6.0 +# java.version=1.8 +# +### mandatory elements for all plugins: +# +# 'description': simple summary of the plugin +description=Adds advanced field mappers +# +# 'version': plugin's version +version=6.5.0 +# +# 'name': the plugin name +name=mapper-extras +# +# 'classname': the name of the class to load, fully-qualified. +classname=org.elasticsearch.index.mapper.MapperExtrasPlugin +# +# 'java.version': version of java the code is built against +# use the system property java.specification.version +# version string must be a sequence of nonnegative decimal integers +# separated by "."'s and may have leading zeros +java.version=1.8 +# +# 'elasticsearch.version': version of elasticsearch compiled against +elasticsearch.version=6.5.0 +### optional elements for plugins: +# +# 'extended.plugins': other plugins this plugin extends through SPI +extended.plugins= +# +# 'has.native.controller': whether or not the plugin has a native controller +has.native.controller=false diff --git a/src/test/resources/test-home-dir/modules/repository-url/plugin-descriptor.properties b/src/test/resources/test-home-dir/modules/repository-url/plugin-descriptor.properties new file mode 100644 index 000000000..cbced6899 --- /dev/null +++ b/src/test/resources/test-home-dir/modules/repository-url/plugin-descriptor.properties @@ -0,0 +1,45 @@ +# Elasticsearch plugin descriptor file +# This file must exist as 'plugin-descriptor.properties' inside a plugin. 
+# +### example plugin for "foo" +# +# foo.zip <-- zip file for the plugin, with this structure: +# |____ .jar <-- classes, resources, dependencies +# |____ .jar <-- any number of jars +# |____ plugin-descriptor.properties <-- example contents below: +# +# classname=foo.bar.BazPlugin +# description=My cool plugin +# version=6.0 +# elasticsearch.version=6.0 +# java.version=1.8 +# +### mandatory elements for all plugins: +# +# 'description': simple summary of the plugin +description=Module for URL repository +# +# 'version': plugin's version +version=6.5.0 +# +# 'name': the plugin name +name=repository-url +# +# 'classname': the name of the class to load, fully-qualified. +classname=org.elasticsearch.plugin.repository.url.URLRepositoryPlugin +# +# 'java.version': version of java the code is built against +# use the system property java.specification.version +# version string must be a sequence of nonnegative decimal integers +# separated by "."'s and may have leading zeros +java.version=1.8 +# +# 'elasticsearch.version': version of elasticsearch compiled against +elasticsearch.version=6.5.0 +### optional elements for plugins: +# +# 'extended.plugins': other plugins this plugin extends through SPI +extended.plugins= +# +# 'has.native.controller': whether or not the plugin has a native controller +has.native.controller=false diff --git a/src/test/resources/test-home-dir/modules/repository-url/plugin-security.policy b/src/test/resources/test-home-dir/modules/repository-url/plugin-security.policy new file mode 100644 index 000000000..b878c4857 --- /dev/null +++ b/src/test/resources/test-home-dir/modules/repository-url/plugin-security.policy @@ -0,0 +1,22 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. 
Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +grant { + permission java.net.SocketPermission "*", "connect"; +}; diff --git a/src/test/resources/test-home-dir/modules/repository-url/repository-url-6.5.0.jar b/src/test/resources/test-home-dir/modules/repository-url/repository-url-6.5.0.jar new file mode 100644 index 000000000..2f6e55496 Binary files /dev/null and b/src/test/resources/test-home-dir/modules/repository-url/repository-url-6.5.0.jar differ