Merge branch 'master' into feature/rank-eval

commit cfa52f8b9a
@@ -1,5 +1,5 @@
 Elasticsearch
-Copyright 2009-2016 Elasticsearch
+Copyright 2009-2017 Elasticsearch
 
 This product includes software developed by The Apache Software
 Foundation (http://www.apache.org/).

@@ -50,16 +50,16 @@ h3. Indexing
 Let's try and index some twitter like information. First, let's create a twitter user, and add some tweets (the @twitter@ index will be created automatically):
 
 <pre>
-curl -XPUT 'http://localhost:9200/twitter/user/kimchy?pretty' -d '{ "name" : "Shay Banon" }'
+curl -XPUT 'http://localhost:9200/twitter/user/kimchy?pretty' -H 'Content-Type: application/json' -d '{ "name" : "Shay Banon" }'
 
-curl -XPUT 'http://localhost:9200/twitter/tweet/1?pretty' -d '
+curl -XPUT 'http://localhost:9200/twitter/tweet/1?pretty' -H 'Content-Type: application/json' -d '
 {
     "user": "kimchy",
     "post_date": "2009-11-15T13:12:00",
     "message": "Trying out Elasticsearch, so far so good?"
 }'
 
-curl -XPUT 'http://localhost:9200/twitter/tweet/2?pretty' -d '
+curl -XPUT 'http://localhost:9200/twitter/tweet/2?pretty' -H 'Content-Type: application/json' -d '
 {
     "user": "kimchy",
     "post_date": "2009-11-15T14:12:12",
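
All of the README changes in this merge do the same thing: they add an explicit `Content-Type: application/json` header to the curl examples, since Elasticsearch is moving towards strict validation of the request body's content type. A client built on the Apache HTTP types used later in this commit can attach the same header through the request entity; the snippet below is a minimal sketch of ours (the class name is an assumption), not code from the commit:

<pre>
import org.apache.http.HttpEntity;
import org.apache.http.entity.ContentType;
import org.apache.http.entity.StringEntity;

public class JsonEntityExample {
    // StringEntity carries the Content-Type header along with the body,
    // the programmatic equivalent of curl's -H 'Content-Type: application/json'.
    public static HttpEntity jsonEntity(String json) {
        return new StringEntity(json, ContentType.APPLICATION_JSON);
    }
}
</pre>
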
@@ -87,7 +87,7 @@ curl -XGET 'http://localhost:9200/twitter/tweet/_search?q=user:kimchy&pretty=tru
 We can also use the JSON query language Elasticsearch provides instead of a query string:
 
 <pre>
-curl -XGET 'http://localhost:9200/twitter/tweet/_search?pretty=true' -d '
+curl -XGET 'http://localhost:9200/twitter/tweet/_search?pretty=true' -H 'Content-Type: application/json' -d '
 {
     "query" : {
         "match" : { "user": "kimchy" }
@@ -98,7 +98,7 @@ curl -XGET 'http://localhost:9200/twitter/tweet/_search?pretty=true' -d '
 Just for kicks, let's get all the documents stored (we should see the user as well):
 
 <pre>
-curl -XGET 'http://localhost:9200/twitter/_search?pretty=true' -d '
+curl -XGET 'http://localhost:9200/twitter/_search?pretty=true' -H 'Content-Type: application/json' -d '
 {
     "query" : {
         "match_all" : {}
@@ -109,7 +109,7 @@ curl -XGET 'http://localhost:9200/twitter/_search?pretty=true' -d '
 We can also do range search (the @postDate@ was automatically identified as date)
 
 <pre>
-curl -XGET 'http://localhost:9200/twitter/_search?pretty=true' -d '
+curl -XGET 'http://localhost:9200/twitter/_search?pretty=true' -H 'Content-Type: application/json' -d '
 {
     "query" : {
         "range" : {
@@ -130,16 +130,16 @@ Elasticsearch supports multiple indices, as well as multiple types per index. In
 Another way to define our simple twitter system is to have a different index per user (note, though that each index has an overhead). Here is the indexing curl's in this case:
 
 <pre>
-curl -XPUT 'http://localhost:9200/kimchy/info/1?pretty' -d '{ "name" : "Shay Banon" }'
+curl -XPUT 'http://localhost:9200/kimchy/info/1?pretty' -H 'Content-Type: application/json' -d '{ "name" : "Shay Banon" }'
 
-curl -XPUT 'http://localhost:9200/kimchy/tweet/1?pretty' -d '
+curl -XPUT 'http://localhost:9200/kimchy/tweet/1?pretty' -H 'Content-Type: application/json' -d '
 {
     "user": "kimchy",
     "post_date": "2009-11-15T13:12:00",
     "message": "Trying out Elasticsearch, so far so good?"
 }'
 
-curl -XPUT 'http://localhost:9200/kimchy/tweet/2?pretty' -d '
+curl -XPUT 'http://localhost:9200/kimchy/tweet/2?pretty' -H 'Content-Type: application/json' -d '
 {
     "user": "kimchy",
     "post_date": "2009-11-15T14:12:12",
@@ -152,7 +152,7 @@ The above will index information into the @kimchy@ index, with two types, @info@
 Complete control on the index level is allowed. As an example, in the above case, we would want to change from the default 5 shards with 1 replica per index, to only 1 shard with 1 replica per index (== per twitter user). Here is how this can be done (the configuration can be in yaml as well):
 
 <pre>
-curl -XPUT http://localhost:9200/another_user?pretty -d '
+curl -XPUT http://localhost:9200/another_user?pretty -H 'Content-Type: application/json' -d '
 {
     "index" : {
         "number_of_shards" : 1,
@@ -165,7 +165,7 @@ Search (and similar operations) are multi index aware. This means that we can ea
 index (twitter user), for example:
 
 <pre>
-curl -XGET 'http://localhost:9200/kimchy,another_user/_search?pretty=true' -d '
+curl -XGET 'http://localhost:9200/kimchy,another_user/_search?pretty=true' -H 'Content-Type: application/json' -d '
 {
     "query" : {
         "match_all" : {}
@@ -176,7 +176,7 @@ curl -XGET 'http://localhost:9200/kimchy,another_user/_search?pretty=true' -d '
 Or on all the indices:
 
 <pre>
-curl -XGET 'http://localhost:9200/_search?pretty=true' -d '
+curl -XGET 'http://localhost:9200/_search?pretty=true' -H 'Content-Type: application/json' -d '
 {
     "query" : {
         "match_all" : {}
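
The same searches can be issued programmatically. As a hedged sketch against the low-level Java REST client that the rest of this commit builds on (endpoint and query copied from the README example; the class name is ours):

<pre>
import org.apache.http.HttpHost;
import org.apache.http.entity.ContentType;
import org.apache.http.entity.StringEntity;
import org.elasticsearch.client.Response;
import org.elasticsearch.client.RestClient;

import java.util.Collections;

public class SearchDemo {
    public static void main(String[] args) throws Exception {
        try (RestClient client = RestClient.builder(new HttpHost("localhost", 9200, "http")).build()) {
            // The entity's Content-Type plays the role of the -H header in the curl examples.
            String query = "{ \"query\": { \"match_all\": {} } }";
            Response response = client.performRequest("GET", "/_search",
                    Collections.emptyMap(), new StringEntity(query, ContentType.APPLICATION_JSON));
            System.out.println(response.getStatusLine());
        }
    }
}
</pre>
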
@@ -176,10 +176,8 @@ end
 
 def sles_common(config)
   extra = <<-SHELL
-    zypper rr systemsmanagement_puppet
+    zypper rr systemsmanagement_puppet puppetlabs-pc1
-    zypper addrepo -t yast2 http://demeter.uni-regensburg.de/SLES12-x64/DVD1/ dvd1 || true
-    zypper addrepo -t yast2 http://demeter.uni-regensburg.de/SLES12-x64/DVD2/ dvd2 || true
     zypper addrepo http://download.opensuse.org/repositories/Java:Factory/SLE_12/Java:Factory.repo || true
     zypper --no-gpg-checks --non-interactive refresh
     zypper --non-interactive install git-core
  SHELL

@@ -92,7 +92,7 @@ dependencies {
   compile 'com.netflix.nebula:gradle-info-plugin:3.0.3'
   compile 'org.eclipse.jgit:org.eclipse.jgit:3.2.0.201312181205-r'
   compile 'com.perforce:p4java:2012.3.551082' // THIS IS SUPPOSED TO BE OPTIONAL IN THE FUTURE....
-  compile 'de.thetaphi:forbiddenapis:2.2'
+  compile 'de.thetaphi:forbiddenapis:2.3'
   compile 'org.apache.rat:apache-rat:0.11'
 }

@@ -122,6 +122,9 @@ if (GradleVersion.current() == GradleVersion.version("2.13")) {
 if (project == rootProject) {
 
   repositories {
+    if (System.getProperty("repos.mavenLocal") != null) {
+      mavenLocal()
+    }
     mavenCentral()
   }
   test.exclude 'org/elasticsearch/test/NamingConventionsCheckBadClasses*'

@@ -0,0 +1,78 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.gradle
+
+import org.gradle.api.DefaultTask
+import org.gradle.api.Project
+import org.gradle.api.artifacts.Configuration
+import org.gradle.api.tasks.OutputFile
+import org.gradle.api.tasks.TaskAction
+
+/**
+ * A task to create a notice file which includes dependencies' notices.
+ */
+public class NoticeTask extends DefaultTask {
+
+    @OutputFile
+    File noticeFile = new File(project.buildDir, "notices/${name}/NOTICE.txt")
+
+    /** Configurations to inspect dependencies*/
+    private List<Project> dependencies = new ArrayList<>()
+
+    public NoticeTask() {
+        description = 'Create a notice file from dependencies'
+    }
+
+    /** Add notices from licenses found in the given project. */
+    public void dependencies(Project project) {
+        dependencies.add(project)
+    }
+
+    @TaskAction
+    public void generateNotice() {
+        StringBuilder output = new StringBuilder()
+        output.append(project.rootProject.file('NOTICE.txt').getText('UTF-8'))
+        output.append('\n\n')
+        Set<String> seen = new HashSet<>()
+        for (Project dep : dependencies) {
+            File licensesDir = new File(dep.projectDir, 'licenses')
+            if (licensesDir.exists() == false) continue
+            licensesDir.eachFileMatch({ it ==~ /.*-NOTICE\.txt/ && seen.contains(it) == false}) { File file ->
+                String name = file.name.substring(0, file.name.length() - '-NOTICE.txt'.length())
+                appendFile(file, name, 'NOTICE', output)
+                appendFile(new File(file.parentFile, "${name}-LICENSE.txt"), name, 'LICENSE', output)
+                seen.add(file.name)
+            }
+        }
+        noticeFile.setText(output.toString(), 'UTF-8')
+    }
+
+    static void appendFile(File file, String name, String type, StringBuilder output) {
+        String text = file.getText('UTF-8')
+        if (text.trim().isEmpty()) {
+            return
+        }
+        output.append('================================================================================\n')
+        output.append("${name} ${type}\n")
+        output.append('================================================================================\n')
+        output.append(text)
+        output.append('\n\n')
+    }
+}

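
The task above concatenates the root NOTICE.txt with each dependency's *-NOTICE.txt/*-LICENSE.txt pair. In plain Java, the framing it writes for each entry can be sketched like this (our own illustration using only JDK APIs, not code from the commit):

<pre>
import java.io.IOException;
import java.nio.charset.StandardCharsets;
import java.nio.file.Files;
import java.nio.file.Path;
import java.util.Arrays;

class NoticeFormat {
    // Frame a dependency's NOTICE or LICENSE text the way the Groovy task does:
    // an 80-character '=' ruler, a "<name> <type>" title line, another ruler, then the text.
    static void appendFile(Path file, String name, String type, StringBuilder output) throws IOException {
        String text = new String(Files.readAllBytes(file), StandardCharsets.UTF_8);
        if (text.trim().isEmpty()) {
            return; // empty notice files are skipped, exactly as in the task
        }
        char[] ruler = new char[80];
        Arrays.fill(ruler, '=');
        output.append(ruler).append('\n')
              .append(name).append(' ').append(type).append('\n')
              .append(ruler).append('\n')
              .append(text).append("\n\n");
    }
}
</pre>
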
@@ -19,6 +19,7 @@
 package org.elasticsearch.gradle.plugin
 
 import org.elasticsearch.gradle.BuildPlugin
+import org.elasticsearch.gradle.NoticeTask
 import org.elasticsearch.gradle.test.RestIntegTestTask
 import org.elasticsearch.gradle.test.RunTask
 import org.gradle.api.Project

@@ -71,6 +72,7 @@ public class PluginBuildPlugin extends BuildPlugin {
             project.integTest.clusterConfig.plugin(project.path)
             project.tasks.run.clusterConfig.plugin(project.path)
             addZipPomGeneration(project)
+            addNoticeGeneration(project)
         }
 
         project.namingConventions {
@@ -244,4 +246,23 @@ public class PluginBuildPlugin extends BuildPlugin {
             }
         }
     }
+
+    protected void addNoticeGeneration(Project project) {
+        File licenseFile = project.pluginProperties.extension.licenseFile
+        if (licenseFile != null) {
+            project.bundlePlugin.into('/') {
+                from(licenseFile.parentFile) {
+                    include(licenseFile.name)
+                }
+            }
+        }
+        File noticeFile = project.pluginProperties.extension.noticeFile
+        if (noticeFile != null) {
+            NoticeTask generateNotice = project.tasks.create('generateNotice', NoticeTask.class)
+            generateNotice.dependencies(project)
+            project.bundlePlugin.into('/') {
+                from(generateNotice)
+            }
+        }
+    }
 }

@@ -43,6 +43,17 @@ class PluginPropertiesExtension {
     @Input
     boolean hasClientJar = false
 
+    /** A license file that should be included in the built plugin zip. */
+    @Input
+    File licenseFile = null
+
+    /**
+     * A notice file that should be included in the built plugin zip. This will be
+     * extended with notices from the {@code licenses/} directory.
+     */
+    @Input
+    File noticeFile = null
+
     PluginPropertiesExtension(Project project) {
         name = project.name
         version = project.version

@@ -16,8 +16,10 @@ class VagrantTestPlugin implements Plugin<Project> {
     static List<String> BOXES = [
             'centos-6',
             'centos-7',
-            'debian-8',
-            'fedora-24',
+            // TODO: re-enable debian once it does not have broken openjdk packages
+            //'debian-8',
+            // TODO: re-enable fedora once it does not have broken openjdk packages
+            //'fedora-24',
             'oel-6',
             'oel-7',
             'opensuse-13',

@@ -167,7 +167,6 @@
 <suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]action[/\\]support[/\\]IndicesOptions.java" checks="LineLength" />
 <suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]action[/\\]support[/\\]ToXContentToBytes.java" checks="LineLength" />
 <suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]action[/\\]support[/\\]broadcast[/\\]BroadcastOperationRequestBuilder.java" checks="LineLength" />
 <suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]action[/\\]support[/\\]broadcast[/\\]BroadcastResponse.java" checks="LineLength" />
 <suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]action[/\\]support[/\\]broadcast[/\\]TransportBroadcastAction.java" checks="LineLength" />
 <suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]action[/\\]support[/\\]broadcast[/\\]node[/\\]TransportBroadcastByNodeAction.java" checks="LineLength" />
 <suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]action[/\\]support[/\\]master[/\\]AcknowledgedRequest.java" checks="LineLength" />
@@ -201,6 +200,7 @@
 <suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]action[/\\]update[/\\]UpdateRequestBuilder.java" checks="LineLength" />
 <suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]bootstrap[/\\]Bootstrap.java" checks="LineLength" />
 <suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]bootstrap[/\\]JNANatives.java" checks="LineLength" />
 <suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]bootstrap[/\\]JNAKernel32Library.java" checks="RedundantModifier" />
 <suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]bootstrap[/\\]JVMCheck.java" checks="LineLength" />
 <suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]bootstrap[/\\]JarHell.java" checks="LineLength" />
 <suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]bootstrap[/\\]Security.java" checks="LineLength" />

@@ -36,3 +36,12 @@ org.apache.lucene.document.FieldType#numericType()
 java.lang.invoke.MethodHandle#invoke(java.lang.Object[])
 java.lang.invoke.MethodHandle#invokeWithArguments(java.lang.Object[])
 java.lang.invoke.MethodHandle#invokeWithArguments(java.util.List)
+
+@defaultMessage Don't open socket connections
+java.net.URL#openStream()
+java.net.URLConnection#connect()
+java.net.URLConnection#getInputStream()
+java.net.Socket#connect(java.net.SocketAddress)
+java.net.Socket#connect(java.net.SocketAddress, int)
+java.nio.channels.SocketChannel#open(java.net.SocketAddress)
+java.nio.channels.SocketChannel#connect(java.net.SocketAddress)
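
The nine added signatures all ban ways of opening network connections directly. For illustration only (our own snippet, not from the commit), a method like the following would now fail the forbidden-apis check, because URL#openStream() is on the banned list:

<pre>
import java.io.InputStream;
import java.net.URL;

class ForbiddenExample {
    // forbidden-apis would reject this method: URL#openStream()
    // opens a socket connection, which the new rule forbids.
    static InputStream fetch(String location) throws Exception {
        return new URL(location).openStream();
    }
}
</pre>
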
@@ -1,6 +1,6 @@
 # When updating elasticsearch, please update 'rest' version in core/src/main/resources/org/elasticsearch/bootstrap/test-framework.policy
 elasticsearch = 6.0.0-alpha1
-lucene = 6.4.0
+lucene = 6.5.0-snapshot-f919485
 
 # optional dependencies
 spatial4j = 0.6

@@ -28,8 +28,8 @@ import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver;
 import org.elasticsearch.common.inject.Inject;
 import org.elasticsearch.common.settings.Settings;
 import org.elasticsearch.search.aggregations.InternalAggregations;
-import org.elasticsearch.search.internal.InternalSearchHit;
-import org.elasticsearch.search.internal.InternalSearchHits;
+import org.elasticsearch.search.SearchHit;
+import org.elasticsearch.search.SearchHits;
 import org.elasticsearch.search.internal.InternalSearchResponse;
 import org.elasticsearch.search.profile.SearchProfileShardResults;
 import org.elasticsearch.search.suggest.Suggest;
@@ -49,8 +49,8 @@ public class TransportNoopSearchAction extends HandledTransportAction<SearchRequ
     @Override
     protected void doExecute(SearchRequest request, ActionListener<SearchResponse> listener) {
         listener.onResponse(new SearchResponse(new InternalSearchResponse(
-            new InternalSearchHits(
-                new InternalSearchHit[0], 0L, 0.0f),
+            new SearchHits(
+                new SearchHit[0], 0L, 0.0f),
             new InternalAggregations(Collections.emptyList()),
             new Suggest(Collections.emptyList()),
             new SearchProfileShardResults(Collections.emptyMap()), false, false), "", 1, 1, 0, new ShardSearchFailure[0]));

@@ -0,0 +1,255 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.client;
+
+import org.apache.http.HttpEntity;
+import org.apache.http.client.methods.HttpGet;
+import org.apache.http.client.methods.HttpHead;
+import org.apache.http.client.methods.HttpPost;
+import org.apache.http.client.methods.HttpPut;
+import org.apache.http.entity.ByteArrayEntity;
+import org.apache.http.entity.ContentType;
+import org.apache.lucene.util.BytesRef;
+import org.elasticsearch.action.DocWriteRequest;
+import org.elasticsearch.action.get.GetRequest;
+import org.elasticsearch.action.index.IndexRequest;
+import org.elasticsearch.action.support.ActiveShardCount;
+import org.elasticsearch.action.support.WriteRequest;
+import org.elasticsearch.common.Strings;
+import org.elasticsearch.common.lucene.uid.Versions;
+import org.elasticsearch.common.unit.TimeValue;
+import org.elasticsearch.index.VersionType;
+import org.elasticsearch.search.fetch.subphase.FetchSourceContext;
+
+import java.util.Collections;
+import java.util.HashMap;
+import java.util.Locale;
+import java.util.Map;
+import java.util.StringJoiner;
+
+final class Request {
+
+    private static final String DELIMITER = "/";
+
+    final String method;
+    final String endpoint;
+    final Map<String, String> params;
+    final HttpEntity entity;
+
+    Request(String method, String endpoint, Map<String, String> params, HttpEntity entity) {
+        this.method = method;
+        this.endpoint = endpoint;
+        this.params = params;
+        this.entity = entity;
+    }
+
+    @Override
+    public String toString() {
+        return "Request{" +
+                "method='" + method + '\'' +
+                ", endpoint='" + endpoint + '\'' +
+                ", params=" + params +
+                '}';
+    }
+
+    static Request ping() {
+        return new Request("HEAD", "/", Collections.emptyMap(), null);
+    }
+
+    static Request exists(GetRequest getRequest) {
+        Request request = get(getRequest);
+        return new Request(HttpHead.METHOD_NAME, request.endpoint, request.params, null);
+    }
+
+    static Request get(GetRequest getRequest) {
+        String endpoint = endpoint(getRequest.index(), getRequest.type(), getRequest.id());
+
+        Params parameters = Params.builder();
+        parameters.withPreference(getRequest.preference());
+        parameters.withRouting(getRequest.routing());
+        parameters.withParent(getRequest.parent());
+        parameters.withRefresh(getRequest.refresh());
+        parameters.withRealtime(getRequest.realtime());
+        parameters.withStoredFields(getRequest.storedFields());
+        parameters.withVersion(getRequest.version());
+        parameters.withVersionType(getRequest.versionType());
+        parameters.withFetchSourceContext(getRequest.fetchSourceContext());
+
+        return new Request(HttpGet.METHOD_NAME, endpoint, parameters.getParams(), null);
+    }
+
+    static Request index(IndexRequest indexRequest) {
+        String method = Strings.hasLength(indexRequest.id()) ? HttpPut.METHOD_NAME : HttpPost.METHOD_NAME;
+
+        boolean isCreate = (indexRequest.opType() == DocWriteRequest.OpType.CREATE);
+        String endpoint = endpoint(indexRequest.index(), indexRequest.type(), indexRequest.id(), isCreate ? "_create" : null);
+
+        Params parameters = Params.builder();
+        parameters.withRouting(indexRequest.routing());
+        parameters.withParent(indexRequest.parent());
+        parameters.withTimeout(indexRequest.timeout());
+        parameters.withVersion(indexRequest.version());
+        parameters.withVersionType(indexRequest.versionType());
+        parameters.withPipeline(indexRequest.getPipeline());
+        parameters.withRefreshPolicy(indexRequest.getRefreshPolicy());
+        parameters.withWaitForActiveShards(indexRequest.waitForActiveShards());
+
+        BytesRef source = indexRequest.source().toBytesRef();
+        ContentType contentType = ContentType.create(indexRequest.getContentType().mediaType());
+        HttpEntity entity = new ByteArrayEntity(source.bytes, source.offset, source.length, contentType);
+
+        return new Request(method, endpoint, parameters.getParams(), entity);
+    }
+
+    /**
+     * Utility method to build request's endpoint.
+     */
+    static String endpoint(String... parts) {
+        if (parts == null || parts.length == 0) {
+            return DELIMITER;
+        }
+
+        StringJoiner joiner = new StringJoiner(DELIMITER, DELIMITER, "");
+        for (String part : parts) {
+            if (part != null) {
+                joiner.add(part);
+            }
+        }
+        return joiner.toString();
+    }
+
+    /**
+     * Utility class to build request's parameters map and centralize all parameter names.
+     */
+    static class Params {
+        private final Map<String, String> params = new HashMap<>();
+
+        private Params() {
+        }
+
+        Params putParam(String key, String value) {
+            if (Strings.hasLength(value)) {
+                if (params.putIfAbsent(key, value) != null) {
+                    throw new IllegalArgumentException("Request parameter [" + key + "] is already registered");
+                }
+            }
+            return this;
+        }
+
+        Params putParam(String key, TimeValue value) {
+            if (value != null) {
+                return putParam(key, value.getStringRep());
+            }
+            return this;
+        }
+
+        Params withFetchSourceContext(FetchSourceContext fetchSourceContext) {
+            if (fetchSourceContext != null) {
+                if (fetchSourceContext.fetchSource() == false) {
+                    putParam("_source", Boolean.FALSE.toString());
+                }
+                if (fetchSourceContext.includes() != null && fetchSourceContext.includes().length > 0) {
+                    putParam("_source_include", String.join(",", fetchSourceContext.includes()));
+                }
+                if (fetchSourceContext.excludes() != null && fetchSourceContext.excludes().length > 0) {
+                    putParam("_source_exclude", String.join(",", fetchSourceContext.excludes()));
+                }
+            }
+            return this;
+        }
+
+        Params withParent(String parent) {
+            return putParam("parent", parent);
+        }
+
+        Params withPipeline(String pipeline) {
+            return putParam("pipeline", pipeline);
+        }
+
+        Params withPreference(String preference) {
+            return putParam("preference", preference);
+        }
+
+        Params withRealtime(boolean realtime) {
+            if (realtime == false) {
+                return putParam("realtime", Boolean.FALSE.toString());
+            }
+            return this;
+        }
+
+        Params withRefresh(boolean refresh) {
+            if (refresh) {
+                return withRefreshPolicy(WriteRequest.RefreshPolicy.IMMEDIATE);
+            }
+            return this;
+        }
+
+        Params withRefreshPolicy(WriteRequest.RefreshPolicy refreshPolicy) {
+            if (refreshPolicy != WriteRequest.RefreshPolicy.NONE) {
+                putParam("refresh", refreshPolicy.getValue());
+            }
+            return this;
+        }
+
+        Params withRouting(String routing) {
+            return putParam("routing", routing);
+        }
+
+        Params withStoredFields(String[] storedFields) {
+            if (storedFields != null && storedFields.length > 0) {
+                return putParam("stored_fields", String.join(",", storedFields));
+            }
+            return this;
+        }
+
+        Params withTimeout(TimeValue timeout) {
+            return putParam("timeout", timeout);
+        }
+
+        Params withVersion(long version) {
+            if (version != Versions.MATCH_ANY) {
+                return putParam("version", Long.toString(version));
+            }
+            return this;
+        }
+
+        Params withVersionType(VersionType versionType) {
+            if (versionType != VersionType.INTERNAL) {
+                return putParam("version_type", versionType.name().toLowerCase(Locale.ROOT));
+            }
+            return this;
+        }
+
+        Params withWaitForActiveShards(ActiveShardCount activeShardCount) {
+            if (activeShardCount != null && activeShardCount != ActiveShardCount.DEFAULT) {
+                return putParam("wait_for_active_shards", activeShardCount.toString().toLowerCase(Locale.ROOT));
+            }
+            return this;
+        }
+
+        Map<String, String> getParams() {
+            return Collections.unmodifiableMap(params);
+        }
+
+        static Params builder() {
+            return new Params();
+        }
+    }
+}
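
Request.endpoint() deserves a second look: null parts are silently skipped, which is how index() appends the optional _create suffix only when the op type calls for it. A standalone restatement with example outputs (ours, for illustration):

<pre>
import java.util.StringJoiner;

class EndpointDemo {
    // Same joining rules as Request.endpoint(): skip null parts,
    // always prefix the result with a single '/'.
    static String endpoint(String... parts) {
        if (parts == null || parts.length == 0) {
            return "/";
        }
        StringJoiner joiner = new StringJoiner("/", "/", "");
        for (String part : parts) {
            if (part != null) {
                joiner.add(part);
            }
        }
        return joiner.toString();
    }

    public static void main(String[] args) {
        System.out.println(endpoint("twitter", "tweet", "1"));          // /twitter/tweet/1
        System.out.println(endpoint("index", "type", null, "_create")); // /index/type/_create
        System.out.println(endpoint());                                 // /
    }
}
</pre>
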
@@ -19,20 +19,39 @@
 
 package org.elasticsearch.client;
 
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
 import org.apache.http.Header;
+import org.apache.http.HttpEntity;
+import org.elasticsearch.ElasticsearchException;
+import org.elasticsearch.ElasticsearchStatusException;
+import org.elasticsearch.action.ActionListener;
+import org.elasticsearch.action.ActionRequest;
+import org.elasticsearch.action.ActionRequestValidationException;
+import org.elasticsearch.action.get.GetRequest;
+import org.elasticsearch.action.get.GetResponse;
+import org.elasticsearch.action.index.IndexRequest;
+import org.elasticsearch.action.index.IndexResponse;
+import org.elasticsearch.action.main.MainRequest;
+import org.elasticsearch.common.CheckedFunction;
+import org.elasticsearch.common.xcontent.NamedXContentRegistry;
+import org.elasticsearch.common.xcontent.XContentParser;
+import org.elasticsearch.common.xcontent.XContentType;
+import org.elasticsearch.rest.BytesRestResponse;
+import org.elasticsearch.rest.RestStatus;
 
 import java.io.IOException;
+import java.util.Collections;
 import java.util.Objects;
+import java.util.Set;
+import java.util.function.Function;
+
+import static java.util.Collections.emptySet;
+import static java.util.Collections.singleton;
 
 /**
  * High level REST client that wraps an instance of the low level {@link RestClient} and allows to build requests and read responses.
  * The provided {@link RestClient} is externally built and closed.
  */
-public class RestHighLevelClient {
+public final class RestHighLevelClient {
+
+    private static final Log logger = LogFactory.getLog(RestHighLevelClient.class);
 
     private final RestClient client;
 
@@ -40,14 +59,204 @@ public final class RestHighLevelClient {
         this.client = Objects.requireNonNull(client);
     }
 
-    public boolean ping(Header... headers) {
+    /**
+     * Pings the remote Elasticsearch cluster and returns true if the ping succeeded, false otherwise
+     */
+    public boolean ping(Header... headers) throws IOException {
+        return performRequest(new MainRequest(), (request) -> Request.ping(), RestHighLevelClient::convertExistsResponse,
+                emptySet(), headers);
+    }
+
+    /**
+     * Retrieves a document by id using the Get API
+     *
+     * See <a href="https://www.elastic.co/guide/en/elasticsearch/reference/current/docs-get.html">Get API on elastic.co</a>
+     */
+    public GetResponse get(GetRequest getRequest, Header... headers) throws IOException {
+        return performRequestAndParseEntity(getRequest, Request::get, GetResponse::fromXContent, singleton(404), headers);
+    }
+
+    /**
+     * Asynchronously retrieves a document by id using the Get API
+     *
+     * See <a href="https://www.elastic.co/guide/en/elasticsearch/reference/current/docs-get.html">Get API on elastic.co</a>
+     */
+    public void getAsync(GetRequest getRequest, ActionListener<GetResponse> listener, Header... headers) {
+        performRequestAsyncAndParseEntity(getRequest, Request::get, GetResponse::fromXContent, listener, singleton(404), headers);
+    }
+
+    /**
+     * Checks for the existence of a document. Returns true if it exists, false otherwise
+     *
+     * See <a href="https://www.elastic.co/guide/en/elasticsearch/reference/current/docs-get.html">Get API on elastic.co</a>
+     */
+    public boolean exists(GetRequest getRequest, Header... headers) throws IOException {
+        return performRequest(getRequest, Request::exists, RestHighLevelClient::convertExistsResponse, emptySet(), headers);
+    }
+
+    /**
+     * Asynchronously checks for the existence of a document. Returns true if it exists, false otherwise
+     *
+     * See <a href="https://www.elastic.co/guide/en/elasticsearch/reference/current/docs-get.html">Get API on elastic.co</a>
+     */
+    public void existsAsync(GetRequest getRequest, ActionListener<Boolean> listener, Header... headers) {
+        performRequestAsync(getRequest, Request::exists, RestHighLevelClient::convertExistsResponse, listener, emptySet(), headers);
+    }
+
+    /**
+     * Index a document using the Index API
+     *
+     * See <a href="https://www.elastic.co/guide/en/elasticsearch/reference/current/docs-index_.html">Index API on elastic.co</a>
+     */
+    public IndexResponse index(IndexRequest indexRequest, Header... headers) throws IOException {
+        return performRequestAndParseEntity(indexRequest, Request::index, IndexResponse::fromXContent, emptySet(), headers);
+    }
+
+    /**
+     * Asynchronously index a document using the Index API
+     *
+     * See <a href="https://www.elastic.co/guide/en/elasticsearch/reference/current/docs-index_.html">Index API on elastic.co</a>
+     */
+    public void indexAsync(IndexRequest indexRequest, ActionListener<IndexResponse> listener, Header... headers) {
+        performRequestAsyncAndParseEntity(indexRequest, Request::index, IndexResponse::fromXContent, listener, emptySet(), headers);
+    }
+
+    private <Req extends ActionRequest, Resp> Resp performRequestAndParseEntity(Req request, Function<Req, Request> requestConverter,
+            CheckedFunction<XContentParser, Resp, IOException> entityParser, Set<Integer> ignores, Header... headers) throws IOException {
+        return performRequest(request, requestConverter, (response) -> parseEntity(response.getEntity(), entityParser), ignores, headers);
+    }
+
+    <Req extends ActionRequest, Resp> Resp performRequest(Req request, Function<Req, Request> requestConverter,
+            CheckedFunction<Response, Resp, IOException> responseConverter, Set<Integer> ignores, Header... headers) throws IOException {
+
+        ActionRequestValidationException validationException = request.validate();
+        if (validationException != null) {
+            throw validationException;
+        }
+        Request req = requestConverter.apply(request);
+        Response response;
         try {
-            client.performRequest("HEAD", "/", headers);
-            return true;
-        } catch(IOException exception) {
-            return false;
+            response = client.performRequest(req.method, req.endpoint, req.params, req.entity, headers);
+        } catch (ResponseException e) {
+            if (ignores.contains(e.getResponse().getStatusLine().getStatusCode())) {
+                try {
+                    return responseConverter.apply(e.getResponse());
+                } catch (Exception innerException) {
+                    throw parseResponseException(e);
+                }
+            }
+            throw parseResponseException(e);
         }
+        try {
+            return responseConverter.apply(response);
+        } catch(Exception e) {
+            throw new IOException("Unable to parse response body for " + response, e);
+        }
     }
+
+    private <Req extends ActionRequest, Resp> void performRequestAsyncAndParseEntity(Req request, Function<Req, Request> requestConverter,
+            CheckedFunction<XContentParser, Resp, IOException> entityParser, ActionListener<Resp> listener,
+            Set<Integer> ignores, Header... headers) {
+        performRequestAsync(request, requestConverter, (response) -> parseEntity(response.getEntity(), entityParser),
+                listener, ignores, headers);
+    }
+
+    <Req extends ActionRequest, Resp> void performRequestAsync(Req request, Function<Req, Request> requestConverter,
+            CheckedFunction<Response, Resp, IOException> responseConverter, ActionListener<Resp> listener,
+            Set<Integer> ignores, Header... headers) {
+        ActionRequestValidationException validationException = request.validate();
+        if (validationException != null) {
+            listener.onFailure(validationException);
+            return;
+        }
+        Request req = requestConverter.apply(request);
+        ResponseListener responseListener = wrapResponseListener(responseConverter, listener, ignores);
+        client.performRequestAsync(req.method, req.endpoint, req.params, req.entity, responseListener, headers);
+    }
+
+    static <Resp> ResponseListener wrapResponseListener(CheckedFunction<Response, Resp, IOException> responseConverter,
+            ActionListener<Resp> actionListener, Set<Integer> ignores) {
+        return new ResponseListener() {
+            @Override
+            public void onSuccess(Response response) {
+                try {
+                    actionListener.onResponse(responseConverter.apply(response));
+                } catch(Exception e) {
+                    IOException ioe = new IOException("Unable to parse response body for " + response, e);
+                    onFailure(ioe);
+                }
+            }
+
+            @Override
+            public void onFailure(Exception exception) {
+                if (exception instanceof ResponseException) {
+                    ResponseException responseException = (ResponseException) exception;
+                    Response response = responseException.getResponse();
+                    if (ignores.contains(response.getStatusLine().getStatusCode())) {
+                        try {
+                            actionListener.onResponse(responseConverter.apply(response));
+                        } catch (Exception innerException) {
+                            //the exception is ignored as we now try to parse the response as an error.
+                            //this covers cases like get where 404 can either be a valid document not found response,
+                            //or an error for which parsing is completely different. We try to consider the 404 response as a valid one
+                            //first. If parsing of the response breaks, we fall back to parsing it as an error.
+                            actionListener.onFailure(parseResponseException(responseException));
+                        }
+                    } else {
+                        actionListener.onFailure(parseResponseException(responseException));
+                    }
+                } else {
+                    actionListener.onFailure(exception);
+                }
+            }
+        };
+    }
+
+    /**
+     * Converts a {@link ResponseException} obtained from the low level REST client into an {@link ElasticsearchException}.
+     * If a response body was returned, tries to parse it as an error returned from Elasticsearch.
+     * If no response body was returned or anything goes wrong while parsing the error, returns a new {@link ElasticsearchStatusException}
+     * that wraps the original {@link ResponseException}. The potential exception obtained while parsing is added to the returned
+     * exception as a suppressed exception. This method is guaranteed to not throw any exception eventually thrown while parsing.
+     */
+    static ElasticsearchStatusException parseResponseException(ResponseException responseException) {
+        Response response = responseException.getResponse();
+        HttpEntity entity = response.getEntity();
+        ElasticsearchStatusException elasticsearchException;
+        if (entity == null) {
+            elasticsearchException = new ElasticsearchStatusException(
+                    responseException.getMessage(), RestStatus.fromCode(response.getStatusLine().getStatusCode()), responseException);
+        } else {
+            try {
+                elasticsearchException = parseEntity(entity, BytesRestResponse::errorFromXContent);
+                elasticsearchException.addSuppressed(responseException);
+            } catch (Exception e) {
+                RestStatus restStatus = RestStatus.fromCode(response.getStatusLine().getStatusCode());
+                elasticsearchException = new ElasticsearchStatusException("Unable to parse response body", restStatus, responseException);
+                elasticsearchException.addSuppressed(e);
+            }
+        }
+        return elasticsearchException;
+    }
+
+    static <Resp> Resp parseEntity(
+            HttpEntity entity, CheckedFunction<XContentParser, Resp, IOException> entityParser) throws IOException {
+        if (entity == null) {
+            throw new IllegalStateException("Response body expected but not returned");
+        }
+        if (entity.getContentType() == null) {
+            throw new IllegalStateException("Elasticsearch didn't return the [Content-Type] header, unable to parse response body");
+        }
+        XContentType xContentType = XContentType.fromMediaTypeOrFormat(entity.getContentType().getValue());
+        if (xContentType == null) {
+            throw new IllegalStateException("Unsupported Content-Type: " + entity.getContentType().getValue());
+        }
+        try (XContentParser parser = xContentType.xContent().createParser(NamedXContentRegistry.EMPTY, entity.getContent())) {
+            return entityParser.apply(parser);
+        }
+    }
+
+    static boolean convertExistsResponse(Response response) {
+        return response.getStatusLine().getStatusCode() == 200;
+    }
 }
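
Putting the pieces together, a caller wires the high-level client on top of an externally built low-level RestClient, exactly as the class javadoc describes. A minimal usage sketch (ours, assuming a node at localhost:9200 and the low-level client's builder API):

<pre>
import org.apache.http.HttpHost;
import org.elasticsearch.action.get.GetRequest;
import org.elasticsearch.action.get.GetResponse;
import org.elasticsearch.client.RestClient;
import org.elasticsearch.client.RestHighLevelClient;

public class HighLevelClientDemo {
    public static void main(String[] args) throws Exception {
        // The high-level client wraps the low-level RestClient; the caller
        // builds and closes the low-level client itself.
        try (RestClient lowLevel = RestClient.builder(new HttpHost("localhost", 9200, "http")).build()) {
            RestHighLevelClient client = new RestHighLevelClient(lowLevel);
            if (client.ping()) {
                GetResponse response = client.get(new GetRequest("twitter", "tweet", "1"));
                System.out.println(response.isExists());
            }
        }
    }
}
</pre>
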
@@ -0,0 +1,269 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.client;
+
+import org.apache.http.entity.ContentType;
+import org.apache.http.entity.StringEntity;
+import org.elasticsearch.ElasticsearchException;
+import org.elasticsearch.ElasticsearchStatusException;
+import org.elasticsearch.action.DocWriteRequest;
+import org.elasticsearch.action.DocWriteResponse;
+import org.elasticsearch.action.get.GetRequest;
+import org.elasticsearch.action.get.GetResponse;
+import org.elasticsearch.action.index.IndexRequest;
+import org.elasticsearch.action.index.IndexResponse;
+import org.elasticsearch.common.Strings;
+import org.elasticsearch.common.xcontent.XContentBuilder;
+import org.elasticsearch.common.xcontent.XContentType;
+import org.elasticsearch.index.VersionType;
+import org.elasticsearch.rest.RestStatus;
+import org.elasticsearch.search.fetch.subphase.FetchSourceContext;
+
+import java.io.IOException;
+import java.util.Collections;
+import java.util.Map;
+
+import static org.hamcrest.CoreMatchers.containsString;
+
+public class CrudIT extends ESRestHighLevelClientTestCase {
+
+    @AwaitsFix(bugUrl = "https://github.com/elastic/elasticsearch/pull/23196")
+    public void testExists() throws IOException {
+        {
+            GetRequest getRequest = new GetRequest("index", "type", "id");
+            assertFalse(execute(getRequest, highLevelClient()::exists, highLevelClient()::existsAsync));
+        }
+        String document = "{\"field1\":\"value1\",\"field2\":\"value2\"}";
+        StringEntity stringEntity = new StringEntity(document, ContentType.APPLICATION_JSON);
+        Response response = client().performRequest("PUT", "/index/type/id", Collections.singletonMap("refresh", "wait_for"), stringEntity);
+        assertEquals(201, response.getStatusLine().getStatusCode());
+        {
+            GetRequest getRequest = new GetRequest("index", "type", "id");
+            assertTrue(execute(getRequest, highLevelClient()::exists, highLevelClient()::existsAsync));
+        }
+        {
+            GetRequest getRequest = new GetRequest("index", "type", "does_not_exist");
+            assertFalse(execute(getRequest, highLevelClient()::exists, highLevelClient()::existsAsync));
+        }
+        {
+            GetRequest getRequest = new GetRequest("index", "type", "does_not_exist").version(1);
+            ElasticsearchException exception = expectThrows(ElasticsearchException.class,
+                    () -> execute(getRequest, highLevelClient()::exists, highLevelClient()::existsAsync));
+            assertEquals(RestStatus.BAD_REQUEST, exception.status());
+            assertThat(exception.getMessage(), containsString("/index/type/does_not_exist?version=1: HTTP/1.1 400 Bad Request"));
+        }
+    }
+
+    public void testGet() throws IOException {
+        {
+            GetRequest getRequest = new GetRequest("index", "type", "id");
+            ElasticsearchException exception = expectThrows(ElasticsearchException.class,
+                    () -> execute(getRequest, highLevelClient()::get, highLevelClient()::getAsync));
+            assertEquals(RestStatus.NOT_FOUND, exception.status());
+            assertEquals("Elasticsearch exception [type=index_not_found_exception, reason=no such index]", exception.getMessage());
+            assertEquals("index", exception.getMetadata("es.index").get(0));
+        }
+
+        String document = "{\"field1\":\"value1\",\"field2\":\"value2\"}";
+        StringEntity stringEntity = new StringEntity(document, ContentType.APPLICATION_JSON);
+        Response response = client().performRequest("PUT", "/index/type/id", Collections.singletonMap("refresh", "wait_for"), stringEntity);
+        assertEquals(201, response.getStatusLine().getStatusCode());
+        {
+            GetRequest getRequest = new GetRequest("index", "type", "id").version(2);
+            ElasticsearchException exception = expectThrows(ElasticsearchException.class,
+                    () -> execute(getRequest, highLevelClient()::get, highLevelClient()::getAsync));
+            assertEquals(RestStatus.CONFLICT, exception.status());
+            assertEquals("Elasticsearch exception [type=version_conflict_engine_exception, " + "reason=[type][id]: " +
+                    "version conflict, current version [1] is different than the one provided [2]]", exception.getMessage());
+            assertEquals("index", exception.getMetadata("es.index").get(0));
+        }
+        {
+            GetRequest getRequest = new GetRequest("index", "type", "id");
+            if (randomBoolean()) {
+                getRequest.version(1L);
+            }
+            GetResponse getResponse = execute(getRequest, highLevelClient()::get, highLevelClient()::getAsync);
+            assertEquals("index", getResponse.getIndex());
+            assertEquals("type", getResponse.getType());
+            assertEquals("id", getResponse.getId());
+            assertTrue(getResponse.isExists());
+            assertFalse(getResponse.isSourceEmpty());
+            assertEquals(1L, getResponse.getVersion());
+            assertEquals(document, getResponse.getSourceAsString());
+        }
+        {
+            GetRequest getRequest = new GetRequest("index", "type", "does_not_exist");
+            GetResponse getResponse = execute(getRequest, highLevelClient()::get, highLevelClient()::getAsync);
+            assertEquals("index", getResponse.getIndex());
+            assertEquals("type", getResponse.getType());
+            assertEquals("does_not_exist", getResponse.getId());
+            assertFalse(getResponse.isExists());
+            assertEquals(-1, getResponse.getVersion());
+            assertTrue(getResponse.isSourceEmpty());
+            assertNull(getResponse.getSourceAsString());
+        }
+        {
+            GetRequest getRequest = new GetRequest("index", "type", "id");
+            getRequest.fetchSourceContext(new FetchSourceContext(false, Strings.EMPTY_ARRAY, Strings.EMPTY_ARRAY));
+            GetResponse getResponse = execute(getRequest, highLevelClient()::get, highLevelClient()::getAsync);
+            assertEquals("index", getResponse.getIndex());
+            assertEquals("type", getResponse.getType());
+            assertEquals("id", getResponse.getId());
+            assertTrue(getResponse.isExists());
+            assertTrue(getResponse.isSourceEmpty());
+            assertEquals(1L, getResponse.getVersion());
+            assertNull(getResponse.getSourceAsString());
+        }
+        {
+            GetRequest getRequest = new GetRequest("index", "type", "id");
+            if (randomBoolean()) {
+                getRequest.fetchSourceContext(new FetchSourceContext(true, new String[]{"field1"}, Strings.EMPTY_ARRAY));
+            } else {
+                getRequest.fetchSourceContext(new FetchSourceContext(true, Strings.EMPTY_ARRAY, new String[]{"field2"}));
+            }
+            GetResponse getResponse = execute(getRequest, highLevelClient()::get, highLevelClient()::getAsync);
+            assertEquals("index", getResponse.getIndex());
+            assertEquals("type", getResponse.getType());
+            assertEquals("id", getResponse.getId());
+            assertTrue(getResponse.isExists());
+            assertFalse(getResponse.isSourceEmpty());
+            assertEquals(1L, getResponse.getVersion());
+            Map<String, Object> sourceAsMap = getResponse.getSourceAsMap();
+            assertEquals(1, sourceAsMap.size());
+            assertEquals("value1", sourceAsMap.get("field1"));
+        }
+    }
+
+    public void testIndex() throws IOException {
+        final XContentType xContentType = randomFrom(XContentType.values());
+        {
+            IndexRequest indexRequest = new IndexRequest("index", "type");
+            indexRequest.source(XContentBuilder.builder(xContentType.xContent()).startObject().field("test", "test").endObject());
+
+            IndexResponse indexResponse = execute(indexRequest, highLevelClient()::index, highLevelClient()::indexAsync);
+            assertEquals(RestStatus.CREATED, indexResponse.status());
+            assertEquals(DocWriteResponse.Result.CREATED, indexResponse.getResult());
+            assertEquals("index", indexResponse.getIndex());
+            assertEquals("type", indexResponse.getType());
+            assertTrue(Strings.hasLength(indexResponse.getId()));
+            assertEquals(1L, indexResponse.getVersion());
+            assertNotNull(indexResponse.getShardId());
+            assertEquals(-1, indexResponse.getShardId().getId());
+            assertEquals("index", indexResponse.getShardId().getIndexName());
+            assertEquals("index", indexResponse.getShardId().getIndex().getName());
+            assertEquals("_na_", indexResponse.getShardId().getIndex().getUUID());
+            assertNotNull(indexResponse.getShardInfo());
+            assertEquals(0, indexResponse.getShardInfo().getFailed());
+            assertTrue(indexResponse.getShardInfo().getSuccessful() > 0);
+            assertTrue(indexResponse.getShardInfo().getTotal() > 0);
+        }
+        {
+            IndexRequest indexRequest = new IndexRequest("index", "type", "id");
+            indexRequest.source(XContentBuilder.builder(xContentType.xContent()).startObject().field("version", 1).endObject());
+
+            IndexResponse indexResponse = execute(indexRequest, highLevelClient()::index, highLevelClient()::indexAsync);
+            assertEquals(RestStatus.CREATED, indexResponse.status());
+            assertEquals("index", indexResponse.getIndex());
+            assertEquals("type", indexResponse.getType());
+            assertEquals("id", indexResponse.getId());
+            assertEquals(1L, indexResponse.getVersion());
+
+            indexRequest = new IndexRequest("index", "type", "id");
+            indexRequest.source(XContentBuilder.builder(xContentType.xContent()).startObject().field("version", 2).endObject());
+
+            indexResponse = execute(indexRequest, highLevelClient()::index, highLevelClient()::indexAsync);
+            assertEquals(RestStatus.OK, indexResponse.status());
+            assertEquals("index", indexResponse.getIndex());
+            assertEquals("type", indexResponse.getType());
+            assertEquals("id", indexResponse.getId());
+            assertEquals(2L, indexResponse.getVersion());
+
+            ElasticsearchStatusException exception = expectThrows(ElasticsearchStatusException.class, () -> {
+                IndexRequest wrongRequest = new IndexRequest("index", "type", "id");
+                wrongRequest.source(XContentBuilder.builder(xContentType.xContent()).startObject().field("field", "test").endObject());
+                wrongRequest.version(5L);
+
+                execute(wrongRequest, highLevelClient()::index, highLevelClient()::indexAsync);
+            });
+            assertEquals(RestStatus.CONFLICT, exception.status());
+            assertEquals("Elasticsearch exception [type=version_conflict_engine_exception, reason=[type][id]: " +
+                    "version conflict, current version [2] is different than the one provided [5]]", exception.getMessage());
+            assertEquals("index", exception.getMetadata("es.index").get(0));
+        }
+        {
+            ElasticsearchStatusException exception = expectThrows(ElasticsearchStatusException.class, () -> {
+                IndexRequest indexRequest = new IndexRequest("index", "type", "missing_parent");
+                indexRequest.source(XContentBuilder.builder(xContentType.xContent()).startObject().field("field", "test").endObject());
+                indexRequest.parent("missing");
+
+                execute(indexRequest, highLevelClient()::index, highLevelClient()::indexAsync);
+            });
+
+            assertEquals(RestStatus.BAD_REQUEST, exception.status());
+            assertEquals("Elasticsearch exception [type=illegal_argument_exception, " +
+                    "reason=Can't specify parent if no parent field has been configured]", exception.getMessage());
+        }
+        {
+            ElasticsearchStatusException exception = expectThrows(ElasticsearchStatusException.class, () -> {
+                IndexRequest indexRequest = new IndexRequest("index", "type", "missing_pipeline");
+                indexRequest.source(XContentBuilder.builder(xContentType.xContent()).startObject().field("field", "test").endObject());
+                indexRequest.setPipeline("missing");
+
+                execute(indexRequest, highLevelClient()::index, highLevelClient()::indexAsync);
+            });
+
+            assertEquals(RestStatus.BAD_REQUEST, exception.status());
+            assertEquals("Elasticsearch exception [type=illegal_argument_exception, " +
+                    "reason=pipeline with id [missing] does not exist]", exception.getMessage());
+        }
+        {
+            IndexRequest indexRequest = new IndexRequest("index", "type", "external_version_type");
+            indexRequest.source(XContentBuilder.builder(xContentType.xContent()).startObject().field("field", "test").endObject());
+            indexRequest.version(12L);
+            indexRequest.versionType(VersionType.EXTERNAL);
+
+            IndexResponse indexResponse = execute(indexRequest, highLevelClient()::index, highLevelClient()::indexAsync);
+            assertEquals(RestStatus.CREATED, indexResponse.status());
+            assertEquals("index", indexResponse.getIndex());
+            assertEquals("type", indexResponse.getType());
+            assertEquals("external_version_type", indexResponse.getId());
+            assertEquals(12L, indexResponse.getVersion());
+        }
+        {
+            final IndexRequest indexRequest = new IndexRequest("index", "type", "with_create_op_type");
+            indexRequest.source(XContentBuilder.builder(xContentType.xContent()).startObject().field("field", "test").endObject());
+            indexRequest.opType(DocWriteRequest.OpType.CREATE);
+
+            IndexResponse indexResponse = execute(indexRequest, highLevelClient()::index, highLevelClient()::indexAsync);
+            assertEquals(RestStatus.CREATED, indexResponse.status());
+            assertEquals("index", indexResponse.getIndex());
+            assertEquals("type", indexResponse.getType());
+            assertEquals("with_create_op_type", indexResponse.getId());
+
+            ElasticsearchStatusException exception = expectThrows(ElasticsearchStatusException.class, () -> {
+                execute(indexRequest, highLevelClient()::index, highLevelClient()::indexAsync);
+            });
+
+            assertEquals(RestStatus.CONFLICT, exception.status());
+            assertEquals("Elasticsearch exception [type=version_conflict_engine_exception, reason=[type][with_create_op_type]: " +
+                    "version conflict, document already exists (current version [1])]", exception.getMessage());
+        }
+    }
+}
@@ -19,6 +19,9 @@
 
 package org.elasticsearch.client;
 
+import org.apache.http.Header;
+import org.elasticsearch.action.ActionListener;
+import org.elasticsearch.action.support.PlainActionFuture;
 import org.elasticsearch.test.rest.ESRestTestCase;
 import org.junit.AfterClass;
 import org.junit.Before;

@@ -45,4 +48,28 @@ public abstract class ESRestHighLevelClientTestCase extends ESRestTestCase {
     protected static RestHighLevelClient highLevelClient() {
         return restHighLevelClient;
     }
+
+    /**
+     * Executes the provided request using either the sync method or its async variant, both provided as functions
+     */
+    protected static <Req, Resp> Resp execute(Req request, SyncMethod<Req, Resp> syncMethod,
+            AsyncMethod<Req, Resp> asyncMethod, Header... headers) throws IOException {
+        if (randomBoolean()) {
+            return syncMethod.execute(request, headers);
+        } else {
+            PlainActionFuture<Resp> future = PlainActionFuture.newFuture();
+            asyncMethod.execute(request, future, headers);
+            return future.actionGet();
+        }
+    }
+
+    @FunctionalInterface
+    protected interface SyncMethod<Request, Response> {
+        Response execute(Request request, Header... headers) throws IOException;
+    }
+
+    @FunctionalInterface
+    protected interface AsyncMethod<Request, Response> {
+        void execute(Request request, ActionListener<Response> listener, Header... headers);
+    }
 }
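
The execute() helper randomly routes each test call through either the sync method or the async variant bridged via a future, so both code paths get exercised across test runs. The same pattern, restated with JDK types only (an analogy, not the test framework's code):

<pre>
import java.util.concurrent.CompletableFuture;
import java.util.concurrent.ThreadLocalRandom;
import java.util.function.BiConsumer;
import java.util.function.Function;

class SyncAsyncDemo {
    // Randomly pick the sync path or the async path; the async result is
    // bridged back through a future, mirroring PlainActionFuture above.
    static <Req, Resp> Resp execute(Req request,
                                    Function<Req, Resp> sync,
                                    BiConsumer<Req, CompletableFuture<Resp>> async) {
        if (ThreadLocalRandom.current().nextBoolean()) {
            return sync.apply(request);
        }
        CompletableFuture<Resp> future = new CompletableFuture<>();
        async.accept(request, future);
        return future.join();
    }
}
</pre>
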
@@ -19,9 +19,13 @@
 
 package org.elasticsearch.client;
 
-public class MainActionIT extends ESRestHighLevelClientTestCase {
+import java.io.IOException;
 
-    public void testPing() {
+public class PingAndInfoIT extends ESRestHighLevelClientTestCase {
+
+    public void testPing() throws IOException {
         assertTrue(highLevelClient().ping());
     }
+
+    //TODO add here integ tests for info api: "GET /" once we have parsing code for MainResponse
 }

@@ -0,0 +1,310 @@
/*
 * Licensed to Elasticsearch under one or more contributor
 * license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright
 * ownership. Elasticsearch licenses this file to you under
 * the Apache License, Version 2.0 (the "License"); you may
 * not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied. See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */

package org.elasticsearch.client;

import org.apache.http.HttpEntity;
import org.apache.http.entity.ByteArrayEntity;
import org.elasticsearch.action.DocWriteRequest;
import org.elasticsearch.action.get.GetRequest;
import org.elasticsearch.action.index.IndexRequest;
import org.elasticsearch.action.support.WriteRequest;
import org.elasticsearch.action.support.replication.ReplicationRequest;
import org.elasticsearch.common.Strings;
import org.elasticsearch.common.lucene.uid.Versions;
import org.elasticsearch.common.xcontent.XContentBuilder;
import org.elasticsearch.common.xcontent.XContentParser;
import org.elasticsearch.common.xcontent.XContentType;
import org.elasticsearch.index.VersionType;
import org.elasticsearch.search.fetch.subphase.FetchSourceContext;
import org.elasticsearch.test.ESTestCase;

import java.io.IOException;
import java.util.HashMap;
import java.util.Locale;
import java.util.Map;
import java.util.function.Function;

public class RequestTests extends ESTestCase {

    public void testPing() {
        Request request = Request.ping();
        assertEquals("/", request.endpoint);
        assertEquals(0, request.params.size());
        assertNull(request.entity);
        assertEquals("HEAD", request.method);
    }

    public void testGet() {
        getAndExistsTest(Request::get, "GET");
    }

    public void testExists() {
        getAndExistsTest(Request::exists, "HEAD");
    }

    private static void getAndExistsTest(Function<GetRequest, Request> requestConverter, String method) {
        String index = randomAsciiOfLengthBetween(3, 10);
        String type = randomAsciiOfLengthBetween(3, 10);
        String id = randomAsciiOfLengthBetween(3, 10);
        GetRequest getRequest = new GetRequest(index, type, id);

        Map<String, String> expectedParams = new HashMap<>();
        if (randomBoolean()) {
            if (randomBoolean()) {
                String preference = randomAsciiOfLengthBetween(3, 10);
                getRequest.preference(preference);
                expectedParams.put("preference", preference);
            }
            if (randomBoolean()) {
                String routing = randomAsciiOfLengthBetween(3, 10);
                getRequest.routing(routing);
                expectedParams.put("routing", routing);
            }
            if (randomBoolean()) {
                boolean realtime = randomBoolean();
                getRequest.realtime(realtime);
                if (realtime == false) {
                    expectedParams.put("realtime", "false");
                }
            }
            if (randomBoolean()) {
                boolean refresh = randomBoolean();
                getRequest.refresh(refresh);
                if (refresh) {
                    expectedParams.put("refresh", "true");
                }
            }
            if (randomBoolean()) {
                long version = randomLong();
                getRequest.version(version);
                if (version != Versions.MATCH_ANY) {
                    expectedParams.put("version", Long.toString(version));
                }
            }
            if (randomBoolean()) {
                VersionType versionType = randomFrom(VersionType.values());
                getRequest.versionType(versionType);
                if (versionType != VersionType.INTERNAL) {
                    expectedParams.put("version_type", versionType.name().toLowerCase(Locale.ROOT));
                }
            }
            if (randomBoolean()) {
                int numStoredFields = randomIntBetween(1, 10);
                String[] storedFields = new String[numStoredFields];
                StringBuilder storedFieldsParam = new StringBuilder();
                for (int i = 0; i < numStoredFields; i++) {
                    String storedField = randomAsciiOfLengthBetween(3, 10);
                    storedFields[i] = storedField;
                    storedFieldsParam.append(storedField);
                    if (i < numStoredFields - 1) {
                        storedFieldsParam.append(",");
                    }
                }
                getRequest.storedFields(storedFields);
                expectedParams.put("stored_fields", storedFieldsParam.toString());
            }
            if (randomBoolean()) {
                if (randomBoolean()) {
                    boolean fetchSource = randomBoolean();
                    getRequest.fetchSourceContext(new FetchSourceContext(fetchSource));
                    if (fetchSource == false) {
                        expectedParams.put("_source", "false");
                    }
                } else {
                    int numIncludes = randomIntBetween(0, 5);
                    String[] includes = new String[numIncludes];
                    StringBuilder includesParam = new StringBuilder();
                    for (int i = 0; i < numIncludes; i++) {
                        String include = randomAsciiOfLengthBetween(3, 10);
                        includes[i] = include;
                        includesParam.append(include);
                        if (i < numIncludes - 1) {
                            includesParam.append(",");
                        }
                    }
                    if (numIncludes > 0) {
                        expectedParams.put("_source_include", includesParam.toString());
                    }
                    int numExcludes = randomIntBetween(0, 5);
                    String[] excludes = new String[numExcludes];
                    StringBuilder excludesParam = new StringBuilder();
                    for (int i = 0; i < numExcludes; i++) {
                        String exclude = randomAsciiOfLengthBetween(3, 10);
                        excludes[i] = exclude;
                        excludesParam.append(exclude);
                        if (i < numExcludes - 1) {
                            excludesParam.append(",");
                        }
                    }
                    if (numExcludes > 0) {
                        expectedParams.put("_source_exclude", excludesParam.toString());
                    }
                    getRequest.fetchSourceContext(new FetchSourceContext(true, includes, excludes));
                }
            }
        }
        Request request = requestConverter.apply(getRequest);
        assertEquals("/" + index + "/" + type + "/" + id, request.endpoint);
        assertEquals(expectedParams, request.params);
        assertNull(request.entity);
        assertEquals(method, request.method);
    }

    public void testIndex() throws IOException {
        String index = randomAsciiOfLengthBetween(3, 10);
        String type = randomAsciiOfLengthBetween(3, 10);
        IndexRequest indexRequest = new IndexRequest(index, type);

        String id = randomBoolean() ? randomAsciiOfLengthBetween(3, 10) : null;
        indexRequest.id(id);

        Map<String, String> expectedParams = new HashMap<>();

        String method = "POST";
        if (id != null) {
            method = "PUT";
            if (randomBoolean()) {
                indexRequest.opType(DocWriteRequest.OpType.CREATE);
            }
        }

        // There is some logic around _create endpoint and version/version type
        if (indexRequest.opType() == DocWriteRequest.OpType.CREATE) {
            indexRequest.version(randomFrom(Versions.MATCH_ANY, Versions.MATCH_DELETED));
            expectedParams.put("version", Long.toString(Versions.MATCH_DELETED));
        } else {
            if (randomBoolean()) {
                long version = randomFrom(Versions.MATCH_ANY, Versions.MATCH_DELETED, Versions.NOT_FOUND, randomNonNegativeLong());
                indexRequest.version(version);
                if (version != Versions.MATCH_ANY) {
                    expectedParams.put("version", Long.toString(version));
                }
            }
            if (randomBoolean()) {
                VersionType versionType = randomFrom(VersionType.values());
                indexRequest.versionType(versionType);
                if (versionType != VersionType.INTERNAL) {
                    expectedParams.put("version_type", versionType.name().toLowerCase(Locale.ROOT));
                }
            }
        }

        if (randomBoolean()) {
            String timeout = randomTimeValue();
            indexRequest.timeout(timeout);
            expectedParams.put("timeout", timeout);
        } else {
            expectedParams.put("timeout", ReplicationRequest.DEFAULT_TIMEOUT.getStringRep());
        }

        if (frequently()) {
            if (randomBoolean()) {
                String routing = randomAsciiOfLengthBetween(3, 10);
                indexRequest.routing(routing);
                expectedParams.put("routing", routing);
            }
            if (randomBoolean()) {
                String parent = randomAsciiOfLengthBetween(3, 10);
                indexRequest.parent(parent);
                expectedParams.put("parent", parent);
            }
            if (randomBoolean()) {
                String pipeline = randomAsciiOfLengthBetween(3, 10);
                indexRequest.setPipeline(pipeline);
                expectedParams.put("pipeline", pipeline);
            }

            if (randomBoolean()) {
                WriteRequest.RefreshPolicy refreshPolicy = randomFrom(WriteRequest.RefreshPolicy.values());
                indexRequest.setRefreshPolicy(refreshPolicy);
                if (refreshPolicy != WriteRequest.RefreshPolicy.NONE) {
                    expectedParams.put("refresh", refreshPolicy.getValue());
                }
            }
        }

        XContentType xContentType = randomFrom(XContentType.values());
        int nbFields = randomIntBetween(0, 10);
        try (XContentBuilder builder = XContentBuilder.builder(xContentType.xContent())) {
            builder.startObject();
            for (int i = 0; i < nbFields; i++) {
                builder.field("field_" + i, i);
            }
            builder.endObject();
            indexRequest.source(builder);
        }

        Request request = Request.index(indexRequest);
        if (indexRequest.opType() == DocWriteRequest.OpType.CREATE) {
            assertEquals("/" + index + "/" + type + "/" + id + "/_create", request.endpoint);
        } else if (id != null) {
            assertEquals("/" + index + "/" + type + "/" + id, request.endpoint);
        } else {
            assertEquals("/" + index + "/" + type, request.endpoint);
        }
        assertEquals(expectedParams, request.params);
        assertEquals(method, request.method);

        HttpEntity entity = request.entity;
        assertNotNull(entity);
        assertTrue(entity instanceof ByteArrayEntity);

        try (XContentParser parser = createParser(xContentType.xContent(), entity.getContent())) {
            assertEquals(nbFields, parser.map().size());
        }
    }

    public void testParams() {
        final int nbParams = randomIntBetween(0, 10);
        Request.Params params = Request.Params.builder();
        Map<String, String> expectedParams = new HashMap<>();
        for (int i = 0; i < nbParams; i++) {
            String paramName = "p_" + i;
            String paramValue = randomAsciiOfLength(5);
            params.putParam(paramName, paramValue);
            expectedParams.put(paramName, paramValue);
        }

        Map<String, String> requestParams = params.getParams();
        assertEquals(nbParams, requestParams.size());
        assertEquals(expectedParams, requestParams);
    }

    public void testParamsNoDuplicates() {
        Request.Params params = Request.Params.builder();
        params.putParam("test", "1");

        IllegalArgumentException e = expectThrows(IllegalArgumentException.class, () -> params.putParam("test", "2"));
        assertEquals("Request parameter [test] is already registered", e.getMessage());

        Map<String, String> requestParams = params.getParams();
        assertEquals(1L, requestParams.size());
        assertEquals("1", requestParams.values().iterator().next());
    }

    public void testEndpoint() {
        assertEquals("/", Request.endpoint());
        assertEquals("/", Request.endpoint(Strings.EMPTY_ARRAY));
        assertEquals("/", Request.endpoint(""));
        assertEquals("/a/b", Request.endpoint("a", "b"));
        assertEquals("/a/b/_create", Request.endpoint("a", "b", "_create"));
        assertEquals("/a/b/c/_create", Request.endpoint("a", "b", "c", "_create"));
    }
}
@@ -19,17 +19,51 @@

package org.elasticsearch.client;

import com.fasterxml.jackson.core.JsonParseException;
import org.apache.http.Header;
import org.apache.http.HttpEntity;
import org.apache.http.HttpHost;
import org.apache.http.HttpResponse;
import org.apache.http.ProtocolVersion;
import org.apache.http.RequestLine;
import org.apache.http.StatusLine;
import org.apache.http.entity.BasicHttpEntity;
import org.apache.http.entity.ByteArrayEntity;
import org.apache.http.entity.ContentType;
import org.apache.http.entity.StringEntity;
import org.apache.http.message.BasicHttpResponse;
import org.apache.http.message.BasicRequestLine;
import org.apache.http.message.BasicStatusLine;
import org.elasticsearch.ElasticsearchException;
import org.elasticsearch.action.ActionListener;
import org.elasticsearch.action.ActionRequest;
import org.elasticsearch.action.ActionRequestValidationException;
import org.elasticsearch.action.main.MainRequest;
import org.elasticsearch.common.CheckedFunction;
import org.elasticsearch.common.xcontent.XContentBuilder;
import org.elasticsearch.common.xcontent.XContentParser;
import org.elasticsearch.common.xcontent.cbor.CborXContent;
import org.elasticsearch.common.xcontent.smile.SmileXContent;
import org.elasticsearch.rest.RestStatus;
import org.elasticsearch.test.ESTestCase;
import org.junit.Before;
import org.mockito.ArgumentMatcher;
import org.mockito.Matchers;
import org.mockito.internal.matchers.ArrayEquals;
import org.mockito.internal.matchers.VarargMatcher;

import java.io.IOException;
import java.net.SocketTimeoutException;
import java.util.Collections;
import java.util.concurrent.atomic.AtomicInteger;
import java.util.concurrent.atomic.AtomicReference;
import java.util.function.Function;

import static org.mockito.Matchers.any;
import static org.hamcrest.CoreMatchers.instanceOf;
import static org.mockito.Matchers.anyMapOf;
import static org.mockito.Matchers.anyObject;
import static org.mockito.Matchers.anyString;
import static org.mockito.Matchers.anyVararg;
import static org.mockito.Matchers.argThat;
import static org.mockito.Matchers.eq;
import static org.mockito.Mockito.mock;
@@ -38,6 +72,9 @@ import static org.mockito.Mockito.when;

public class RestHighLevelClientTests extends ESTestCase {

    private static final ProtocolVersion HTTP_PROTOCOL = new ProtocolVersion("http", 1, 1);
    private static final RequestLine REQUEST_LINE = new BasicRequestLine("GET", "/", HTTP_PROTOCOL);

    private RestClient restClient;
    private RestHighLevelClient restHighLevelClient;

@@ -47,28 +84,473 @@ public class RestHighLevelClientTests extends ESTestCase {
        restHighLevelClient = new RestHighLevelClient(restClient);
    }

    public void testPing() throws IOException {
        assertTrue(restHighLevelClient.ping());
        verify(restClient).performRequest(eq("HEAD"), eq("/"), argThat(new HeadersVarargMatcher()));
    }

    public void testPingFailure() throws IOException {
        when(restClient.performRequest(any(), any())).thenThrow(new IllegalStateException());
        expectThrows(IllegalStateException.class, () -> restHighLevelClient.ping());
    }

    public void testPingFailed() throws IOException {
        when(restClient.performRequest(any(), any())).thenThrow(new SocketTimeoutException());
        assertFalse(restHighLevelClient.ping());
    }

    public void testPingWithHeaders() throws IOException {
    public void testPingSuccessful() throws IOException {
        Header[] headers = RestClientTestUtil.randomHeaders(random(), "Header");
        Response response = mock(Response.class);
        when(response.getStatusLine()).thenReturn(newStatusLine(RestStatus.OK));
        when(restClient.performRequest(anyString(), anyString(), anyMapOf(String.class, String.class),
                anyObject(), anyVararg())).thenReturn(response);
        assertTrue(restHighLevelClient.ping(headers));
        verify(restClient).performRequest(eq("HEAD"), eq("/"), argThat(new HeadersVarargMatcher(headers)));
        verify(restClient).performRequest(eq("HEAD"), eq("/"), eq(Collections.emptyMap()),
                Matchers.isNull(HttpEntity.class), argThat(new HeadersVarargMatcher(headers)));
    }

    private class HeadersVarargMatcher extends ArgumentMatcher<Header[]> implements VarargMatcher {
    public void testPing404NotFound() throws IOException {
        Header[] headers = RestClientTestUtil.randomHeaders(random(), "Header");
        Response response = mock(Response.class);
        when(response.getStatusLine()).thenReturn(newStatusLine(RestStatus.NOT_FOUND));
        when(restClient.performRequest(anyString(), anyString(), anyMapOf(String.class, String.class),
                anyObject(), anyVararg())).thenReturn(response);
        assertFalse(restHighLevelClient.ping(headers));
        verify(restClient).performRequest(eq("HEAD"), eq("/"), eq(Collections.emptyMap()),
                Matchers.isNull(HttpEntity.class), argThat(new HeadersVarargMatcher(headers)));
    }

    public void testPingSocketTimeout() throws IOException {
        Header[] headers = RestClientTestUtil.randomHeaders(random(), "Header");
        when(restClient.performRequest(anyString(), anyString(), anyMapOf(String.class, String.class),
                anyObject(), anyVararg())).thenThrow(new SocketTimeoutException());
        expectThrows(SocketTimeoutException.class, () -> restHighLevelClient.ping(headers));
        verify(restClient).performRequest(eq("HEAD"), eq("/"), eq(Collections.emptyMap()),
                Matchers.isNull(HttpEntity.class), argThat(new HeadersVarargMatcher(headers)));
    }

    public void testRequestValidation() {
        ActionRequestValidationException validationException = new ActionRequestValidationException();
        validationException.addValidationError("validation error");
        ActionRequest request = new ActionRequest() {
            @Override
            public ActionRequestValidationException validate() {
                return validationException;
            }
        };

        {
            ActionRequestValidationException actualException = expectThrows(ActionRequestValidationException.class,
                    () -> restHighLevelClient.performRequest(request, null, null, null));
            assertSame(validationException, actualException);
        }
        {
            TrackingActionListener trackingActionListener = new TrackingActionListener();
            restHighLevelClient.performRequestAsync(request, null, null, trackingActionListener, null);
            assertSame(validationException, trackingActionListener.exception.get());
        }
    }

    public void testParseEntity() throws IOException {
        {
            IllegalStateException ise = expectThrows(IllegalStateException.class, () -> RestHighLevelClient.parseEntity(null, null));
            assertEquals("Response body expected but not returned", ise.getMessage());
        }
        {
            IllegalStateException ise = expectThrows(IllegalStateException.class,
                    () -> RestHighLevelClient.parseEntity(new BasicHttpEntity(), null));
            assertEquals("Elasticsearch didn't return the [Content-Type] header, unable to parse response body", ise.getMessage());
        }
        {
            StringEntity entity = new StringEntity("", ContentType.APPLICATION_SVG_XML);
            IllegalStateException ise = expectThrows(IllegalStateException.class, () -> RestHighLevelClient.parseEntity(entity, null));
            assertEquals("Unsupported Content-Type: " + entity.getContentType().getValue(), ise.getMessage());
        }
        {
            CheckedFunction<XContentParser, String, IOException> entityParser = parser -> {
                assertEquals(XContentParser.Token.START_OBJECT, parser.nextToken());
                assertEquals(XContentParser.Token.FIELD_NAME, parser.nextToken());
                assertTrue(parser.nextToken().isValue());
                String value = parser.text();
                assertEquals(XContentParser.Token.END_OBJECT, parser.nextToken());
                return value;
            };
            HttpEntity jsonEntity = new StringEntity("{\"field\":\"value\"}", ContentType.APPLICATION_JSON);
            assertEquals("value", RestHighLevelClient.parseEntity(jsonEntity, entityParser));
            HttpEntity yamlEntity = new StringEntity("---\nfield: value\n", ContentType.create("application/yaml"));
            assertEquals("value", RestHighLevelClient.parseEntity(yamlEntity, entityParser));
            HttpEntity smileEntity = createBinaryEntity(SmileXContent.contentBuilder(), ContentType.create("application/smile"));
            assertEquals("value", RestHighLevelClient.parseEntity(smileEntity, entityParser));
            HttpEntity cborEntity = createBinaryEntity(CborXContent.contentBuilder(), ContentType.create("application/cbor"));
            assertEquals("value", RestHighLevelClient.parseEntity(cborEntity, entityParser));
        }
    }

    private static HttpEntity createBinaryEntity(XContentBuilder xContentBuilder, ContentType contentType) throws IOException {
        try (XContentBuilder builder = xContentBuilder) {
            builder.startObject();
            builder.field("field", "value");
            builder.endObject();
            return new ByteArrayEntity(builder.bytes().toBytesRef().bytes, contentType);
        }
    }

    public void testConvertExistsResponse() {
        RestStatus restStatus = randomBoolean() ? RestStatus.OK : randomFrom(RestStatus.values());
        HttpResponse httpResponse = new BasicHttpResponse(newStatusLine(restStatus));
        Response response = new Response(REQUEST_LINE, new HttpHost("localhost", 9200), httpResponse);
        boolean result = RestHighLevelClient.convertExistsResponse(response);
        assertEquals(restStatus == RestStatus.OK, result);
    }

    public void testParseResponseException() throws IOException {
        {
            RestStatus restStatus = randomFrom(RestStatus.values());
            HttpResponse httpResponse = new BasicHttpResponse(newStatusLine(restStatus));
            Response response = new Response(REQUEST_LINE, new HttpHost("localhost", 9200), httpResponse);
            ResponseException responseException = new ResponseException(response);
            ElasticsearchException elasticsearchException = RestHighLevelClient.parseResponseException(responseException);
            assertEquals(responseException.getMessage(), elasticsearchException.getMessage());
            assertEquals(restStatus, elasticsearchException.status());
            assertSame(responseException, elasticsearchException.getCause());
        }
        {
            RestStatus restStatus = randomFrom(RestStatus.values());
            HttpResponse httpResponse = new BasicHttpResponse(newStatusLine(restStatus));
            httpResponse.setEntity(new StringEntity("{\"error\":\"test error message\",\"status\":" + restStatus.getStatus() + "}",
                    ContentType.APPLICATION_JSON));
            Response response = new Response(REQUEST_LINE, new HttpHost("localhost", 9200), httpResponse);
            ResponseException responseException = new ResponseException(response);
            ElasticsearchException elasticsearchException = RestHighLevelClient.parseResponseException(responseException);
            assertEquals("Elasticsearch exception [type=exception, reason=test error message]", elasticsearchException.getMessage());
            assertEquals(restStatus, elasticsearchException.status());
            assertSame(responseException, elasticsearchException.getSuppressed()[0]);
        }
        {
            RestStatus restStatus = randomFrom(RestStatus.values());
            HttpResponse httpResponse = new BasicHttpResponse(newStatusLine(restStatus));
            httpResponse.setEntity(new StringEntity("{\"error\":", ContentType.APPLICATION_JSON));
            Response response = new Response(REQUEST_LINE, new HttpHost("localhost", 9200), httpResponse);
            ResponseException responseException = new ResponseException(response);
            ElasticsearchException elasticsearchException = RestHighLevelClient.parseResponseException(responseException);
            assertEquals("Unable to parse response body", elasticsearchException.getMessage());
            assertEquals(restStatus, elasticsearchException.status());
            assertSame(responseException, elasticsearchException.getCause());
            assertThat(elasticsearchException.getSuppressed()[0], instanceOf(IOException.class));
        }
        {
            RestStatus restStatus = randomFrom(RestStatus.values());
            HttpResponse httpResponse = new BasicHttpResponse(newStatusLine(restStatus));
            httpResponse.setEntity(new StringEntity("{\"status\":" + restStatus.getStatus() + "}", ContentType.APPLICATION_JSON));
            Response response = new Response(REQUEST_LINE, new HttpHost("localhost", 9200), httpResponse);
            ResponseException responseException = new ResponseException(response);
            ElasticsearchException elasticsearchException = RestHighLevelClient.parseResponseException(responseException);
            assertEquals("Unable to parse response body", elasticsearchException.getMessage());
            assertEquals(restStatus, elasticsearchException.status());
            assertSame(responseException, elasticsearchException.getCause());
            assertThat(elasticsearchException.getSuppressed()[0], instanceOf(IllegalStateException.class));
        }
    }

    public void testPerformRequestOnSuccess() throws IOException {
        MainRequest mainRequest = new MainRequest();
        Function<MainRequest, Request> requestConverter = request -> new Request("GET", "/", Collections.emptyMap(), null);
        RestStatus restStatus = randomFrom(RestStatus.values());
        HttpResponse httpResponse = new BasicHttpResponse(newStatusLine(restStatus));
        Response mockResponse = new Response(REQUEST_LINE, new HttpHost("localhost", 9200), httpResponse);
        when(restClient.performRequest(anyString(), anyString(), anyMapOf(String.class, String.class),
                anyObject(), anyVararg())).thenReturn(mockResponse);
        {
            Integer result = restHighLevelClient.performRequest(mainRequest, requestConverter,
                    response -> response.getStatusLine().getStatusCode(), Collections.emptySet());
            assertEquals(restStatus.getStatus(), result.intValue());
        }
        {
            IOException ioe = expectThrows(IOException.class, () -> restHighLevelClient.performRequest(mainRequest,
                    requestConverter, response -> {throw new IllegalStateException();}, Collections.emptySet()));
            assertEquals("Unable to parse response body for Response{requestLine=GET / http/1.1, host=http://localhost:9200, " +
                    "response=http/1.1 " + restStatus.getStatus() + " " + restStatus.name() + "}", ioe.getMessage());
        }
    }

    public void testPerformRequestOnResponseExceptionWithoutEntity() throws IOException {
        MainRequest mainRequest = new MainRequest();
        Function<MainRequest, Request> requestConverter = request -> new Request("GET", "/", Collections.emptyMap(), null);
        RestStatus restStatus = randomFrom(RestStatus.values());
        HttpResponse httpResponse = new BasicHttpResponse(newStatusLine(restStatus));
        Response mockResponse = new Response(REQUEST_LINE, new HttpHost("localhost", 9200), httpResponse);
        ResponseException responseException = new ResponseException(mockResponse);
        when(restClient.performRequest(anyString(), anyString(), anyMapOf(String.class, String.class),
                anyObject(), anyVararg())).thenThrow(responseException);
        ElasticsearchException elasticsearchException = expectThrows(ElasticsearchException.class,
                () -> restHighLevelClient.performRequest(mainRequest, requestConverter,
                        response -> response.getStatusLine().getStatusCode(), Collections.emptySet()));
        assertEquals(responseException.getMessage(), elasticsearchException.getMessage());
        assertEquals(restStatus, elasticsearchException.status());
        assertSame(responseException, elasticsearchException.getCause());
    }

    public void testPerformRequestOnResponseExceptionWithEntity() throws IOException {
        MainRequest mainRequest = new MainRequest();
        Function<MainRequest, Request> requestConverter = request -> new Request("GET", "/", Collections.emptyMap(), null);
        RestStatus restStatus = randomFrom(RestStatus.values());
        HttpResponse httpResponse = new BasicHttpResponse(newStatusLine(restStatus));
        httpResponse.setEntity(new StringEntity("{\"error\":\"test error message\",\"status\":" + restStatus.getStatus() + "}",
                ContentType.APPLICATION_JSON));
        Response mockResponse = new Response(REQUEST_LINE, new HttpHost("localhost", 9200), httpResponse);
        ResponseException responseException = new ResponseException(mockResponse);
        when(restClient.performRequest(anyString(), anyString(), anyMapOf(String.class, String.class),
                anyObject(), anyVararg())).thenThrow(responseException);
        ElasticsearchException elasticsearchException = expectThrows(ElasticsearchException.class,
                () -> restHighLevelClient.performRequest(mainRequest, requestConverter,
                        response -> response.getStatusLine().getStatusCode(), Collections.emptySet()));
        assertEquals("Elasticsearch exception [type=exception, reason=test error message]", elasticsearchException.getMessage());
        assertEquals(restStatus, elasticsearchException.status());
        assertSame(responseException, elasticsearchException.getSuppressed()[0]);
    }

    public void testPerformRequestOnResponseExceptionWithBrokenEntity() throws IOException {
        MainRequest mainRequest = new MainRequest();
        Function<MainRequest, Request> requestConverter = request -> new Request("GET", "/", Collections.emptyMap(), null);
        RestStatus restStatus = randomFrom(RestStatus.values());
        HttpResponse httpResponse = new BasicHttpResponse(newStatusLine(restStatus));
        httpResponse.setEntity(new StringEntity("{\"error\":", ContentType.APPLICATION_JSON));
        Response mockResponse = new Response(REQUEST_LINE, new HttpHost("localhost", 9200), httpResponse);
        ResponseException responseException = new ResponseException(mockResponse);
        when(restClient.performRequest(anyString(), anyString(), anyMapOf(String.class, String.class),
                anyObject(), anyVararg())).thenThrow(responseException);
        ElasticsearchException elasticsearchException = expectThrows(ElasticsearchException.class,
                () -> restHighLevelClient.performRequest(mainRequest, requestConverter,
                        response -> response.getStatusLine().getStatusCode(), Collections.emptySet()));
        assertEquals("Unable to parse response body", elasticsearchException.getMessage());
        assertEquals(restStatus, elasticsearchException.status());
        assertSame(responseException, elasticsearchException.getCause());
        assertThat(elasticsearchException.getSuppressed()[0], instanceOf(JsonParseException.class));
    }

    public void testPerformRequestOnResponseExceptionWithBrokenEntity2() throws IOException {
        MainRequest mainRequest = new MainRequest();
        Function<MainRequest, Request> requestConverter = request -> new Request("GET", "/", Collections.emptyMap(), null);
        RestStatus restStatus = randomFrom(RestStatus.values());
        HttpResponse httpResponse = new BasicHttpResponse(newStatusLine(restStatus));
        httpResponse.setEntity(new StringEntity("{\"status\":" + restStatus.getStatus() + "}", ContentType.APPLICATION_JSON));
        Response mockResponse = new Response(REQUEST_LINE, new HttpHost("localhost", 9200), httpResponse);
        ResponseException responseException = new ResponseException(mockResponse);
        when(restClient.performRequest(anyString(), anyString(), anyMapOf(String.class, String.class),
                anyObject(), anyVararg())).thenThrow(responseException);
        ElasticsearchException elasticsearchException = expectThrows(ElasticsearchException.class,
                () -> restHighLevelClient.performRequest(mainRequest, requestConverter,
                        response -> response.getStatusLine().getStatusCode(), Collections.emptySet()));
        assertEquals("Unable to parse response body", elasticsearchException.getMessage());
        assertEquals(restStatus, elasticsearchException.status());
        assertSame(responseException, elasticsearchException.getCause());
        assertThat(elasticsearchException.getSuppressed()[0], instanceOf(IllegalStateException.class));
    }

    public void testPerformRequestOnResponseExceptionWithIgnores() throws IOException {
        MainRequest mainRequest = new MainRequest();
        Function<MainRequest, Request> requestConverter = request -> new Request("GET", "/", Collections.emptyMap(), null);
        HttpResponse httpResponse = new BasicHttpResponse(newStatusLine(RestStatus.NOT_FOUND));
        Response mockResponse = new Response(REQUEST_LINE, new HttpHost("localhost", 9200), httpResponse);
        ResponseException responseException = new ResponseException(mockResponse);
        when(restClient.performRequest(anyString(), anyString(), anyMapOf(String.class, String.class),
                anyObject(), anyVararg())).thenThrow(responseException);
        //although we got an exception, we turn it into a successful response because the status code was provided among ignores
        assertEquals(Integer.valueOf(404), restHighLevelClient.performRequest(mainRequest, requestConverter,
                response -> response.getStatusLine().getStatusCode(), Collections.singleton(404)));
    }

    public void testPerformRequestOnResponseExceptionWithIgnoresErrorNoBody() throws IOException {
        MainRequest mainRequest = new MainRequest();
        Function<MainRequest, Request> requestConverter = request -> new Request("GET", "/", Collections.emptyMap(), null);
        HttpResponse httpResponse = new BasicHttpResponse(newStatusLine(RestStatus.NOT_FOUND));
        Response mockResponse = new Response(REQUEST_LINE, new HttpHost("localhost", 9200), httpResponse);
        ResponseException responseException = new ResponseException(mockResponse);
        when(restClient.performRequest(anyString(), anyString(), anyMapOf(String.class, String.class),
                anyObject(), anyVararg())).thenThrow(responseException);
        ElasticsearchException elasticsearchException = expectThrows(ElasticsearchException.class,
                () -> restHighLevelClient.performRequest(mainRequest, requestConverter,
                        response -> {throw new IllegalStateException();}, Collections.singleton(404)));
        assertEquals(RestStatus.NOT_FOUND, elasticsearchException.status());
        assertSame(responseException, elasticsearchException.getCause());
        assertEquals(responseException.getMessage(), elasticsearchException.getMessage());
    }

    public void testPerformRequestOnResponseExceptionWithIgnoresErrorValidBody() throws IOException {
        MainRequest mainRequest = new MainRequest();
        Function<MainRequest, Request> requestConverter = request -> new Request("GET", "/", Collections.emptyMap(), null);
        HttpResponse httpResponse = new BasicHttpResponse(newStatusLine(RestStatus.NOT_FOUND));
        httpResponse.setEntity(new StringEntity("{\"error\":\"test error message\",\"status\":404}",
                ContentType.APPLICATION_JSON));
        Response mockResponse = new Response(REQUEST_LINE, new HttpHost("localhost", 9200), httpResponse);
        ResponseException responseException = new ResponseException(mockResponse);
        when(restClient.performRequest(anyString(), anyString(), anyMapOf(String.class, String.class),
                anyObject(), anyVararg())).thenThrow(responseException);
        ElasticsearchException elasticsearchException = expectThrows(ElasticsearchException.class,
                () -> restHighLevelClient.performRequest(mainRequest, requestConverter,
                        response -> {throw new IllegalStateException();}, Collections.singleton(404)));
        assertEquals(RestStatus.NOT_FOUND, elasticsearchException.status());
        assertSame(responseException, elasticsearchException.getSuppressed()[0]);
        assertEquals("Elasticsearch exception [type=exception, reason=test error message]", elasticsearchException.getMessage());
    }

    public void testWrapResponseListenerOnSuccess() throws IOException {
        {
            TrackingActionListener trackingActionListener = new TrackingActionListener();
            ResponseListener responseListener = RestHighLevelClient.wrapResponseListener(
                    response -> response.getStatusLine().getStatusCode(), trackingActionListener, Collections.emptySet());
            RestStatus restStatus = randomFrom(RestStatus.values());
            HttpResponse httpResponse = new BasicHttpResponse(newStatusLine(restStatus));
            responseListener.onSuccess(new Response(REQUEST_LINE, new HttpHost("localhost", 9200), httpResponse));
            assertNull(trackingActionListener.exception.get());
            assertEquals(restStatus.getStatus(), trackingActionListener.statusCode.get());
        }
        {
            TrackingActionListener trackingActionListener = new TrackingActionListener();
            ResponseListener responseListener = RestHighLevelClient.wrapResponseListener(
                    response -> {throw new IllegalStateException();}, trackingActionListener, Collections.emptySet());
            RestStatus restStatus = randomFrom(RestStatus.values());
            HttpResponse httpResponse = new BasicHttpResponse(newStatusLine(restStatus));
            responseListener.onSuccess(new Response(REQUEST_LINE, new HttpHost("localhost", 9200), httpResponse));
            assertThat(trackingActionListener.exception.get(), instanceOf(IOException.class));
            IOException ioe = (IOException) trackingActionListener.exception.get();
            assertEquals("Unable to parse response body for Response{requestLine=GET / http/1.1, host=http://localhost:9200, " +
                    "response=http/1.1 " + restStatus.getStatus() + " " + restStatus.name() + "}", ioe.getMessage());
            assertThat(ioe.getCause(), instanceOf(IllegalStateException.class));
        }
    }

    public void testWrapResponseListenerOnException() throws IOException {
        TrackingActionListener trackingActionListener = new TrackingActionListener();
        ResponseListener responseListener = RestHighLevelClient.wrapResponseListener(
                response -> response.getStatusLine().getStatusCode(), trackingActionListener, Collections.emptySet());
        IllegalStateException exception = new IllegalStateException();
        responseListener.onFailure(exception);
        assertSame(exception, trackingActionListener.exception.get());
    }

    public void testWrapResponseListenerOnResponseExceptionWithoutEntity() throws IOException {
        TrackingActionListener trackingActionListener = new TrackingActionListener();
        ResponseListener responseListener = RestHighLevelClient.wrapResponseListener(
                response -> response.getStatusLine().getStatusCode(), trackingActionListener, Collections.emptySet());
        RestStatus restStatus = randomFrom(RestStatus.values());
        HttpResponse httpResponse = new BasicHttpResponse(newStatusLine(restStatus));
        Response response = new Response(REQUEST_LINE, new HttpHost("localhost", 9200), httpResponse);
        ResponseException responseException = new ResponseException(response);
        responseListener.onFailure(responseException);
        assertThat(trackingActionListener.exception.get(), instanceOf(ElasticsearchException.class));
        ElasticsearchException elasticsearchException = (ElasticsearchException) trackingActionListener.exception.get();
        assertEquals(responseException.getMessage(), elasticsearchException.getMessage());
        assertEquals(restStatus, elasticsearchException.status());
        assertSame(responseException, elasticsearchException.getCause());
    }

    public void testWrapResponseListenerOnResponseExceptionWithEntity() throws IOException {
        TrackingActionListener trackingActionListener = new TrackingActionListener();
        ResponseListener responseListener = RestHighLevelClient.wrapResponseListener(
                response -> response.getStatusLine().getStatusCode(), trackingActionListener, Collections.emptySet());
        RestStatus restStatus = randomFrom(RestStatus.values());
        HttpResponse httpResponse = new BasicHttpResponse(newStatusLine(restStatus));
        httpResponse.setEntity(new StringEntity("{\"error\":\"test error message\",\"status\":" + restStatus.getStatus() + "}",
                ContentType.APPLICATION_JSON));
        Response response = new Response(REQUEST_LINE, new HttpHost("localhost", 9200), httpResponse);
        ResponseException responseException = new ResponseException(response);
        responseListener.onFailure(responseException);
        assertThat(trackingActionListener.exception.get(), instanceOf(ElasticsearchException.class));
        ElasticsearchException elasticsearchException = (ElasticsearchException)trackingActionListener.exception.get();
        assertEquals("Elasticsearch exception [type=exception, reason=test error message]", elasticsearchException.getMessage());
        assertEquals(restStatus, elasticsearchException.status());
        assertSame(responseException, elasticsearchException.getSuppressed()[0]);
    }

    public void testWrapResponseListenerOnResponseExceptionWithBrokenEntity() throws IOException {
        {
            TrackingActionListener trackingActionListener = new TrackingActionListener();
            ResponseListener responseListener = RestHighLevelClient.wrapResponseListener(
                    response -> response.getStatusLine().getStatusCode(), trackingActionListener, Collections.emptySet());
            RestStatus restStatus = randomFrom(RestStatus.values());
            HttpResponse httpResponse = new BasicHttpResponse(newStatusLine(restStatus));
            httpResponse.setEntity(new StringEntity("{\"error\":", ContentType.APPLICATION_JSON));
            Response response = new Response(REQUEST_LINE, new HttpHost("localhost", 9200), httpResponse);
            ResponseException responseException = new ResponseException(response);
            responseListener.onFailure(responseException);
            assertThat(trackingActionListener.exception.get(), instanceOf(ElasticsearchException.class));
            ElasticsearchException elasticsearchException = (ElasticsearchException)trackingActionListener.exception.get();
            assertEquals("Unable to parse response body", elasticsearchException.getMessage());
            assertEquals(restStatus, elasticsearchException.status());
            assertSame(responseException, elasticsearchException.getCause());
            assertThat(elasticsearchException.getSuppressed()[0], instanceOf(JsonParseException.class));
        }
        {
            TrackingActionListener trackingActionListener = new TrackingActionListener();
            ResponseListener responseListener = RestHighLevelClient.wrapResponseListener(
                    response -> response.getStatusLine().getStatusCode(), trackingActionListener, Collections.emptySet());
            RestStatus restStatus = randomFrom(RestStatus.values());
            HttpResponse httpResponse = new BasicHttpResponse(newStatusLine(restStatus));
            httpResponse.setEntity(new StringEntity("{\"status\":" + restStatus.getStatus() + "}", ContentType.APPLICATION_JSON));
            Response response = new Response(REQUEST_LINE, new HttpHost("localhost", 9200), httpResponse);
            ResponseException responseException = new ResponseException(response);
            responseListener.onFailure(responseException);
            assertThat(trackingActionListener.exception.get(), instanceOf(ElasticsearchException.class));
            ElasticsearchException elasticsearchException = (ElasticsearchException)trackingActionListener.exception.get();
            assertEquals("Unable to parse response body", elasticsearchException.getMessage());
            assertEquals(restStatus, elasticsearchException.status());
            assertSame(responseException, elasticsearchException.getCause());
            assertThat(elasticsearchException.getSuppressed()[0], instanceOf(IllegalStateException.class));
        }
    }

    public void testWrapResponseListenerOnResponseExceptionWithIgnores() throws IOException {
        TrackingActionListener trackingActionListener = new TrackingActionListener();
        ResponseListener responseListener = RestHighLevelClient.wrapResponseListener(
                response -> response.getStatusLine().getStatusCode(), trackingActionListener, Collections.singleton(404));
        HttpResponse httpResponse = new BasicHttpResponse(newStatusLine(RestStatus.NOT_FOUND));
        Response response = new Response(REQUEST_LINE, new HttpHost("localhost", 9200), httpResponse);
        ResponseException responseException = new ResponseException(response);
        responseListener.onFailure(responseException);
        //although we got an exception, we turn it into a successful response because the status code was provided among ignores
        assertNull(trackingActionListener.exception.get());
        assertEquals(404, trackingActionListener.statusCode.get());
    }

    public void testWrapResponseListenerOnResponseExceptionWithIgnoresErrorNoBody() throws IOException {
        TrackingActionListener trackingActionListener = new TrackingActionListener();
        //response parsing throws exception while handling ignores. same as when GetResponse#fromXContent throws error when trying
        //to parse a 404 response which contains an error rather than a valid document not found response.
        ResponseListener responseListener = RestHighLevelClient.wrapResponseListener(
                response -> { throw new IllegalStateException(); }, trackingActionListener, Collections.singleton(404));
        HttpResponse httpResponse = new BasicHttpResponse(newStatusLine(RestStatus.NOT_FOUND));
        Response response = new Response(REQUEST_LINE, new HttpHost("localhost", 9200), httpResponse);
        ResponseException responseException = new ResponseException(response);
        responseListener.onFailure(responseException);
        assertThat(trackingActionListener.exception.get(), instanceOf(ElasticsearchException.class));
        ElasticsearchException elasticsearchException = (ElasticsearchException)trackingActionListener.exception.get();
        assertEquals(RestStatus.NOT_FOUND, elasticsearchException.status());
        assertSame(responseException, elasticsearchException.getCause());
        assertEquals(responseException.getMessage(), elasticsearchException.getMessage());
    }

    public void testWrapResponseListenerOnResponseExceptionWithIgnoresErrorValidBody() throws IOException {
        TrackingActionListener trackingActionListener = new TrackingActionListener();
        //response parsing throws exception while handling ignores. same as when GetResponse#fromXContent throws error when trying
        //to parse a 404 response which contains an error rather than a valid document not found response.
        ResponseListener responseListener = RestHighLevelClient.wrapResponseListener(
                response -> { throw new IllegalStateException(); }, trackingActionListener, Collections.singleton(404));
        HttpResponse httpResponse = new BasicHttpResponse(newStatusLine(RestStatus.NOT_FOUND));
        httpResponse.setEntity(new StringEntity("{\"error\":\"test error message\",\"status\":404}",
                ContentType.APPLICATION_JSON));
        Response response = new Response(REQUEST_LINE, new HttpHost("localhost", 9200), httpResponse);
        ResponseException responseException = new ResponseException(response);
        responseListener.onFailure(responseException);
        assertThat(trackingActionListener.exception.get(), instanceOf(ElasticsearchException.class));
        ElasticsearchException elasticsearchException = (ElasticsearchException)trackingActionListener.exception.get();
        assertEquals(RestStatus.NOT_FOUND, elasticsearchException.status());
        assertSame(responseException, elasticsearchException.getSuppressed()[0]);
        assertEquals("Elasticsearch exception [type=exception, reason=test error message]", elasticsearchException.getMessage());
    }

    private static class TrackingActionListener implements ActionListener<Integer> {
        private final AtomicInteger statusCode = new AtomicInteger(-1);
        private final AtomicReference<Exception> exception = new AtomicReference<>();

        @Override
        public void onResponse(Integer statusCode) {
            assertTrue(this.statusCode.compareAndSet(-1, statusCode));
        }

        @Override
        public void onFailure(Exception e) {
            assertTrue(exception.compareAndSet(null, e));
        }
    }

    private static class HeadersVarargMatcher extends ArgumentMatcher<Header[]> implements VarargMatcher {
        private Header[] expectedHeaders;

        HeadersVarargMatcher(Header... expectedHeaders) {
@@ -84,4 +566,8 @@ public class RestHighLevelClientTests extends ESTestCase {
            return false;
        }
    }

    private static StatusLine newStatusLine(RestStatus restStatus) {
        return new BasicStatusLine(HTTP_PROTOCOL, restStatus.getStatus(), restStatus.name());
    }
}
@@ -1 +0,0 @@
e0feb9281a7da7a7df62398ab0fc655d51f68fed
@@ -0,0 +1 @@
886c1da9adc3347f61ab95ecbf4dbeeaa0e7acb2
@@ -1 +0,0 @@
14698ecbca1437615ee31d412d0edd3440b4fccf
@@ -0,0 +1 @@
df9e94f63ad7d9188f14820c435ea1dc3c28d87a
@@ -1 +0,0 @@
09dd516b847dcaf8da4e9096bf3189b0b3607aef
@@ -0,0 +1 @@
3539f8dc9c3ed8ebe90afcb3daa2e9afcf5108d1
@@ -1 +0,0 @@
68a8f986a0076ad784cbb20813b9465b94e4c846
@@ -0,0 +1 @@
da76338e4f299963da9d7ab33dae7586dfc902c2
@@ -1 +0,0 @@
6d921c1242b608a4dcd0784e32bcd94097ad93cd
@@ -0,0 +1 @@
f6318d120236c7ac03fca6bf98825b4cb4347fc8
@@ -1 +0,0 @@
74d3cdf1bc863e3836b06f1865c970127cc15f26
@@ -0,0 +1 @@
68f045ff272e10c307fe25a1867c2948b614b57c
@@ -1 +0,0 @@
dd13729c0b401e3df11bce0c343d1e00f07b9a19
@@ -0,0 +1 @@
b58a7a15267614a9a14f7cf6257454e0c24b146d
@@ -1 +0,0 @@
ce27abe3490bb8ccbebd2eefcb68f42a609ca986
@@ -0,0 +1 @@
d5f00fcd00fee6906b563d201bc00bdea7a92baa
@@ -1 +0,0 @@
bd1978e3fdac2fadf1068828b0b1b534a56873c3
@@ -0,0 +1 @@
2664901a494d87e9f4cef65be14cca918da7c4f5
@@ -1 +0,0 @@
fb8fe41948fccf13b5dbb5d50441cac974544ade
@@ -0,0 +1 @@
476a79293f9a15ea1ee5f93684587205d03480d1
@@ -1 +0,0 @@
e7f7d1ad298c4af264199d9199f34f2e4d9ca2b5
@@ -0,0 +1 @@
f4dd70223178cca067b0cade4e58c4d82bec87d6
@@ -1 +0,0 @@
5d4b3ce4df83d0509e0b5f7eecda72af458ba225
@@ -0,0 +1 @@
72c4ec5d811480164db556b54c7a76bd3ea16bd6
@@ -1 +0,0 @@
3b486b51d3aede074ab6de890b427379d40c0438
@@ -0,0 +1 @@
f7af3755fdd09df7c258c655aff03ddef9536a04
@@ -1 +0,0 @@
344097014aeaaa0f94a217f3697e14ceee06581f
@@ -0,0 +1 @@
2bf820109203b990e93a05dade8dcebec6aeb71a
@@ -1 +0,0 @@
6c4706b86718f2653120e0dbfd24e03248dd2ea7
@@ -0,0 +1 @@
fc1f32923ee68761ee05051f4ef6f4a4ab3acdec
@@ -163,7 +163,7 @@ public abstract class BlendedTermQuery extends Query {
            if (prev > current) {
                actualDf++;
            }
            contexts[i] = ctx = adjustDF(ctx, Math.min(maxDoc, actualDf));
            contexts[i] = ctx = adjustDF(reader.getContext(), ctx, Math.min(maxDoc, actualDf));
            prev = current;
            if (sumTTF >= 0 && ctx.totalTermFreq() >= 0) {
                sumTTF += ctx.totalTermFreq();
@@ -179,16 +179,17 @@ public abstract class BlendedTermQuery extends Query {
            }
            // the blended sumTTF can't be greater than the sumTTTF on the field
            final long fixedTTF = sumTTF == -1 ? -1 : sumTTF;
            contexts[i] = adjustTTF(contexts[i], fixedTTF);
            contexts[i] = adjustTTF(reader.getContext(), contexts[i], fixedTTF);
        }
    }

    private TermContext adjustTTF(TermContext termContext, long sumTTF) {
    private TermContext adjustTTF(IndexReaderContext readerContext, TermContext termContext, long sumTTF) {
        assert termContext.wasBuiltFor(readerContext);
        if (sumTTF == -1 && termContext.totalTermFreq() == -1) {
            return termContext;
        }
        TermContext newTermContext = new TermContext(termContext.topReaderContext);
        List<LeafReaderContext> leaves = termContext.topReaderContext.leaves();
        TermContext newTermContext = new TermContext(readerContext);
        List<LeafReaderContext> leaves = readerContext.leaves();
        final int len;
        if (leaves == null) {
            len = 1;
@@ -209,7 +210,8 @@ public abstract class BlendedTermQuery extends Query {
        return newTermContext;
    }

    private static TermContext adjustDF(TermContext ctx, int newDocFreq) {
    private static TermContext adjustDF(IndexReaderContext readerContext, TermContext ctx, int newDocFreq) {
        assert ctx.wasBuiltFor(readerContext);
        // Use a value of ttf that is consistent with the doc freq (ie. gte)
        long newTTF;
        if (ctx.totalTermFreq() < 0) {
@@ -217,14 +219,14 @@ public abstract class BlendedTermQuery extends Query {
        } else {
            newTTF = Math.max(ctx.totalTermFreq(), newDocFreq);
        }
        List<LeafReaderContext> leaves = ctx.topReaderContext.leaves();
        List<LeafReaderContext> leaves = readerContext.leaves();
        final int len;
        if (leaves == null) {
            len = 1;
        } else {
            len = leaves.size();
        }
        TermContext newCtx = new TermContext(ctx.topReaderContext);
        TermContext newCtx = new TermContext(readerContext);
        for (int i = 0; i < len; ++i) {
            TermState termState = ctx.get(i);
            if (termState == null) {
@@ -486,7 +486,7 @@ public long ramBytesUsed() {
        }
    }

    /** Non-null if this sugggester created a temp dir, needed only during build */
    /** Non-null if this suggester created a temp dir, needed only during build */
    private static FSDirectory tmpBuildDir;

    @SuppressForbidden(reason = "access temp directory for building index")
@@ -30,11 +30,11 @@ import org.apache.lucene.search.PhraseQuery;
import org.apache.lucene.search.Query;
import org.apache.lucene.search.SynonymQuery;
import org.apache.lucene.search.TermQuery;
import org.apache.lucene.search.join.ToParentBlockJoinQuery;
import org.apache.lucene.search.spans.SpanTermQuery;
import org.elasticsearch.common.lucene.search.MultiPhrasePrefixQuery;
import org.elasticsearch.common.lucene.search.function.FiltersFunctionScoreQuery;
import org.elasticsearch.common.lucene.search.function.FunctionScoreQuery;
import org.elasticsearch.index.search.ESToParentBlockJoinQuery;

import java.io.IOException;
import java.util.Collection;
@@ -77,8 +77,8 @@ public class CustomFieldQuery extends FieldQuery {
        } else if (sourceQuery instanceof BlendedTermQuery) {
            final BlendedTermQuery blendedTermQuery = (BlendedTermQuery) sourceQuery;
            flatten(blendedTermQuery.rewrite(reader), reader, flatQueries, boost);
        } else if (sourceQuery instanceof ToParentBlockJoinQuery) {
            ToParentBlockJoinQuery blockJoinQuery = (ToParentBlockJoinQuery) sourceQuery;
        } else if (sourceQuery instanceof ESToParentBlockJoinQuery) {
            ESToParentBlockJoinQuery blockJoinQuery = (ESToParentBlockJoinQuery) sourceQuery;
            flatten(blockJoinQuery.getChildQuery(), reader, flatQueries, boost);
        } else if (sourceQuery instanceof BoostingQuery) {
            BoostingQuery boostingQuery = (BoostingQuery) sourceQuery;
@@ -1,124 +0,0 @@
/*
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License. You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.lucene.spatial.geopoint.search;

import org.apache.lucene.index.IndexReader;
import org.apache.lucene.search.BooleanClause;
import org.apache.lucene.search.BooleanQuery;
import org.apache.lucene.search.Query;
import org.apache.lucene.spatial.geopoint.document.GeoPointField.TermEncoding;

/** Implements a point distance range query on a GeoPoint field. This is based on
 * {@code org.apache.lucene.spatial.geopoint.search.GeoPointDistanceQuery} and is implemented using a
 * {@code org.apache.lucene.search.BooleanClause.MUST_NOT} clause to exclude any points that fall within
 * minRadiusMeters from the provided point.
 * <p>
 * NOTE: this query does not correctly support multi-value docs (see: https://issues.apache.org/jira/browse/LUCENE-7126)
 * <br>
 * TODO: remove this per ISSUE #17658
 **/
public final class XGeoPointDistanceRangeQuery extends GeoPointDistanceQuery {
  /** minimum distance range (in meters) from lat, lon center location, maximum is inherited */
  protected final double minRadiusMeters;

  /**
   * Constructs a query for all {@link org.apache.lucene.spatial.geopoint.document.GeoPointField} types within a minimum / maximum
   * distance (in meters) range from a given point
   */
  public XGeoPointDistanceRangeQuery(final String field, final double centerLat, final double centerLon,
                                     final double minRadiusMeters, final double maxRadiusMeters) {
    this(field, TermEncoding.PREFIX, centerLat, centerLon, minRadiusMeters, maxRadiusMeters);
  }

  /**
   * Constructs a query for all {@link org.apache.lucene.spatial.geopoint.document.GeoPointField} types within a minimum / maximum
   * distance (in meters) range from a given point. Accepts an optional
   * {@link org.apache.lucene.spatial.geopoint.document.GeoPointField.TermEncoding}
   */
  public XGeoPointDistanceRangeQuery(final String field, final TermEncoding termEncoding, final double centerLat, final double centerLon,
                                     final double minRadiusMeters, final double maxRadius) {
    super(field, termEncoding, centerLat, centerLon, maxRadius);
    this.minRadiusMeters = minRadiusMeters;
  }

  @Override
  public Query rewrite(IndexReader reader) {
    Query q = super.rewrite(reader);
    if (minRadiusMeters == 0.0) {
      return q;
    }

    // add an exclusion query
    BooleanQuery.Builder bqb = new BooleanQuery.Builder();

    // create a new exclusion query
    GeoPointDistanceQuery exclude = new GeoPointDistanceQuery(field, termEncoding, centerLat, centerLon, minRadiusMeters);
    // full map search
    // if (radiusMeters >= GeoProjectionUtils.SEMIMINOR_AXIS) {
    //   bqb.add(new BooleanClause(new GeoPointInBBoxQuery(this.field, -180.0, -90.0, 180.0, 90.0), BooleanClause.Occur.MUST));
    // } else {
    bqb.add(new BooleanClause(q, BooleanClause.Occur.MUST));
    // }
    bqb.add(new BooleanClause(exclude, BooleanClause.Occur.MUST_NOT));

    return bqb.build();
  }

  @Override
  public String toString(String field) {
    final StringBuilder sb = new StringBuilder();
    sb.append(getClass().getSimpleName());
    sb.append(':');
    if (!this.field.equals(field)) {
      sb.append(" field=");
      sb.append(this.field);
      sb.append(':');
    }
    return sb.append(" Center: [")
        .append(centerLat)
        .append(',')
        .append(centerLon)
        .append(']')
        .append(" From Distance: ")
        .append(minRadiusMeters)
        .append(" m")
        .append(" To Distance: ")
        .append(radiusMeters)
        .append(" m")
        .append(" Lower Left: [")
        .append(minLat)
        .append(',')
        .append(minLon)
        .append(']')
        .append(" Upper Right: [")
        .append(maxLat)
        .append(',')
        .append(maxLon)
        .append("]")
        .toString();
  }

  /** getter method for minimum distance */
  public double getMinRadiusMeters() {
    return this.minRadiusMeters;
  }

  /** getter method for maximum distance */
  public double getMaxRadiusMeters() {
    return this.radiusMeters;
  }
}
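The removed query above expresses a distance ring as boolean clause composition: the outer-circle query goes in as a MUST clause and the inner circle as a MUST_NOT exclusion. A minimal, self-contained sketch of that composition, using plain distance predicates as stand-ins for the Lucene query objects (all names here are illustrative, not the Lucene API):

<pre>
// Ring membership = MUST(distance <= max) AND MUST_NOT(distance < min),
// mirroring the BooleanQuery built in rewrite() above.
import java.util.function.DoublePredicate;

public class RingFilterSketch {
    public static void main(String[] args) {
        double minRadiusMeters = 100.0;
        double maxRadiusMeters = 500.0;
        DoublePredicate withinOuter = d -> d <= maxRadiusMeters; // MUST clause
        DoublePredicate insideInner = d -> d < minRadiusMeters;  // MUST_NOT clause
        DoublePredicate ring = withinOuter.and(insideInner.negate());
        System.out.println(ring.test(50.0));  // false: excluded by the inner circle
        System.out.println(ring.test(250.0)); // true: inside the ring
        System.out.println(ring.test(900.0)); // false: beyond the outer circle
    }
}
</pre>

When minRadiusMeters is 0.0 there is nothing to exclude, which is why rewrite() returns the rewritten outer query unchanged in that case.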
@@ -19,6 +19,7 @@

package org.elasticsearch;

import org.elasticsearch.common.io.FileSystemUtils;
import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.io.stream.StreamOutput;

@@ -44,7 +45,7 @@ public class Build {

        final URL url = getElasticsearchCodebase();
        if (url.toString().endsWith(".jar")) {
            try (JarInputStream jar = new JarInputStream(url.openStream())) {
            try (JarInputStream jar = new JarInputStream(FileSystemUtils.openFileURLStream(url))) {
                Manifest manifest = jar.getManifest();
                shortHash = manifest.getMainAttributes().getValue("Change");
                date = manifest.getMainAttributes().getValue("Build-Date");

@@ -79,10 +80,10 @@ public class Build {
        return Build.class.getProtectionDomain().getCodeSource().getLocation();
    }

    private String shortHash;
    private String date;
    private final String shortHash;
    private final String date;

    Build(String shortHash, String date, boolean isSnapshot) {
    public Build(String shortHash, String date, boolean isSnapshot) {
        this.shortHash = shortHash;
        this.date = date;
        this.isSnapshot = isSnapshot;

@@ -586,7 +586,7 @@ public class ElasticsearchException extends RuntimeException implements ToXConte
            return new ElasticsearchException(buildMessage("exception", parser.text(), null));
        }

        ensureExpectedToken(token, XContentParser.Token.START_OBJECT, parser::getTokenLocation);
        ensureExpectedToken(XContentParser.Token.START_OBJECT, token, parser::getTokenLocation);
        token = parser.nextToken();

        // Root causes are parsed in the innerFromXContent() and are added as suppressed exceptions.
@@ -109,12 +109,16 @@ public class Version implements Comparable<Version> {
    public static final int V_5_2_0_ID_UNRELEASED = 5020099;
    public static final Version V_5_2_0_UNRELEASED = new Version(V_5_2_0_ID_UNRELEASED, org.apache.lucene.util.Version.LUCENE_6_4_0);
    public static final int V_5_2_1_ID_UNRELEASED = 5020199;
    public static final Version V_5_2_1_UNRELEASED = new Version(V_5_2_1_ID_UNRELEASED, org.apache.lucene.util.Version.LUCENE_6_4_0);
    public static final Version V_5_2_1_UNRELEASED = new Version(V_5_2_1_ID_UNRELEASED, org.apache.lucene.util.Version.LUCENE_6_4_1);
    public static final int V_5_2_2_ID_UNRELEASED = 5020299;
    public static final Version V_5_2_2_UNRELEASED = new Version(V_5_2_2_ID_UNRELEASED, org.apache.lucene.util.Version.LUCENE_6_4_1);
    public static final int V_5_3_0_ID_UNRELEASED = 5030099;
    public static final Version V_5_3_0_UNRELEASED = new Version(V_5_3_0_ID_UNRELEASED, org.apache.lucene.util.Version.LUCENE_6_4_0);
    public static final Version V_5_3_0_UNRELEASED = new Version(V_5_3_0_ID_UNRELEASED, org.apache.lucene.util.Version.LUCENE_6_4_1);
    public static final int V_5_4_0_ID_UNRELEASED = 5040099;
    public static final Version V_5_4_0_UNRELEASED = new Version(V_5_4_0_ID_UNRELEASED, org.apache.lucene.util.Version.LUCENE_6_5_0);
    public static final int V_6_0_0_alpha1_ID_UNRELEASED = 6000001;
    public static final Version V_6_0_0_alpha1_UNRELEASED =
        new Version(V_6_0_0_alpha1_ID_UNRELEASED, org.apache.lucene.util.Version.LUCENE_6_4_0);
        new Version(V_6_0_0_alpha1_ID_UNRELEASED, org.apache.lucene.util.Version.LUCENE_6_5_0);
    public static final Version CURRENT = V_6_0_0_alpha1_UNRELEASED;

    // unreleased versions must be added to the above list with the suffix _UNRELEASED (with the exception of CURRENT)

@@ -134,6 +138,8 @@ public class Version implements Comparable<Version> {
            return V_6_0_0_alpha1_UNRELEASED;
        case V_5_3_0_ID_UNRELEASED:
            return V_5_3_0_UNRELEASED;
        case V_5_2_2_ID_UNRELEASED:
            return V_5_2_2_UNRELEASED;
        case V_5_2_1_ID_UNRELEASED:
            return V_5_2_1_UNRELEASED;
        case V_5_2_0_ID_UNRELEASED:
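The packed integer ids above appear to follow a positional scheme. A hedged sketch of the decoding this implies (the arithmetic is an assumption inferred from constants such as V_5_2_1_ID_UNRELEASED = 5020199, not code from this commit):

<pre>
public class VersionIdSketch {
    public static void main(String[] args) {
        int id = 5020199;                // V_5_2_1_ID_UNRELEASED
        int major = id / 1000000;        // 5
        int minor = (id / 10000) % 100;  // 2
        int revision = (id / 100) % 100; // 1
        int build = id % 100;            // 99, seemingly marking an unreleased build
        System.out.printf("%d.%d.%d (build %d)%n", major, minor, revision, build);
    }
}
</pre>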
@@ -234,7 +234,6 @@ import org.elasticsearch.rest.action.admin.cluster.RestPutStoredScriptAction;
import org.elasticsearch.rest.action.admin.cluster.RestRestoreSnapshotAction;
import org.elasticsearch.rest.action.admin.cluster.RestSnapshotsStatusAction;
import org.elasticsearch.rest.action.admin.cluster.RestVerifyRepositoryAction;
import org.elasticsearch.rest.action.admin.indices.RestAliasesExistAction;
import org.elasticsearch.rest.action.admin.indices.RestAnalyzeAction;
import org.elasticsearch.rest.action.admin.indices.RestClearIndicesCacheAction;
import org.elasticsearch.rest.action.admin.indices.RestCloseIndexAction;

@@ -249,11 +248,9 @@ import org.elasticsearch.rest.action.admin.indices.RestGetIndexTemplateAction;
import org.elasticsearch.rest.action.admin.indices.RestGetIndicesAction;
import org.elasticsearch.rest.action.admin.indices.RestGetMappingAction;
import org.elasticsearch.rest.action.admin.indices.RestGetSettingsAction;
import org.elasticsearch.rest.action.admin.indices.RestHeadIndexTemplateAction;
import org.elasticsearch.rest.action.admin.indices.RestIndexDeleteAliasesAction;
import org.elasticsearch.rest.action.admin.indices.RestIndexPutAliasAction;
import org.elasticsearch.rest.action.admin.indices.RestIndicesAliasesAction;
import org.elasticsearch.rest.action.admin.indices.RestIndicesExistsAction;
import org.elasticsearch.rest.action.admin.indices.RestIndicesSegmentsAction;
import org.elasticsearch.rest.action.admin.indices.RestIndicesShardStoresAction;
import org.elasticsearch.rest.action.admin.indices.RestIndicesStatsAction;

@@ -291,7 +288,6 @@ import org.elasticsearch.rest.action.document.RestBulkAction;
import org.elasticsearch.rest.action.document.RestDeleteAction;
import org.elasticsearch.rest.action.document.RestGetAction;
import org.elasticsearch.rest.action.document.RestGetSourceAction;
import org.elasticsearch.rest.action.document.RestHeadAction;
import org.elasticsearch.rest.action.document.RestIndexAction;
import org.elasticsearch.rest.action.document.RestMultiGetAction;
import org.elasticsearch.rest.action.document.RestMultiTermVectorsAction;

@@ -528,14 +524,12 @@ public class ActionModule extends AbstractModule {
        registerHandler.accept(new RestDeleteSnapshotAction(settings, restController));
        registerHandler.accept(new RestSnapshotsStatusAction(settings, restController));

        registerHandler.accept(new RestIndicesExistsAction(settings, restController));
        registerHandler.accept(new RestTypesExistsAction(settings, restController));
        registerHandler.accept(new RestGetIndicesAction(settings, restController, indexScopedSettings, settingsFilter));
        registerHandler.accept(new RestIndicesStatsAction(settings, restController));
        registerHandler.accept(new RestIndicesSegmentsAction(settings, restController));
        registerHandler.accept(new RestIndicesShardStoresAction(settings, restController));
        registerHandler.accept(new RestGetAliasesAction(settings, restController));
        registerHandler.accept(new RestAliasesExistAction(settings, restController));
        registerHandler.accept(new RestIndexDeleteAliasesAction(settings, restController));
        registerHandler.accept(new RestIndexPutAliasAction(settings, restController));
        registerHandler.accept(new RestIndicesAliasesAction(settings, restController));

@@ -553,7 +547,6 @@ public class ActionModule extends AbstractModule {
        registerHandler.accept(new RestGetIndexTemplateAction(settings, restController));
        registerHandler.accept(new RestPutIndexTemplateAction(settings, restController));
        registerHandler.accept(new RestDeleteIndexTemplateAction(settings, restController));
        registerHandler.accept(new RestHeadIndexTemplateAction(settings, restController));

        registerHandler.accept(new RestPutMappingAction(settings, restController));
        registerHandler.accept(new RestGetMappingAction(settings, restController));

@@ -569,8 +562,6 @@ public class ActionModule extends AbstractModule {
        registerHandler.accept(new RestIndexAction(settings, restController));
        registerHandler.accept(new RestGetAction(settings, restController));
        registerHandler.accept(new RestGetSourceAction(settings, restController));
        registerHandler.accept(new RestHeadAction.Document(settings, restController));
        registerHandler.accept(new RestHeadAction.Source(settings, restController));
        registerHandler.accept(new RestMultiGetAction(settings, restController));
        registerHandler.accept(new RestDeleteAction(settings, restController));
        registerHandler.accept(new org.elasticsearch.rest.action.document.RestCountAction(settings, restController));
@@ -23,27 +23,28 @@ import org.elasticsearch.action.support.WriteRequest;
import org.elasticsearch.action.support.WriteRequest.RefreshPolicy;
import org.elasticsearch.action.support.WriteResponse;
import org.elasticsearch.action.support.replication.ReplicationResponse;
import org.elasticsearch.cluster.metadata.IndexMetaData;
import org.elasticsearch.common.Nullable;
import org.elasticsearch.common.ParseField;
import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.io.stream.StreamOutput;
import org.elasticsearch.common.io.stream.Writeable;
import org.elasticsearch.common.xcontent.ConstructingObjectParser;
import org.elasticsearch.common.xcontent.ObjectParser;
import org.elasticsearch.common.xcontent.StatusToXContentObject;
import org.elasticsearch.common.xcontent.XContentBuilder;
import org.elasticsearch.common.xcontent.XContentParser;
import org.elasticsearch.index.Index;
import org.elasticsearch.index.IndexSettings;
import org.elasticsearch.index.seqno.SequenceNumbersService;
import org.elasticsearch.index.shard.ShardId;
import org.elasticsearch.rest.RestStatus;

import java.io.IOException;
import java.net.URI;
import java.net.URISyntaxException;
import java.io.UnsupportedEncodingException;
import java.net.URLEncoder;
import java.util.Locale;

import static org.elasticsearch.common.xcontent.ConstructingObjectParser.constructorArg;
import static org.elasticsearch.common.xcontent.ConstructingObjectParser.optionalConstructorArg;
import static org.elasticsearch.common.xcontent.XContentParserUtils.ensureExpectedToken;
import static org.elasticsearch.common.xcontent.XContentParserUtils.throwUnknownField;
import static org.elasticsearch.common.xcontent.XContentParserUtils.throwUnknownToken;

/**
 * A base class for the response of a write operation that involves a single doc

@@ -196,36 +197,49 @@ public abstract class DocWriteResponse extends ReplicationResponse implements Wr
    }

    /** returns the rest status for this response (based on {@link ShardInfo#status()} */
    @Override
    public RestStatus status() {
        return getShardInfo().status();
    }

    /**
     * Gets the location of the written document as a string suitable for a {@code Location} header.
     * @param routing any routing used in the request. If null the location doesn't include routing information.
     * Return the relative URI for the location of the document suitable for use in the {@code Location} header. The use of relative URIs is
     * permitted as of HTTP/1.1 (cf. https://tools.ietf.org/html/rfc7231#section-7.1.2).
     *
     * @param routing custom routing or {@code null} if custom routing is not used
     * @return the relative URI for the location of the document
     */
    public String getLocation(@Nullable String routing) throws URISyntaxException {
        // Absolute path for the location of the document. This should be allowed as of HTTP/1.1:
        // https://tools.ietf.org/html/rfc7231#section-7.1.2
        String index = getIndex();
        String type = getType();
        String id = getId();
        String routingStart = "?routing=";
        int bufferSize = 3 + index.length() + type.length() + id.length();
        if (routing != null) {
            bufferSize += routingStart.length() + routing.length();
    public String getLocation(@Nullable String routing) {
        final String encodedIndex;
        final String encodedType;
        final String encodedId;
        final String encodedRouting;
        try {
            // encode the path components separately otherwise the path separators will be encoded
            encodedIndex = URLEncoder.encode(getIndex(), "UTF-8");
            encodedType = URLEncoder.encode(getType(), "UTF-8");
            encodedId = URLEncoder.encode(getId(), "UTF-8");
            encodedRouting = routing == null ? null : URLEncoder.encode(routing, "UTF-8");
        } catch (final UnsupportedEncodingException e) {
            throw new AssertionError(e);
        }
        StringBuilder location = new StringBuilder(bufferSize);
        location.append('/').append(index);
        location.append('/').append(type);
        location.append('/').append(id);
        if (routing != null) {
            location.append(routingStart).append(routing);
        final String routingStart = "?routing=";
        final int bufferSizeExcludingRouting = 3 + encodedIndex.length() + encodedType.length() + encodedId.length();
        final int bufferSize;
        if (encodedRouting == null) {
            bufferSize = bufferSizeExcludingRouting;
        } else {
            bufferSize = bufferSizeExcludingRouting + routingStart.length() + encodedRouting.length();
        }
        final StringBuilder location = new StringBuilder(bufferSize);
        location.append('/').append(encodedIndex);
        location.append('/').append(encodedType);
        location.append('/').append(encodedId);
        if (encodedRouting != null) {
            location.append(routingStart).append(encodedRouting);
        }

        URI uri = new URI(location.toString());
        return uri.toASCIIString();
        return location.toString();
    }

    @Override
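The rewritten getLocation() above encodes each path component on its own so that the '/' separators between index, type and id survive encoding. A self-contained sketch of that component-wise encoding (the index/type/id values are made up for illustration):

<pre>
import java.io.UnsupportedEncodingException;
import java.net.URLEncoder;

public class LocationSketch {
    public static void main(String[] args) throws UnsupportedEncodingException {
        String index = "twitter", type = "tweet", id = "id with spaces", routing = "user:kimchy";
        StringBuilder location = new StringBuilder();
        // encode the path components separately so the '/' separators are not encoded
        location.append('/').append(URLEncoder.encode(index, "UTF-8"));
        location.append('/').append(URLEncoder.encode(type, "UTF-8"));
        location.append('/').append(URLEncoder.encode(id, "UTF-8"));
        if (routing != null) {
            location.append("?routing=").append(URLEncoder.encode(routing, "UTF-8"));
        }
        // prints /twitter/tweet/id+with+spaces?routing=user%3Akimchy
        System.out.println(location);
    }
}
</pre>

Because the result is already ASCII-safe, the round trip through java.net.URI in the old version becomes unnecessary, and the relative form is allowed in a Location header per RFC 7231.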
@@ -284,16 +298,113 @@ public abstract class DocWriteResponse extends ReplicationResponse implements Wr
    }

    /**
     * Declare the {@link ObjectParser} fields to use when parsing a {@link DocWriteResponse}
     * Parse the output of the {@link #innerToXContent(XContentBuilder, Params)} method.
     *
     * This method is intended to be called by subclasses and must be called multiple times to parse all the information concerning
     * {@link DocWriteResponse} objects. It always parses the current token, updates the given parsing context accordingly
     * if needed and then immediately returns.
     */
    protected static void declareParserFields(ConstructingObjectParser<? extends DocWriteResponse, Void> objParser) {
        objParser.declareString(constructorArg(), new ParseField(_INDEX));
        objParser.declareString(constructorArg(), new ParseField(_TYPE));
        objParser.declareString(constructorArg(), new ParseField(_ID));
        objParser.declareLong(constructorArg(), new ParseField(_VERSION));
        objParser.declareString(constructorArg(), new ParseField(RESULT));
        objParser.declareObject(optionalConstructorArg(), (p, c) -> ShardInfo.fromXContent(p), new ParseField(_SHARDS));
        objParser.declareLong(optionalConstructorArg(), new ParseField(_SEQ_NO));
        objParser.declareBoolean(DocWriteResponse::setForcedRefresh, new ParseField(FORCED_REFRESH));
    protected static void parseInnerToXContent(XContentParser parser, DocWriteResponseBuilder context) throws IOException {
        XContentParser.Token token = parser.currentToken();
        ensureExpectedToken(XContentParser.Token.FIELD_NAME, token, parser::getTokenLocation);

        String currentFieldName = parser.currentName();
        token = parser.nextToken();

        if (token.isValue()) {
            if (_INDEX.equals(currentFieldName)) {
                // index uuid and shard id are unknown and can't be parsed back for now.
                context.setShardId(new ShardId(new Index(parser.text(), IndexMetaData.INDEX_UUID_NA_VALUE), -1));
            } else if (_TYPE.equals(currentFieldName)) {
                context.setType(parser.text());
            } else if (_ID.equals(currentFieldName)) {
                context.setId(parser.text());
            } else if (_VERSION.equals(currentFieldName)) {
                context.setVersion(parser.longValue());
            } else if (RESULT.equals(currentFieldName)) {
                String result = parser.text();
                for (Result r : Result.values()) {
                    if (r.getLowercase().equals(result)) {
                        context.setResult(r);
                        break;
                    }
                }
            } else if (FORCED_REFRESH.equals(currentFieldName)) {
                context.setForcedRefresh(parser.booleanValue());
            } else if (_SEQ_NO.equals(currentFieldName)) {
                context.setSeqNo(parser.longValue());
            } else {
                throwUnknownField(currentFieldName, parser.getTokenLocation());
            }
        } else if (token == XContentParser.Token.START_OBJECT) {
            if (_SHARDS.equals(currentFieldName)) {
                context.setShardInfo(ShardInfo.fromXContent(parser));
            } else {
                throwUnknownField(currentFieldName, parser.getTokenLocation());
            }
        } else {
            throwUnknownToken(token, parser.getTokenLocation());
        }
    }

    /**
     * {@link DocWriteResponseBuilder} is used to build {@link DocWriteResponse} objects during XContent parsing.
     */
    public abstract static class DocWriteResponseBuilder {

        protected ShardId shardId = null;
        protected String type = null;
        protected String id = null;
        protected Long version = null;
        protected Result result = null;
        protected boolean forcedRefresh;
        protected ShardInfo shardInfo = null;
        protected Long seqNo = SequenceNumbersService.UNASSIGNED_SEQ_NO;

        public ShardId getShardId() {
            return shardId;
        }

        public void setShardId(ShardId shardId) {
            this.shardId = shardId;
        }

        public String getType() {
            return type;
        }

        public void setType(String type) {
            this.type = type;
        }

        public String getId() {
            return id;
        }

        public void setId(String id) {
            this.id = id;
        }

        public void setVersion(Long version) {
            this.version = version;
        }

        public void setResult(Result result) {
            this.result = result;
        }

        public void setForcedRefresh(boolean forcedRefresh) {
            this.forcedRefresh = forcedRefresh;
        }

        public void setShardInfo(ShardInfo shardInfo) {
            this.shardInfo = shardInfo;
        }

        public void setSeqNo(Long seqNo) {
            this.seqNo = seqNo;
        }

        public abstract DocWriteResponse build();
    }
}
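parseInnerToXContent() above consumes exactly one token per call and records what it finds in a mutable builder; subclasses drive the loop and add their own fields. A toy sketch of that builder-per-field pattern, with a Map standing in for the XContentParser token stream (the names here are illustrative only):

<pre>
import java.util.LinkedHashMap;
import java.util.Map;

public class BuilderParseSketch {
    static class ResponseBuilder {
        String id;
        long version;
        // one call per parsed field, like parseInnerToXContent above
        void set(String field, Object value) {
            if ("_id".equals(field)) {
                id = (String) value;
            } else if ("_version".equals(field)) {
                version = (Long) value;
            }
        }
        String build() { return id + "@v" + version; }
    }

    public static void main(String[] args) {
        Map<String, Object> parsed = new LinkedHashMap<>();
        parsed.put("_id", "1");
        parsed.put("_version", 2L);
        ResponseBuilder builder = new ResponseBuilder();
        parsed.forEach(builder::set);
        System.out.println(builder.build()); // 1@v2
    }
}
</pre>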
@@ -19,7 +19,6 @@

package org.elasticsearch.action.admin.cluster.allocation;

import org.elasticsearch.ElasticsearchParseException;
import org.elasticsearch.Version;
import org.elasticsearch.action.ActionRequestValidationException;
import org.elasticsearch.action.support.master.MasterNodeRequest;

@@ -222,12 +221,7 @@ public class ClusterAllocationExplainRequest extends MasterNodeRequest<ClusterAl
    }

    public static ClusterAllocationExplainRequest parse(XContentParser parser) throws IOException {
        ClusterAllocationExplainRequest req = PARSER.parse(parser, new ClusterAllocationExplainRequest(), null);
        Exception e = req.validate();
        if (e != null) {
            throw new ElasticsearchParseException("'index', 'shard', and 'primary' must be specified in allocation explain request", e);
        }
        return req;
        return PARSER.parse(parser, new ClusterAllocationExplainRequest(), null);
    }

    @Override

@@ -106,7 +106,7 @@ public class PutRepositoryRequestBuilder extends AcknowledgedRequestBuilder<PutR
     * Sets the repository settings in Json or Yaml format
     *
     * @param source repository settings
     * @param xContentType the contenty type of the source
     * @param xContentType the content type of the source
     * @return this builder
     */
    public PutRepositoryRequestBuilder setSettings(String source, XContentType xContentType) {

@@ -29,7 +29,7 @@ import org.elasticsearch.common.util.ArrayUtils;
public class SnapshotsStatusRequestBuilder extends MasterNodeOperationRequestBuilder<SnapshotsStatusRequest, SnapshotsStatusResponse, SnapshotsStatusRequestBuilder> {

    /**
     * Constructs the new snapshotstatus request
     * Constructs the new snapshot status request
     */
    public SnapshotsStatusRequestBuilder(ElasticsearchClient client, SnapshotsStatusAction action) {
        super(client, action, new SnapshotsStatusRequest());

@@ -69,7 +69,7 @@ public class ClusterStateRequestBuilder extends MasterNodeReadOperationRequestBu
    }

    /**
     * Should the cluster state result include teh {@link org.elasticsearch.cluster.routing.RoutingTable}. Defaults
     * Should the cluster state result include the {@link org.elasticsearch.cluster.routing.RoutingTable}. Defaults
     * to <tt>true</tt>.
     */
    public ClusterStateRequestBuilder setRoutingTable(boolean filter) {
@@ -137,7 +137,7 @@ public class PutStoredScriptRequest extends AcknowledgedRequest<PutStoredScriptR

        id = in.readOptionalString();
        content = in.readBytesReference();
        if (in.getVersion().after(Version.V_5_3_0_UNRELEASED)) { // TODO update to onOrAfter after backporting
        if (in.getVersion().onOrAfter(Version.V_5_3_0_UNRELEASED)) {
            xContentType = XContentType.readFrom(in);
        } else {
            xContentType = XContentFactory.xContentType(content);

@@ -151,7 +151,7 @@ public class PutStoredScriptRequest extends AcknowledgedRequest<PutStoredScriptR
        out.writeString(lang == null ? "" : lang);
        out.writeOptionalString(id);
        out.writeBytesReference(content);
        if (out.getVersion().after(Version.V_5_3_0_UNRELEASED)) { // TODO update to onOrAfter after backporting
        if (out.getVersion().onOrAfter(Version.V_5_3_0_UNRELEASED)) {
            xContentType.writeTo(out);
        }
    }
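Both readFrom and writeTo gate the new xContentType field on the stream version, so a node only exchanges the field with peers that know about it; the after() to onOrAfter() change presumably reflects the backport to 5.3.0 itself, as the removed TODO suggests. A self-contained sketch of this wire-compatibility pattern (plain DataStreams stand in for StreamInput/StreamOutput; the constants are illustrative):

<pre>
import java.io.*;

public class VersionGateSketch {
    static final int V_5_3_0 = 5030099;

    static void write(DataOutputStream out, int remoteVersion, String xContentType) throws IOException {
        out.writeUTF("script-content");
        if (remoteVersion >= V_5_3_0) {
            out.writeUTF(xContentType); // newer peers expect the explicit content type
        }
    }

    static String read(DataInputStream in, int remoteVersion) throws IOException {
        String content = in.readUTF();
        // older peers never sent the field, so fall back to auto-detection
        return remoteVersion >= V_5_3_0 ? in.readUTF() : "auto-detected from [" + content + "]";
    }

    public static void main(String[] args) throws IOException {
        ByteArrayOutputStream bytes = new ByteArrayOutputStream();
        write(new DataOutputStream(bytes), V_5_3_0, "JSON");
        DataInputStream in = new DataInputStream(new ByteArrayInputStream(bytes.toByteArray()));
        System.out.println(read(in, V_5_3_0)); // JSON
    }
}
</pre>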
@@ -53,6 +53,7 @@ import org.elasticsearch.index.analysis.NamedAnalyzer;
import org.elasticsearch.index.analysis.TokenFilterFactory;
import org.elasticsearch.index.analysis.TokenizerFactory;
import org.elasticsearch.index.mapper.AllFieldMapper;
import org.elasticsearch.index.mapper.KeywordFieldMapper;
import org.elasticsearch.index.mapper.MappedFieldType;
import org.elasticsearch.index.shard.ShardId;
import org.elasticsearch.indices.IndicesService;

@@ -131,10 +132,17 @@ public class TransportAnalyzeAction extends TransportSingleShardAction<AnalyzeRe
            }
            MappedFieldType fieldType = indexService.mapperService().fullName(request.field());
            if (fieldType != null) {
                if (fieldType.tokenized() == false) {
                if (fieldType.tokenized()) {
                    analyzer = fieldType.indexAnalyzer();
                } else if (fieldType instanceof KeywordFieldMapper.KeywordFieldType) {
                    analyzer = ((KeywordFieldMapper.KeywordFieldType) fieldType).normalizer();
                    if (analyzer == null) {
                        // this will be KeywordAnalyzer
                        analyzer = fieldType.indexAnalyzer();
                    }
                } else {
                    throw new IllegalArgumentException("Can't process field [" + request.field() + "], Analysis requests are only supported on tokenized fields");
                }
                analyzer = fieldType.indexAnalyzer();
                field = fieldType.name();
            }
        }
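The rewritten block above picks the analyzer in three tiers: tokenized fields use their index analyzer, keyword fields prefer their normalizer and fall back to the (keyword) index analyzer, and anything else is rejected. A compact sketch of that selection logic with plain strings standing in for analyzer instances (names are illustrative):

<pre>
public class AnalyzerPickSketch {
    static String pick(boolean tokenized, boolean keywordField, String normalizer, String indexAnalyzer) {
        if (tokenized) {
            return indexAnalyzer;
        } else if (keywordField) {
            // fall back to the index analyzer (effectively KeywordAnalyzer) when no normalizer is set
            return normalizer != null ? normalizer : indexAnalyzer;
        }
        throw new IllegalArgumentException("Analysis requests are only supported on tokenized fields");
    }

    public static void main(String[] args) {
        System.out.println(pick(true, false, null, "standard"));       // standard
        System.out.println(pick(false, true, "lowercase", "keyword")); // lowercase
        System.out.println(pick(false, true, null, "keyword"));        // keyword
    }
}
</pre>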
@@ -22,6 +22,7 @@ package org.elasticsearch.action.admin.indices.shards;
import com.carrotsearch.hppc.cursors.IntObjectCursor;
import com.carrotsearch.hppc.cursors.ObjectObjectCursor;
import org.elasticsearch.ElasticsearchException;
import org.elasticsearch.Version;
import org.elasticsearch.action.ActionResponse;
import org.elasticsearch.action.ShardOperationFailedException;
import org.elasticsearch.action.support.DefaultShardOperationFailedException;

@@ -55,7 +56,6 @@ public class IndicesShardStoresResponse extends ActionResponse implements ToXCon
     */
    public static class StoreStatus implements Streamable, ToXContent, Comparable<StoreStatus> {
        private DiscoveryNode node;
        private long legacyVersion;
        private String allocationId;
        private Exception storeException;
        private AllocationStatus allocationStatus;

@@ -116,9 +116,8 @@ public class IndicesShardStoresResponse extends ActionResponse implements ToXCon
        private StoreStatus() {
        }

        public StoreStatus(DiscoveryNode node, long legacyVersion, String allocationId, AllocationStatus allocationStatus, Exception storeException) {
        public StoreStatus(DiscoveryNode node, String allocationId, AllocationStatus allocationStatus, Exception storeException) {
            this.node = node;
            this.legacyVersion = legacyVersion;
            this.allocationId = allocationId;
            this.allocationStatus = allocationStatus;
            this.storeException = storeException;

@@ -131,13 +130,6 @@ public class IndicesShardStoresResponse extends ActionResponse implements ToXCon
            return node;
        }

        /**
         * Version of the store for pre-3.0 shards that have not yet been active
         */
        public long getLegacyVersion() {
            return legacyVersion;
        }

        /**
         * AllocationStatus id of the store, used to select the store that will be
         * used as a primary.

@@ -173,7 +165,10 @@ public class IndicesShardStoresResponse extends ActionResponse implements ToXCon
        @Override
        public void readFrom(StreamInput in) throws IOException {
            node = new DiscoveryNode(in);
            legacyVersion = in.readLong();
            if (in.getVersion().before(Version.V_6_0_0_alpha1_UNRELEASED)) {
                // legacy version
                in.readLong();
            }
            allocationId = in.readOptionalString();
            allocationStatus = AllocationStatus.readFrom(in);
            if (in.readBoolean()) {

@@ -184,7 +179,10 @@ public class IndicesShardStoresResponse extends ActionResponse implements ToXCon
        @Override
        public void writeTo(StreamOutput out) throws IOException {
            node.writeTo(out);
            out.writeLong(legacyVersion);
            if (out.getVersion().before(Version.V_6_0_0_alpha1_UNRELEASED)) {
                // legacy version
                out.writeLong(-1L);
            }
            out.writeOptionalString(allocationId);
            allocationStatus.writeTo(out);
            if (storeException != null) {

@@ -198,9 +196,6 @@ public class IndicesShardStoresResponse extends ActionResponse implements ToXCon
        @Override
        public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
            node.toXContent(builder, params);
            if (legacyVersion != ShardStateMetaData.NO_VERSION) {
                builder.field(Fields.LEGACY_VERSION, legacyVersion);
            }
            if (allocationId != null) {
                builder.field(Fields.ALLOCATION_ID, allocationId);
            }

@@ -225,11 +220,7 @@ public class IndicesShardStoresResponse extends ActionResponse implements ToXCon
            } else if (allocationId == null && other.allocationId != null) {
                return 1;
            } else if (allocationId == null && other.allocationId == null) {
                int compare = Long.compare(other.legacyVersion, legacyVersion);
                if (compare == 0) {
                    return Integer.compare(allocationStatus.id, other.allocationStatus.id);
                }
                return compare;
                return Integer.compare(allocationStatus.id, other.allocationStatus.id);
            } else {
                int compare = Integer.compare(allocationStatus.id, other.allocationStatus.id);
                if (compare == 0) {

@@ -405,7 +396,6 @@ public class IndicesShardStoresResponse extends ActionResponse implements ToXCon
        static final String FAILURES = "failures";
        static final String STORES = "stores";
        // StoreStatus fields
        static final String LEGACY_VERSION = "legacy_version";
        static final String ALLOCATION_ID = "allocation_id";
        static final String STORE_EXCEPTION = "store_exception";
        static final String ALLOCATED = "allocation";

@@ -180,7 +180,7 @@ public class TransportIndicesShardStoresAction extends TransportMasterNodeReadAc
            for (NodeGatewayStartedShards response : fetchResponse.responses) {
                if (shardExistsInNode(response)) {
                    IndicesShardStoresResponse.StoreStatus.AllocationStatus allocationStatus = getAllocationStatus(fetchResponse.shardId.getIndexName(), fetchResponse.shardId.id(), response.getNode());
                    storeStatuses.add(new IndicesShardStoresResponse.StoreStatus(response.getNode(), response.legacyVersion(), response.allocationId(), allocationStatus, response.storeException()));
                    storeStatuses.add(new IndicesShardStoresResponse.StoreStatus(response.getNode(), response.allocationId(), allocationStatus, response.storeException()));
                }
            }
            CollectionUtil.timSort(storeStatuses);

@@ -213,7 +213,7 @@ public class TransportIndicesShardStoresAction extends TransportMasterNodeReadAc
     * A shard exists/existed in a node only if shard state file exists in the node
     */
    private boolean shardExistsInNode(final NodeGatewayStartedShards response) {
        return response.storeException() != null || response.legacyVersion() != -1 || response.allocationId() != null;
        return response.storeException() != null || response.allocationId() != null;
    }

    @Override
@@ -27,6 +27,7 @@ import org.elasticsearch.action.DocWriteResponse;
import org.elasticsearch.action.delete.DeleteResponse;
import org.elasticsearch.action.index.IndexResponse;
import org.elasticsearch.action.update.UpdateResponse;
import org.elasticsearch.common.CheckedConsumer;
import org.elasticsearch.common.Strings;
import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.io.stream.StreamOutput;

@@ -35,9 +36,14 @@ import org.elasticsearch.common.io.stream.Writeable;
import org.elasticsearch.common.xcontent.StatusToXContentObject;
import org.elasticsearch.common.xcontent.ToXContent;
import org.elasticsearch.common.xcontent.XContentBuilder;
import org.elasticsearch.common.xcontent.XContentParser;
import org.elasticsearch.rest.RestStatus;

import java.io.IOException;
import java.util.function.Supplier;

import static org.elasticsearch.common.xcontent.XContentParserUtils.ensureExpectedToken;
import static org.elasticsearch.common.xcontent.XContentParserUtils.throwUnknownField;

/**
 * Represents a single item response for an action executed as part of the bulk API. Holds the index/type/id

@@ -45,6 +51,12 @@ import java.io.IOException;
 */
public class BulkItemResponse implements Streamable, StatusToXContentObject {

    private static final String _INDEX = "_index";
    private static final String _TYPE = "_type";
    private static final String _ID = "_id";
    private static final String STATUS = "status";
    private static final String ERROR = "error";

    @Override
    public RestStatus status() {
        return failure == null ? response.status() : failure.getStatus();

@@ -56,13 +68,13 @@ public class BulkItemResponse implements Streamable, StatusToXContentObject {
        builder.startObject(opType.getLowercase());
        if (failure == null) {
            response.innerToXContent(builder, params);
            builder.field(Fields.STATUS, response.status().getStatus());
            builder.field(STATUS, response.status().getStatus());
        } else {
            builder.field(Fields._INDEX, failure.getIndex());
            builder.field(Fields._TYPE, failure.getType());
            builder.field(Fields._ID, failure.getId());
            builder.field(Fields.STATUS, failure.getStatus().getStatus());
            builder.startObject(Fields.ERROR);
            builder.field(_INDEX, failure.getIndex());
            builder.field(_TYPE, failure.getType());
            builder.field(_ID, failure.getId());
            builder.field(STATUS, failure.getStatus().getStatus());
            builder.startObject(ERROR);
            ElasticsearchException.generateThrowableXContent(builder, params, failure.getCause());
            builder.endObject();
        }

@@ -71,12 +83,73 @@ public class BulkItemResponse implements Streamable, StatusToXContentObject {
        return builder;
    }

    static final class Fields {
        static final String _INDEX = "_index";
        static final String _TYPE = "_type";
        static final String _ID = "_id";
        static final String STATUS = "status";
        static final String ERROR = "error";
    /**
     * Reads a {@link BulkItemResponse} from a {@link XContentParser}.
     *
     * @param parser the {@link XContentParser}
     * @param id the id to assign to the parsed {@link BulkItemResponse}. It is usually the index of
     *           the item in the {@link BulkResponse#getItems} array.
     */
    public static BulkItemResponse fromXContent(XContentParser parser, int id) throws IOException {
        ensureExpectedToken(XContentParser.Token.START_OBJECT, parser.currentToken(), parser::getTokenLocation);

        XContentParser.Token token = parser.nextToken();
        ensureExpectedToken(XContentParser.Token.FIELD_NAME, token, parser::getTokenLocation);

        String currentFieldName = parser.currentName();
        token = parser.nextToken();

        final OpType opType = OpType.fromString(currentFieldName);
        ensureExpectedToken(XContentParser.Token.START_OBJECT, token, parser::getTokenLocation);

        DocWriteResponse.DocWriteResponseBuilder builder = null;
        CheckedConsumer<XContentParser, IOException> itemParser = null;

        if (opType == OpType.INDEX || opType == OpType.CREATE) {
            final IndexResponse.IndexResponseBuilder indexResponseBuilder = new IndexResponse.IndexResponseBuilder();
            builder = indexResponseBuilder;
            itemParser = (indexParser) -> IndexResponse.parseXContentFields(indexParser, indexResponseBuilder);

        } else if (opType == OpType.UPDATE) {
            final UpdateResponse.UpdateResponseBuilder updateResponseBuilder = new UpdateResponse.UpdateResponseBuilder();
            builder = updateResponseBuilder;
            itemParser = (updateParser) -> UpdateResponse.parseXContentFields(updateParser, updateResponseBuilder);

        } else if (opType == OpType.DELETE) {
            final DeleteResponse.DeleteResponseBuilder deleteResponseBuilder = new DeleteResponse.DeleteResponseBuilder();
            builder = deleteResponseBuilder;
            itemParser = (deleteParser) -> DeleteResponse.parseXContentFields(deleteParser, deleteResponseBuilder);
        } else {
            throwUnknownField(currentFieldName, parser.getTokenLocation());
        }

        ElasticsearchException exception = null;
        while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) {
            if (token == XContentParser.Token.FIELD_NAME) {
                currentFieldName = parser.currentName();
            }

            if (ERROR.equals(currentFieldName)) {
                if (token == XContentParser.Token.START_OBJECT) {
                    exception = ElasticsearchException.fromXContent(parser);
                }
            } else if (STATUS.equals(currentFieldName) == false) {
                itemParser.accept(parser);
            }
        }

        ensureExpectedToken(XContentParser.Token.END_OBJECT, token, parser::getTokenLocation);
        token = parser.nextToken();
        ensureExpectedToken(XContentParser.Token.END_OBJECT, token, parser::getTokenLocation);

        BulkItemResponse bulkItemResponse;
        if (exception != null) {
            Failure failure = new Failure(builder.getShardId().getIndexName(), builder.getType(), builder.getId(), exception);
            bulkItemResponse = new BulkItemResponse(id, opType, failure);
        } else {
            bulkItemResponse = new BulkItemResponse(id, opType, builder.build());
        }
        return bulkItemResponse;
    }

    /**
@@ -30,6 +30,7 @@ import org.elasticsearch.common.unit.ByteSizeValue;
import org.elasticsearch.common.unit.TimeValue;
import org.elasticsearch.common.util.concurrent.EsExecutors;
import org.elasticsearch.common.util.concurrent.FutureUtils;
import org.elasticsearch.common.xcontent.XContentType;

import java.io.Closeable;
import java.util.Objects;

@@ -288,16 +289,46 @@ public class BulkProcessor implements Closeable {
        executeIfNeeded();
    }

    /**
     * Adds the data from the bytes to be processed by the bulk processor
     * @deprecated use {@link #add(BytesReference, String, String, XContentType)} instead to avoid content type auto-detection
     */
    @Deprecated
    public BulkProcessor add(BytesReference data, @Nullable String defaultIndex, @Nullable String defaultType) throws Exception {
        return add(data, defaultIndex, defaultType, null, null);
    }

    public synchronized BulkProcessor add(BytesReference data, @Nullable String defaultIndex, @Nullable String defaultType, @Nullable String defaultPipeline, @Nullable Object payload) throws Exception {
    /**
     * Adds the data from the bytes to be processed by the bulk processor
     */
    public BulkProcessor add(BytesReference data, @Nullable String defaultIndex, @Nullable String defaultType,
                             XContentType xContentType) throws Exception {
        return add(data, defaultIndex, defaultType, null, null, xContentType);
    }

    /**
     * Adds the data from the bytes to be processed by the bulk processor
     * @deprecated use {@link #add(BytesReference, String, String, String, Object, XContentType)} instead to avoid content type
     * auto-detection
     */
    @Deprecated
    public synchronized BulkProcessor add(BytesReference data, @Nullable String defaultIndex, @Nullable String defaultType,
                                          @Nullable String defaultPipeline, @Nullable Object payload) throws Exception {
        bulkRequest.add(data, defaultIndex, defaultType, null, null, null, defaultPipeline, payload, true);
        executeIfNeeded();
        return this;
    }

    /**
     * Adds the data from the bytes to be processed by the bulk processor
     */
    public synchronized BulkProcessor add(BytesReference data, @Nullable String defaultIndex, @Nullable String defaultType,
                                          @Nullable String defaultPipeline, @Nullable Object payload, XContentType xContentType) throws Exception {
        bulkRequest.add(data, defaultIndex, defaultType, null, null, null, defaultPipeline, payload, true, xContentType);
        executeIfNeeded();
        return this;
    }

    private void executeIfNeeded() {
        ensureOpen();
        if (!isOverTheLimit()) {
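The new overloads take an explicit XContentType while the old auto-detecting ones are deprecated, matching the explicit Content-Type handling elsewhere in this merge. A self-contained sketch of the deprecation pattern used above, where the old overload survives but merely delegates with a null content type that downstream code resolves by auto-detection (the classes here are stand-ins, not the Elasticsearch API):

<pre>
public class DeprecatedOverloadSketch {
    /** @deprecated use {@link #add(byte[], String, String)} to avoid content type auto-detection */
    @Deprecated
    static void add(byte[] data, String index) {
        add(data, index, null); // null content type => auto-detect, the behavior being phased out
    }

    static void add(byte[] data, String index, String contentType) {
        String effective = contentType != null ? contentType : "auto-detected";
        System.out.println("bulk add to [" + index + "] as " + effective + " (" + data.length + " bytes)");
    }

    public static void main(String[] args) {
        byte[] body = "{\"index\":{}}\n{\"user\":\"kimchy\"}\n".getBytes(java.nio.charset.StandardCharsets.UTF_8);
        add(body, "twitter");                     // deprecated path, auto-detects
        add(body, "twitter", "application/json"); // preferred path, explicit type
    }
}
</pre>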
@@ -438,23 +438,25 @@ public class BulkRequest extends ActionRequest implements CompositeIndicesReques
                if ("index".equals(action)) {
                    if (opType == null) {
                        internalAdd(new IndexRequest(index, type, id).routing(routing).parent(parent).version(version).versionType(versionType)
                                .setPipeline(pipeline).source(data.slice(from, nextMarker - from), xContentType), payload);
                                .setPipeline(pipeline)
                                .source(sliceTrimmingCarriageReturn(data, from, nextMarker, xContentType), xContentType), payload);
                    } else {
                        internalAdd(new IndexRequest(index, type, id).routing(routing).parent(parent).version(version).versionType(versionType)
                                .create("create".equals(opType)).setPipeline(pipeline)
                                .source(data.slice(from, nextMarker - from), xContentType), payload);
                                .source(sliceTrimmingCarriageReturn(data, from, nextMarker, xContentType), xContentType), payload);
                    }
                } else if ("create".equals(action)) {
                    internalAdd(new IndexRequest(index, type, id).routing(routing).parent(parent).version(version).versionType(versionType)
                            .create(true).setPipeline(pipeline)
                            .source(data.slice(from, nextMarker - from), xContentType), payload);
                            .source(sliceTrimmingCarriageReturn(data, from, nextMarker, xContentType), xContentType), payload);
                } else if ("update".equals(action)) {
                    UpdateRequest updateRequest = new UpdateRequest(index, type, id).routing(routing).parent(parent).retryOnConflict(retryOnConflict)
                            .version(version).versionType(versionType)
                            .routing(routing)
                            .parent(parent);
                    // EMPTY is safe here because we never call namedObject
                    try (XContentParser sliceParser = xContent.createParser(NamedXContentRegistry.EMPTY, data.slice(from, nextMarker - from))) {
                    try (XContentParser sliceParser = xContent.createParser(NamedXContentRegistry.EMPTY,
                            sliceTrimmingCarriageReturn(data, from, nextMarker, xContentType))) {
                        updateRequest.fromXContent(sliceParser);
                    }
                    if (fetchSourceContext != null) {

@@ -485,6 +487,20 @@ public class BulkRequest extends ActionRequest implements CompositeIndicesReques
        return this;
    }

    /**
     * Returns the sliced {@link BytesReference}. If the {@link XContentType} is JSON, the byte preceding the marker is checked to see
     * if it is a carriage return and if so, the BytesReference is sliced so that the carriage return is ignored
     */
    private BytesReference sliceTrimmingCarriageReturn(BytesReference bytesReference, int from, int nextMarker, XContentType xContentType) {
        final int length;
        if (XContentType.JSON == xContentType && bytesReference.get(nextMarker - 1) == (byte) '\r') {
            length = nextMarker - from - 1;
        } else {
            length = nextMarker - from;
        }
        return bytesReference.slice(from, length);
    }

    /**
     * Sets the number of shard copies that must be active before proceeding with the write.
     * See {@link ReplicationRequest#waitForActiveShards(ActiveShardCount)} for details.
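sliceTrimmingCarriageReturn() above shortens a JSON slice by one byte when the byte before the newline marker is a carriage return, so CRLF-delimited bulk bodies parse cleanly. A self-contained sketch of that length computation on a plain byte array:

<pre>
import java.nio.charset.StandardCharsets;

public class CrlfTrimSketch {
    static String slice(byte[] data, int from, int nextMarker) {
        // drop a trailing '\r' so CRLF-separated bulk lines parse as clean JSON
        int length = data[nextMarker - 1] == (byte) '\r' ? nextMarker - from - 1 : nextMarker - from;
        return new String(data, from, length, StandardCharsets.UTF_8);
    }

    public static void main(String[] args) {
        byte[] body = "{\"index\":{}}\r\n{\"user\":\"kimchy\"}\n".getBytes(StandardCharsets.UTF_8);
        int marker = 13; // position of the first '\n', right after the '\r'
        System.out.println(slice(body, 0, marker)); // {"index":{}} with the '\r' trimmed
    }
}
</pre>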
@@ -85,8 +85,14 @@ public class BulkShardRequest extends ReplicatedWriteRequest<BulkShardRequest> {
    @Override
    public String toString() {
        // This is included in error messages so we'll try to make it somewhat user friendly.
        StringBuilder b = new StringBuilder("BulkShardRequest to [");
        b.append(index).append("] containing [").append(items.length).append("] requests");
        StringBuilder b = new StringBuilder("BulkShardRequest [");
        b.append(shardId).append("] containing [");
        if (items.length > 1) {
            b.append(items.length).append("] requests");
        } else {
            b.append(items[0].request()).append("]");
        }

        switch (getRefreshPolicy()) {
            case IMMEDIATE:
                b.append(" and a refresh");
@@ -82,7 +82,6 @@ import java.util.stream.Collectors;
public class TransportBulkAction extends HandledTransportAction<BulkRequest, BulkResponse> {

    private final AutoCreateIndex autoCreateIndex;
    private final boolean allowIdGeneration;
    private final ClusterService clusterService;
    private final IngestService ingestService;
    private final TransportShardBulkAction shardBulkAction;

@@ -115,7 +114,6 @@ public class TransportBulkAction extends HandledTransportAction<BulkRequest, Bul
        this.shardBulkAction = shardBulkAction;
        this.createIndexAction = createIndexAction;
        this.autoCreateIndex = autoCreateIndex;
        this.allowIdGeneration = this.settings.getAsBoolean("action.bulk.action.allow_id_generation", true);
        this.relativeTimeProvider = relativeTimeProvider;
        this.ingestForwarder = new IngestActionForwarder(transportService);
        clusterService.addStateApplier(this.ingestForwarder);

@@ -267,7 +265,7 @@ public class TransportBulkAction extends HandledTransportAction<BulkRequest, Bul
                    mappingMd = indexMetaData.mappingOrDefault(indexRequest.type());
                }
                indexRequest.resolveRouting(metaData);
                indexRequest.process(mappingMd, allowIdGeneration, concreteIndex.getName());
                indexRequest.process(mappingMd, concreteIndex.getName());
                break;
            case UPDATE:
                TransportUpdateAction.resolveAndValidateRouting(metaData, concreteIndex.getName(), (UpdateRequest) docWriteRequest);

@@ -72,7 +72,6 @@ public class TransportShardBulkAction extends TransportWriteAction<BulkShardRequ
    public static final String ACTION_NAME = BulkAction.NAME + "[s]";

    private final UpdateHelper updateHelper;
    private final boolean allowIdGeneration;
    private final MappingUpdatedAction mappingUpdatedAction;

    @Inject

@@ -83,7 +82,6 @@ public class TransportShardBulkAction extends TransportWriteAction<BulkShardRequ
        super(settings, ACTION_NAME, transportService, clusterService, indicesService, threadPool, shardStateAction, actionFilters,
            indexNameExpressionResolver, BulkShardRequest::new, BulkShardRequest::new, ThreadPool.Names.BULK);
        this.updateHelper = updateHelper;
        this.allowIdGeneration = settings.getAsBoolean("action.allow_id_generation", true);
        this.mappingUpdatedAction = mappingUpdatedAction;
    }

@@ -281,7 +279,7 @@ public class TransportShardBulkAction extends TransportWriteAction<BulkShardRequ
            case UPDATED:
                IndexRequest indexRequest = translate.action();
                MappingMetaData mappingMd = metaData.mappingOrDefault(indexRequest.type());
                indexRequest.process(mappingMd, allowIdGeneration, request.index());
                indexRequest.process(mappingMd, request.index());
                updateOperationResult = executeIndexRequestOnPrimary(indexRequest, primary, mappingUpdatedAction);
                if (updateOperationResult.hasFailure() == false) {
                    // update the version on request so it will happen on the replicas

@@ -371,6 +369,9 @@ public class TransportShardBulkAction extends TransportWriteAction<BulkShardRequ
            BulkItemRequest item = request.items()[i];
            if (item.isIgnoreOnReplica() == false) {
                DocWriteRequest docWriteRequest = item.request();
                // ensure request version is updated for replica operation during request execution in the primary
                assert docWriteRequest.versionType() == docWriteRequest.versionType().versionTypeForReplicationAndRecovery()
                    : "unexpected version in replica " + docWriteRequest.version();
                final Engine.Result operationResult;
                try {
                    switch (docWriteRequest.opType()) {
@@ -255,7 +255,7 @@ public class ClientScrollableHitSource extends ScrollableHitSource {

        private <T> T fieldValue(String fieldName) {
            SearchHitField field = delegate.field(fieldName);
            return field == null ? null : field.value();
            return field == null ? null : field.getValue();
        }
    }
}
@ -20,19 +20,14 @@
|
|||
package org.elasticsearch.action.delete;
|
||||
|
||||
import org.elasticsearch.action.DocWriteResponse;
|
||||
import org.elasticsearch.cluster.metadata.IndexMetaData;
|
||||
import org.elasticsearch.common.ParseField;
|
||||
import org.elasticsearch.common.xcontent.ConstructingObjectParser;
|
||||
import org.elasticsearch.common.xcontent.XContentBuilder;
|
||||
import org.elasticsearch.common.xcontent.XContentParser;
|
||||
import org.elasticsearch.index.Index;
|
||||
import org.elasticsearch.index.seqno.SequenceNumbersService;
|
||||
import org.elasticsearch.index.shard.ShardId;
|
||||
import org.elasticsearch.rest.RestStatus;
|
||||
|
||||
import java.io.IOException;
|
||||
|
||||
import static org.elasticsearch.common.xcontent.ConstructingObjectParser.constructorArg;
|
||||
import static org.elasticsearch.common.xcontent.XContentParserUtils.ensureExpectedToken;
|
||||
|
||||
/**
|
||||
* The response of the delete action.
|
||||
|
@ -45,7 +40,6 @@ public class DeleteResponse extends DocWriteResponse {
|
|||
private static final String FOUND = "found";
|
||||
|
||||
public DeleteResponse() {
|
||||
|
||||
}
|
||||
|
||||
public DeleteResponse(ShardId shardId, String type, String id, long seqNo, long version, boolean found) {
|
||||
|
@ -57,37 +51,6 @@ public class DeleteResponse extends DocWriteResponse {
|
|||
        return result == Result.DELETED ? super.status() : RestStatus.NOT_FOUND;
    }

    @Override
    public XContentBuilder innerToXContent(XContentBuilder builder, Params params) throws IOException {
        builder.field(FOUND, result == Result.DELETED);
        super.innerToXContent(builder, params);
        return builder;
    }

    private static final ConstructingObjectParser<DeleteResponse, Void> PARSER;
    static {
        PARSER = new ConstructingObjectParser<>(DeleteResponse.class.getName(),
            args -> {
                // index uuid and shard id are unknown and can't be parsed back for now.
                ShardId shardId = new ShardId(new Index((String) args[0], IndexMetaData.INDEX_UUID_NA_VALUE), -1);
                String type = (String) args[1];
                String id = (String) args[2];
                long version = (long) args[3];
                ShardInfo shardInfo = (ShardInfo) args[5];
                long seqNo = (args[6] != null) ? (long) args[6] : SequenceNumbersService.UNASSIGNED_SEQ_NO;
                boolean found = (boolean) args[7];
                DeleteResponse deleteResponse = new DeleteResponse(shardId, type, id, seqNo, version, found);
                deleteResponse.setShardInfo(shardInfo);
                return deleteResponse;
            });
        DocWriteResponse.declareParserFields(PARSER);
        PARSER.declareBoolean(constructorArg(), new ParseField(FOUND));
    }

    public static DeleteResponse fromXContent(XContentParser parser) {
        return PARSER.apply(parser, null);
    }

    @Override
    public String toString() {
        StringBuilder builder = new StringBuilder();
@@ -100,4 +63,56 @@ public class DeleteResponse extends DocWriteResponse {
        builder.append(",shards=").append(getShardInfo());
        return builder.append("]").toString();
    }

    @Override
    public XContentBuilder innerToXContent(XContentBuilder builder, Params params) throws IOException {
        builder.field(FOUND, result == Result.DELETED);
        super.innerToXContent(builder, params);
        return builder;
    }

    public static DeleteResponse fromXContent(XContentParser parser) throws IOException {
        ensureExpectedToken(XContentParser.Token.START_OBJECT, parser.nextToken(), parser::getTokenLocation);

        DeleteResponseBuilder context = new DeleteResponseBuilder();
        while (parser.nextToken() != XContentParser.Token.END_OBJECT) {
            parseXContentFields(parser, context);
        }
        return context.build();
    }

    /**
     * Parse the current token and update the parsing context appropriately.
     */
    public static void parseXContentFields(XContentParser parser, DeleteResponseBuilder context) throws IOException {
        XContentParser.Token token = parser.currentToken();
        String currentFieldName = parser.currentName();

        if (FOUND.equals(currentFieldName)) {
            if (token.isValue()) {
                context.setFound(parser.booleanValue());
            }
        } else {
            DocWriteResponse.parseInnerToXContent(parser, context);
        }
    }

    public static class DeleteResponseBuilder extends DocWriteResponse.DocWriteResponseBuilder {

        private boolean found = false;

        public void setFound(boolean found) {
            this.found = found;
        }

        @Override
        public DeleteResponse build() {
            DeleteResponse deleteResponse = new DeleteResponse(shardId, type, id, seqNo, version, found);
            deleteResponse.setForcedRefresh(forcedRefresh);
            if (shardInfo != null) {
                deleteResponse.setShardInfo(shardInfo);
            }
            return deleteResponse;
        }
    }
}
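For context on how the new builder-based fromXContent above is meant to be driven, here is a minimal sketch. It is illustrative only and not part of the change; it assumes the 5.x XContent.createParser(String) overload and a JSON body shaped like a real delete response.

<pre>
// Illustrative sketch: parse a delete response body with the new parser.
String json = "{\"_index\":\"twitter\",\"_type\":\"tweet\",\"_id\":\"1\","
    + "\"_version\":2,\"result\":\"deleted\",\"found\":true,"
    + "\"_shards\":{\"total\":2,\"successful\":1,\"failed\":0}}";
try (XContentParser parser = XContentFactory.xContent(XContentType.JSON).createParser(json)) {
    DeleteResponse response = DeleteResponse.fromXContent(parser);
    assert response.getResult() == DocWriteResponse.Result.DELETED; // "found": true maps to DELETED
}
</pre>

The builder indirection exists because most fields (_index, _version, _shards, ...) are consumed by the shared DocWriteResponse parsing, while only "found" is specific to deletes.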
@@ -132,7 +132,7 @@ public abstract class FieldStats<T> implements Writeable, ToXContent {
    }

    /**
     * @return true if min/max informations are available for this field
     * @return true if min/max information is available for this field
     */
    public boolean hasMinMax() {
        return hasMinMax;
@@ -26,6 +26,7 @@ import org.elasticsearch.action.CompositeIndicesRequest;
import org.elasticsearch.action.DocWriteRequest;
import org.elasticsearch.action.RoutingMissingException;
import org.elasticsearch.action.support.replication.ReplicatedWriteRequest;
import org.elasticsearch.action.support.replication.ReplicationRequest;
import org.elasticsearch.client.Requests;
import org.elasticsearch.cluster.metadata.MappingMetaData;
import org.elasticsearch.cluster.metadata.MetaData;
@@ -36,6 +37,7 @@ import org.elasticsearch.common.bytes.BytesReference;
import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.io.stream.StreamOutput;
import org.elasticsearch.common.lucene.uid.Versions;
import org.elasticsearch.common.unit.ByteSizeValue;
import org.elasticsearch.common.unit.TimeValue;
import org.elasticsearch.common.xcontent.XContentBuilder;
import org.elasticsearch.common.xcontent.XContentFactory;
@@ -70,6 +72,13 @@ import static org.elasticsearch.action.ValidateActions.addValidationError;
 */
public class IndexRequest extends ReplicatedWriteRequest<IndexRequest> implements DocWriteRequest<IndexRequest>, CompositeIndicesRequest {

    /**
     * Max length of the source document to include into toString()
     *
     * @see ReplicationRequest#createTask(long, java.lang.String, java.lang.String, org.elasticsearch.tasks.TaskId)
     */
    static final int MAX_SOURCE_LENGTH_IN_TOSTRING = 2048;

    private String type;
    private String id;
    @Nullable
@@ -517,7 +526,7 @@ public class IndexRequest extends ReplicatedWriteRequest<IndexRequest> implement
    }


    public void process(@Nullable MappingMetaData mappingMd, boolean allowIdGeneration, String concreteIndex) {
    public void process(@Nullable MappingMetaData mappingMd, String concreteIndex) {
        if (mappingMd != null) {
            // might as well check for routing here
            if (mappingMd.routing().required() && routing == null) {
@@ -533,9 +542,9 @@ public class IndexRequest extends ReplicatedWriteRequest<IndexRequest> implement
            }
        }

        // generate id if not already provided and id generation is allowed
        if (allowIdGeneration && id == null) {
            assert autoGeneratedTimestamp == -1;
        // generate id if not already provided
        if (id == null) {
            assert autoGeneratedTimestamp == -1 : "timestamp has already been generated!";
            autoGeneratedTimestamp = Math.max(0, System.currentTimeMillis()); // extra paranoia
            id(UUIDs.base64UUID());
        }
@@ -564,7 +573,7 @@ public class IndexRequest extends ReplicatedWriteRequest<IndexRequest> implement
        pipeline = in.readOptionalString();
        isRetry = in.readBoolean();
        autoGeneratedTimestamp = in.readLong();
        if (in.getVersion().after(Version.V_5_3_0_UNRELEASED)) { // TODO update to onOrAfter after backporting
        if (in.getVersion().onOrAfter(Version.V_5_3_0_UNRELEASED)) {
            contentType = in.readOptionalWriteable(XContentType::readFrom);
        } else {
            contentType = XContentFactory.xContentType(source);
@@ -597,7 +606,7 @@ public class IndexRequest extends ReplicatedWriteRequest<IndexRequest> implement
        out.writeOptionalString(pipeline);
        out.writeBoolean(isRetry);
        out.writeLong(autoGeneratedTimestamp);
        if (out.getVersion().after(Version.V_5_3_0_UNRELEASED)) { // TODO update to onOrAfter after backporting
        if (out.getVersion().onOrAfter(Version.V_5_3_0_UNRELEASED)) {
            out.writeOptionalWriteable(contentType);
        }
    }
@@ -606,7 +615,12 @@ public class IndexRequest extends ReplicatedWriteRequest<IndexRequest> implement
    public String toString() {
        String sSource = "_na_";
        try {
            sSource = XContentHelper.convertToJson(source, false);
            if (source.length() > MAX_SOURCE_LENGTH_IN_TOSTRING) {
                sSource = "n/a, actual length: [" + new ByteSizeValue(source.length()).toString() + "], max length: " +
                    new ByteSizeValue(MAX_SOURCE_LENGTH_IN_TOSTRING).toString();
            } else {
                sSource = XContentHelper.convertToJson(source, false);
            }
        } catch (Exception e) {
            // ignore
        }
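All of the after(...) to onOrAfter(...) fixes in this commit follow one wire-compatibility pattern, and the same gate recurs in PutPipelineRequest and SimulatePipelineRequest below. A distilled sketch of the write side (a hypothetical standalone example, not code from the commit):

<pre>
// Hedged sketch of the version gate: emit the XContentType only to peers that
// can read it back; the read path must mirror this check exactly.
public void writeTo(StreamOutput out) throws IOException {
    out.writeBytesReference(source);
    if (out.getVersion().onOrAfter(Version.V_5_3_0_UNRELEASED)) {
        out.writeOptionalWriteable(contentType);
    }
    // peers before 5.3 never see the field and instead re-derive the type
    // via XContentFactory.xContentType(source), as the read branch above shows.
}
</pre>

The after(...) form with its TODO was only a placeholder until the feature was backported to the 5.3 branch; once 5.3 nodes actually carry the field, both sides of the stream have to flip to onOrAfter(...) together, which is exactly what this commit does.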
@@ -20,20 +20,15 @@
package org.elasticsearch.action.index;

import org.elasticsearch.action.DocWriteResponse;
import org.elasticsearch.cluster.metadata.IndexMetaData;
import org.elasticsearch.common.ParseField;
import org.elasticsearch.common.Strings;
import org.elasticsearch.common.xcontent.ConstructingObjectParser;
import org.elasticsearch.common.xcontent.XContentBuilder;
import org.elasticsearch.common.xcontent.XContentParser;
import org.elasticsearch.index.Index;
import org.elasticsearch.index.seqno.SequenceNumbersService;
import org.elasticsearch.index.shard.ShardId;
import org.elasticsearch.rest.RestStatus;

import java.io.IOException;

import static org.elasticsearch.common.xcontent.ConstructingObjectParser.constructorArg;
import static org.elasticsearch.common.xcontent.XContentParserUtils.ensureExpectedToken;

/**
 * A response of an index operation,
@@ -78,34 +73,48 @@ public class IndexResponse extends DocWriteResponse {
        return builder;
    }

    /**
     * ConstructingObjectParser used to parse the {@link IndexResponse}. We use an ObjectParser here
     * because most fields are parsed by the parent abstract class {@link DocWriteResponse} and it's
     * not easy to parse part of the fields in the parent class and other fields in the children class
     * using the usual streamed parsing method.
     */
    private static final ConstructingObjectParser<IndexResponse, Void> PARSER;
    static {
        PARSER = new ConstructingObjectParser<>(IndexResponse.class.getName(),
            args -> {
                // index uuid and shard id are unknown and can't be parsed back for now.
                ShardId shardId = new ShardId(new Index((String) args[0], IndexMetaData.INDEX_UUID_NA_VALUE), -1);
                String type = (String) args[1];
                String id = (String) args[2];
                long version = (long) args[3];
                ShardInfo shardInfo = (ShardInfo) args[5];
                long seqNo = (args[6] != null) ? (long) args[6] : SequenceNumbersService.UNASSIGNED_SEQ_NO;
                boolean created = (boolean) args[7];
    public static IndexResponse fromXContent(XContentParser parser) throws IOException {
        ensureExpectedToken(XContentParser.Token.START_OBJECT, parser.nextToken(), parser::getTokenLocation);

                IndexResponse indexResponse = new IndexResponse(shardId, type, id, seqNo, version, created);
                indexResponse.setShardInfo(shardInfo);
                return indexResponse;
            });
        DocWriteResponse.declareParserFields(PARSER);
        PARSER.declareBoolean(constructorArg(), new ParseField(CREATED));
        IndexResponseBuilder context = new IndexResponseBuilder();
        while (parser.nextToken() != XContentParser.Token.END_OBJECT) {
            parseXContentFields(parser, context);
        }
        return context.build();
    }

    public static IndexResponse fromXContent(XContentParser parser) throws IOException {
        return PARSER.apply(parser, null);
    /**
     * Parse the current token and update the parsing context appropriately.
     */
    public static void parseXContentFields(XContentParser parser, IndexResponseBuilder context) throws IOException {
        XContentParser.Token token = parser.currentToken();
        String currentFieldName = parser.currentName();

        if (CREATED.equals(currentFieldName)) {
            if (token.isValue()) {
                context.setCreated(parser.booleanValue());
            }
        } else {
            DocWriteResponse.parseInnerToXContent(parser, context);
        }
    }

    public static class IndexResponseBuilder extends DocWriteResponse.DocWriteResponseBuilder {

        private boolean created = false;

        public void setCreated(boolean created) {
            this.created = created;
        }

        @Override
        public IndexResponse build() {
            IndexResponse indexResponse = new IndexResponse(shardId, type, id, seqNo, version, created);
            indexResponse.setForcedRefresh(forcedRefresh);
            if (shardInfo != null) {
                indexResponse.setShardInfo(shardInfo);
            }
            return indexResponse;
        }
    }
}
@@ -80,7 +80,7 @@ public class PutPipelineRequest extends AcknowledgedRequest<PutPipelineRequest>
        super.readFrom(in);
        id = in.readString();
        source = in.readBytesReference();
        if (in.getVersion().after(Version.V_5_3_0_UNRELEASED)) { // TODO update to onOrAfter after backporting
        if (in.getVersion().onOrAfter(Version.V_5_3_0_UNRELEASED)) {
            xContentType = XContentType.readFrom(in);
        } else {
            xContentType = XContentFactory.xContentType(source);
@@ -92,7 +92,7 @@ public class PutPipelineRequest extends AcknowledgedRequest<PutPipelineRequest>
        super.writeTo(out);
        out.writeString(id);
        out.writeBytesReference(source);
        if (out.getVersion().after(Version.V_5_3_0_UNRELEASED)) { // TODO update to onOrAfter after backporting
        if (out.getVersion().onOrAfter(Version.V_5_3_0_UNRELEASED)) {
            xContentType.writeTo(out);
        }
    }
@@ -103,7 +103,7 @@ public class SimulatePipelineRequest extends ActionRequest {
        id = in.readOptionalString();
        verbose = in.readBoolean();
        source = in.readBytesReference();
        if (in.getVersion().after(Version.V_5_3_0_UNRELEASED)) { // TODO update to onOrAfter after backporting
        if (in.getVersion().onOrAfter(Version.V_5_3_0_UNRELEASED)) {
            xContentType = XContentType.readFrom(in);
        } else {
            xContentType = XContentFactory.xContentType(source);
@@ -116,7 +116,7 @@ public class SimulatePipelineRequest extends ActionRequest {
        out.writeOptionalString(id);
        out.writeBoolean(verbose);
        out.writeBytesReference(source);
        if (out.getVersion().after(Version.V_5_3_0_UNRELEASED)) { // TODO update to onOrAfter after backporting
        if (out.getVersion().onOrAfter(Version.V_5_3_0_UNRELEASED)) {
            xContentType.writeTo(out);
        }
    }
@@ -23,14 +23,17 @@ import org.elasticsearch.Build;
import org.elasticsearch.Version;
import org.elasticsearch.action.ActionResponse;
import org.elasticsearch.cluster.ClusterName;
import org.elasticsearch.common.ParseField;
import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.io.stream.StreamOutput;
import org.elasticsearch.common.xcontent.ToXContent;
import org.elasticsearch.common.xcontent.ObjectParser;
import org.elasticsearch.common.xcontent.ToXContentObject;
import org.elasticsearch.common.xcontent.XContentBuilder;
import org.elasticsearch.common.xcontent.XContentParser;

import java.io.IOException;

public class MainResponse extends ActionResponse implements ToXContent {
public class MainResponse extends ActionResponse implements ToXContentObject {

    private String nodeName;
    private Version version;
@@ -114,4 +117,24 @@ public class MainResponse extends ActionResponse implements ToXContent {
        builder.endObject();
        return builder;
    }

    private static final ObjectParser<MainResponse, Void> PARSER = new ObjectParser<>(MainResponse.class.getName(), true,
        () -> new MainResponse());

    static {
        PARSER.declareString((response, value) -> response.nodeName = value, new ParseField("name"));
        PARSER.declareString((response, value) -> response.clusterName = new ClusterName(value), new ParseField("cluster_name"));
        PARSER.declareString((response, value) -> response.clusterUuid = value, new ParseField("cluster_uuid"));
        PARSER.declareString((response, value) -> {}, new ParseField("tagline"));
        PARSER.declareObject((response, value) -> {
            response.build = new Build((String) value.get("build_hash"), (String) value.get("build_date"),
                (boolean) value.get("build_snapshot"));
            response.version = Version.fromString((String) value.get("number"));
            response.available = true;
        }, (parser, context) -> parser.map(), new ParseField("version"));
    }

    public static MainResponse fromXContent(XContentParser parser) {
        return PARSER.apply(parser, null);
    }
}
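A usage sketch for the lenient ObjectParser above (illustrative only; it assumes the 5.x createParser(String) overload, and the JSON mirrors what GET / returns):

<pre>
String json = "{\"name\":\"node-1\",\"cluster_name\":\"elasticsearch\",\"cluster_uuid\":\"_na_\","
    + "\"version\":{\"number\":\"5.3.0\",\"build_hash\":\"3adb13b\","
    + "\"build_date\":\"2017-03-23T03:31:50Z\",\"build_snapshot\":false},"
    + "\"tagline\":\"You Know, for Search\"}";
try (XContentParser parser = XContentFactory.xContent(XContentType.JSON).createParser(json)) {
    MainResponse main = MainResponse.fromXContent(parser);
    // name, cluster_name, cluster_uuid, build and version are populated;
    // the tagline is deliberately parsed and discarded.
}
</pre>

Note the `true` passed to the ObjectParser constructor: it makes the parser ignore unknown fields (such as `lucene_version` inside the version object) instead of failing, which keeps the client forward-compatible.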
@@ -19,77 +19,67 @@

package org.elasticsearch.action.search;

import com.carrotsearch.hppc.IntArrayList;
import org.apache.logging.log4j.Logger;
import org.apache.logging.log4j.message.ParameterizedMessage;
import org.apache.logging.log4j.util.Supplier;
import org.apache.lucene.search.ScoreDoc;
import org.apache.lucene.util.SetOnce;
import org.elasticsearch.ElasticsearchException;
import org.elasticsearch.ExceptionsHelper;
import org.elasticsearch.action.ActionListener;
import org.elasticsearch.action.ActionRunnable;
import org.elasticsearch.action.NoShardAvailableActionException;
import org.elasticsearch.action.ShardOperationFailedException;
import org.elasticsearch.action.support.TransportActions;
import org.elasticsearch.cluster.routing.GroupShardsIterator;
import org.elasticsearch.cluster.routing.ShardIterator;
import org.elasticsearch.cluster.routing.ShardRouting;
import org.elasticsearch.common.CheckedRunnable;
import org.elasticsearch.common.Nullable;
import org.elasticsearch.common.util.concurrent.AtomicArray;
import org.elasticsearch.common.util.concurrent.CountDown;
import org.elasticsearch.search.SearchPhaseResult;
import org.elasticsearch.search.SearchShardTarget;
import org.elasticsearch.search.fetch.FetchSearchResult;
import org.elasticsearch.search.fetch.FetchSearchResultProvider;
import org.elasticsearch.search.fetch.ShardFetchSearchRequest;
import org.elasticsearch.search.internal.AliasFilter;
import org.elasticsearch.search.internal.InternalSearchResponse;
import org.elasticsearch.search.internal.ShardSearchTransportRequest;
import org.elasticsearch.search.query.QuerySearchResult;
import org.elasticsearch.search.query.QuerySearchResultProvider;
import org.elasticsearch.transport.ConnectTransportException;
import org.elasticsearch.transport.Transport;

import java.io.IOException;
import java.util.List;
import java.util.Map;
import java.util.Objects;
import java.util.StringJoiner;
import java.util.concurrent.Executor;
import java.util.concurrent.atomic.AtomicInteger;
import java.util.function.Function;
import java.util.function.IntConsumer;
import java.util.stream.Collectors;


abstract class AbstractSearchAsyncAction<FirstResult extends SearchPhaseResult> extends AbstractAsyncAction {
abstract class AbstractSearchAsyncAction<Result extends SearchPhaseResult> extends InitialSearchPhase<Result>
    implements SearchPhaseContext {
    private static final float DEFAULT_INDEX_BOOST = 1.0f;
    protected final Logger logger;
    protected final SearchTransportService searchTransportService;
    private final Logger logger;
    private final SearchTransportService searchTransportService;
    private final Executor executor;
    protected final ActionListener<SearchResponse> listener;
    private final GroupShardsIterator shardsIts;
    protected final SearchRequest request;
    /** Used by subclasses to resolve node ids to DiscoveryNodes. **/
    protected final Function<String, Transport.Connection> nodeIdToConnection;
    protected final SearchPhaseController searchPhaseController;
    protected final SearchTask task;
    private final int expectedSuccessfulOps;
    private final int expectedTotalOps;
    private final AtomicInteger successfulOps = new AtomicInteger();
    private final AtomicInteger totalOps = new AtomicInteger();
    private final AtomicArray<FirstResult> initialResults;
    private final ActionListener<SearchResponse> listener;
    private final SearchRequest request;
    /**
     * Used by subclasses to resolve node ids to DiscoveryNodes.
     **/
    private final Function<String, Transport.Connection> nodeIdToConnection;
    private final SearchTask task;
    private final AtomicArray<Result> results;
    private final long clusterStateVersion;
    private final Map<String, AliasFilter> aliasFilter;
    private final Map<String, Float> concreteIndexBoosts;
    private final long clusterStateVersion;
    private volatile AtomicArray<ShardSearchFailure> shardFailures;
    private final SetOnce<AtomicArray<ShardSearchFailure>> shardFailures = new SetOnce<>();
    private final Object shardFailuresMutex = new Object();
    private final AtomicInteger successfulOps = new AtomicInteger();
    private final long startTime;

    protected AbstractSearchAsyncAction(Logger logger, SearchTransportService searchTransportService,

    protected AbstractSearchAsyncAction(String name, Logger logger, SearchTransportService searchTransportService,
                                        Function<String, Transport.Connection> nodeIdToConnection,
                                        Map<String, AliasFilter> aliasFilter, Map<String, Float> concreteIndexBoosts,
                                        SearchPhaseController searchPhaseController, Executor executor, SearchRequest request,
                                        Executor executor, SearchRequest request,
                                        ActionListener<SearchResponse> listener, GroupShardsIterator shardsIts, long startTime,
                                        long clusterStateVersion, SearchTask task) {
        super(startTime);
        super(name, request, shardsIts, logger);
        this.startTime = startTime;
        this.logger = logger;
        this.searchPhaseController = searchPhaseController;
        this.searchTransportService = searchTransportService;
        this.executor = executor;
        this.request = request;
@@ -97,175 +87,75 @@ abstract class AbstractSearchAsyncAction<FirstResult extends SearchPhaseResult>
        this.listener = listener;
        this.nodeIdToConnection = nodeIdToConnection;
        this.clusterStateVersion = clusterStateVersion;
        this.shardsIts = shardsIts;
        expectedSuccessfulOps = shardsIts.size();
        // we need to add 1 for non active partition, since we count it in the total!
        expectedTotalOps = shardsIts.totalSizeWith1ForEmpty();
        initialResults = new AtomicArray<>(shardsIts.size());
        this.aliasFilter = aliasFilter;
        results = new AtomicArray<>(shardsIts.size());
        this.concreteIndexBoosts = concreteIndexBoosts;
        this.aliasFilter = aliasFilter;
    }

    public void start() {
        if (expectedSuccessfulOps == 0) {
    /**
     * Builds how long it took to execute the search.
     */
    private long buildTookInMillis() {
        // protect ourselves against time going backwards
        // negative values don't make sense and we want to be able to serialize that thing as a vLong
        return Math.max(1, System.currentTimeMillis() - startTime);
    }

    /**
     * This is the main entry point for a search. This method starts the search execution of the initial phase.
     */
    public final void start() {
        if (results.length() == 0) {
            // no search shards to search on, bail with empty response
            // (it happens with search across _all with no indices around and consistent with broadcast operations)
            listener.onResponse(new SearchResponse(InternalSearchResponse.empty(), null, 0, 0, buildTookInMillis(),
                ShardSearchFailure.EMPTY_ARRAY));
            return;
        }
        int shardIndex = -1;
        for (final ShardIterator shardIt : shardsIts) {
            shardIndex++;
            final ShardRouting shard = shardIt.nextOrNull();
            if (shard != null) {
                performInitialPhase(shardIndex, shardIt, shard);
            } else {
                // really, no shards active in this group
                onInitialPhaseResult(shardIndex, null, null, shardIt, new NoShardAvailableActionException(shardIt.shardId()));
            }
        }
        executePhase(this);
    }

    void performInitialPhase(final int shardIndex, final ShardIterator shardIt, final ShardRouting shard) {
        if (shard == null) {
            // TODO upgrade this to an assert...
            // no more active shards... (we should not really get here, but just for safety)
            onInitialPhaseResult(shardIndex, null, null, shardIt, new NoShardAvailableActionException(shardIt.shardId()));
    @Override
    public final void executeNextPhase(SearchPhase currentPhase, SearchPhase nextPhase) {
        /* This is the main search phase transition where we move to the next phase. At this point we check if there is
         * at least one successful operation left and if so we move to the next phase. If not we immediately fail the
         * search phase as "all shards failed" */
        if (successfulOps.get() == 0) { // we have 0 successful results that means we shortcut stuff and return a failure
            if (logger.isDebugEnabled()) {
                final ShardOperationFailedException[] shardSearchFailures = ExceptionsHelper.groupBy(buildShardFailures());
                Throwable cause = ElasticsearchException.guessRootCauses(shardSearchFailures[0].getCause())[0];
                logger.debug((Supplier<?>) () -> new ParameterizedMessage("All shards failed for phase: [{}]", getName()),
                    cause);
            }
            onPhaseFailure(currentPhase, "all shards failed", null);
        } else {
            try {
                final Transport.Connection connection = nodeIdToConnection.apply(shard.currentNodeId());
                AliasFilter filter = this.aliasFilter.get(shard.index().getUUID());
                assert filter != null;

                float indexBoost = concreteIndexBoosts.getOrDefault(shard.index().getUUID(), DEFAULT_INDEX_BOOST);
                ShardSearchTransportRequest transportRequest = new ShardSearchTransportRequest(request, shardIt.shardId(), shardsIts.size(),
                    filter, indexBoost, startTime());
                sendExecuteFirstPhase(connection, transportRequest, new ActionListener<FirstResult>() {
                    @Override
                    public void onResponse(FirstResult result) {
                        onInitialPhaseResult(shardIndex, shard.currentNodeId(), result, shardIt);
                    }

                    @Override
                    public void onFailure(Exception t) {
                        onInitialPhaseResult(shardIndex, shard, connection.getNode().getId(), shardIt, t);
                    }
                });
            } catch (ConnectTransportException | IllegalArgumentException ex) {
                // we are getting the connection early here so we might run into nodes that are not connected. in that case we move on to
                // the next shard. previously when using discovery nodes here we had a special case for null when a node was not connected
                // at all which is not needed anymore.
                onInitialPhaseResult(shardIndex, shard, shard.currentNodeId(), shardIt, ex);
            if (logger.isTraceEnabled()) {
                final String resultsFrom = results.asList().stream()
                    .map(r -> r.value.shardTarget().toString()).collect(Collectors.joining(","));
                logger.trace("[{}] Moving to next phase: [{}], based on results from: {} (cluster state version: {})",
                    currentPhase.getName(), nextPhase.getName(), resultsFrom, clusterStateVersion);
            }
            executePhase(nextPhase);
        }
    }

    private void onInitialPhaseResult(int shardIndex, String nodeId, FirstResult result, ShardIterator shardIt) {
        result.shardTarget(new SearchShardTarget(nodeId, shardIt.shardId()));
        processFirstPhaseResult(shardIndex, result);
        // we need to increment successful ops first before we compare the exit condition otherwise if we
        // are fast we could concurrently update totalOps but then preempt one of the threads which can
        // cause the successor to read a wrong value from successfulOps if second phase is very fast, i.e. count etc.
        successfulOps.incrementAndGet();
        // increment all the "future" shards to update the total ops since some may work and some may not...
        // and when that happens, we break on total ops, so we must maintain them
        final int xTotalOps = totalOps.addAndGet(shardIt.remaining() + 1);
        if (xTotalOps == expectedTotalOps) {
            executePhase(initialPhaseName(), innerGetNextPhase(), null);
        } else if (xTotalOps > expectedTotalOps) {
            raiseEarlyFailure(new IllegalStateException("unexpected higher total ops [" + xTotalOps + "] compared " +
                "to expected [" + expectedTotalOps + "]"));
        }
    }

    protected void executePhase(String phaseName, CheckedRunnable<Exception> phase, Exception suppressedException) {
    private void executePhase(SearchPhase phase) {
        try {
            phase.run();
        } catch (Exception e) {
            if (suppressedException != null) {
                e.addSuppressed(suppressedException);
            }
            if (logger.isDebugEnabled()) {
                logger.debug(
                    (Supplier<?>) () -> new ParameterizedMessage(
                        "Failed to execute [{}] while moving to second phase", request),
                        "Failed to execute [{}] while moving to [{}] phase", request, phase.getName()),
                    e);
            }
            raiseEarlyFailure(new ReduceSearchPhaseException(phaseName, "", e, buildShardFailures()));
            onPhaseFailure(phase, "", e);
        }
    }

    private void onInitialPhaseResult(final int shardIndex, @Nullable ShardRouting shard, @Nullable String nodeId,
                                      final ShardIterator shardIt, Exception e) {
        // we always add the shard failure for a specific shard instance
        // we do make sure to clean it on a successful response from a shard
        SearchShardTarget shardTarget = new SearchShardTarget(nodeId, shardIt.shardId());
        addShardFailure(shardIndex, shardTarget, e);

        if (totalOps.incrementAndGet() == expectedTotalOps) {
            if (logger.isDebugEnabled()) {
                if (e != null && !TransportActions.isShardNotAvailableException(e)) {
                    logger.debug(
                        (Supplier<?>) () -> new ParameterizedMessage(
                            "{}: Failed to execute [{}]",
                            shard != null ? shard.shortSummary() :
                                shardIt.shardId(),
                            request),
                        e);
                } else if (logger.isTraceEnabled()) {
                    logger.trace((Supplier<?>) () -> new ParameterizedMessage("{}: Failed to execute [{}]", shard, request), e);
                }
            }
            final ShardSearchFailure[] shardSearchFailures = buildShardFailures();
            if (successfulOps.get() == 0) {
                if (logger.isDebugEnabled()) {
                    logger.debug((Supplier<?>) () -> new ParameterizedMessage("All shards failed for phase: [{}]", initialPhaseName()), e);
                }

                // no successful ops, raise an exception
                raiseEarlyFailure(new SearchPhaseExecutionException(initialPhaseName(), "all shards failed", e, shardSearchFailures));
            } else {
                executePhase(initialPhaseName(), innerGetNextPhase(), e);
            }
        } else {
            final ShardRouting nextShard = shardIt.nextOrNull();
            final boolean lastShard = nextShard == null;
            // trace log this exception
            logger.trace(
                (Supplier<?>) () -> new ParameterizedMessage(
                    "{}: Failed to execute [{}] lastShard [{}]",
                    shard != null ? shard.shortSummary() : shardIt.shardId(),
                    request,
                    lastShard),
                e);
            if (!lastShard) {
                try {
                    performInitialPhase(shardIndex, shardIt, nextShard);
                } catch (Exception inner) {
                    inner.addSuppressed(e);
                    onInitialPhaseResult(shardIndex, shard, shard.currentNodeId(), shardIt, inner);
                }
            } else {
                // no more shards active, add a failure
                if (logger.isDebugEnabled() && !logger.isTraceEnabled()) { // do not double log this exception
                    if (e != null && !TransportActions.isShardNotAvailableException(e)) {
                        logger.debug(
                            (Supplier<?>) () -> new ParameterizedMessage(
                                "{}: Failed to execute [{}] lastShard [{}]",
                                shard != null ? shard.shortSummary() :
                                    shardIt.shardId(),
                                request,
                                lastShard),
                            e);
                    }
                }
            }
        }
    }

    protected final ShardSearchFailure[] buildShardFailures() {
        AtomicArray<ShardSearchFailure> shardFailures = this.shardFailures;
    private ShardSearchFailure[] buildShardFailures() {
        AtomicArray<ShardSearchFailure> shardFailures = this.shardFailures.get();
        if (shardFailures == null) {
            return ShardSearchFailure.EMPTY_ARRAY;
        }
@@ -277,17 +167,19 @@ abstract class AbstractSearchAsyncAction<FirstResult extends SearchPhaseResult>
        return failures;
    }

    protected final void addShardFailure(final int shardIndex, @Nullable SearchShardTarget shardTarget, Exception e) {
    public final void onShardFailure(final int shardIndex, @Nullable SearchShardTarget shardTarget, Exception e) {
        // we don't aggregate shard failures on non active shards (but do keep the header counts right)
        if (TransportActions.isShardNotAvailableException(e)) {
            return;
        }

        AtomicArray<ShardSearchFailure> shardFailures = this.shardFailures.get();
        // lazily create shard failures, so we can early build the empty shard failure list in most cases (no failures)
        if (shardFailures == null) {
        if (shardFailures == null) { // this is double checked locking but it's fine since SetOnce uses a volatile read internally
            synchronized (shardFailuresMutex) {
                if (shardFailures == null) {
                    shardFailures = new AtomicArray<>(shardsIts.size());
                shardFailures = this.shardFailures.get(); // read again otherwise somebody else has created it?
                if (shardFailures == null) { // still null so we are the first and create a new instance
                    shardFailures = new AtomicArray<>(results.length());
                    this.shardFailures.set(shardFailures);
                }
            }
        }
@@ -301,256 +193,123 @@ abstract class AbstractSearchAsyncAction<FirstResult extends SearchPhaseResult>
            shardFailures.set(shardIndex, new ShardSearchFailure(e, shardTarget));
        }
    }

        if (results.get(shardIndex) != null) {
            assert failure == null : "shard failed before but shouldn't: " + failure;
            successfulOps.decrementAndGet(); // if this shard was successful before (initial phase) we have to adjust the counter
        }
    }

    private void raiseEarlyFailure(Exception e) {
        for (AtomicArray.Entry<FirstResult> entry : initialResults.asList()) {
    /**
     * This method should be called if a search phase failed to ensure all relevant search contexts and resources are released.
     * This method will also notify the listener and send back a failure to the user.
     *
     * @param exception the exception explaining or causing the phase failure
     */
    private void raisePhaseFailure(SearchPhaseExecutionException exception) {
        for (AtomicArray.Entry<Result> entry : results.asList()) {
            try {
                Transport.Connection connection = nodeIdToConnection.apply(entry.value.shardTarget().getNodeId());
                sendReleaseSearchContext(entry.value.id(), connection);
            } catch (Exception inner) {
                inner.addSuppressed(e);
                inner.addSuppressed(exception);
                logger.trace("failed to release context", inner);
            }
        }
        listener.onFailure(e);
        listener.onFailure(exception);
    }

    protected void sendReleaseSearchContext(long contextId, Transport.Connection connection) {
        if (connection != null) {
            searchTransportService.sendFreeContext(connection, contextId, request);
        }
    }

    protected ShardFetchSearchRequest createFetchRequest(QuerySearchResult queryResult, int index, IntArrayList entry,
                                                         ScoreDoc[] lastEmittedDocPerShard) {
        final ScoreDoc lastEmittedDoc = (lastEmittedDocPerShard != null) ? lastEmittedDocPerShard[index] : null;
        return new ShardFetchSearchRequest(request, queryResult.id(), entry, lastEmittedDoc);
    }

    protected abstract void sendExecuteFirstPhase(Transport.Connection connection, ShardSearchTransportRequest request,
                                                  ActionListener<FirstResult> listener);

    protected final void processFirstPhaseResult(int shardIndex, FirstResult result) {
        initialResults.set(shardIndex, result);

    @Override
    public final void onShardSuccess(int shardIndex, Result result) {
        successfulOps.incrementAndGet();
        results.set(shardIndex, result);
        if (logger.isTraceEnabled()) {
            logger.trace("got first-phase result from {}", result != null ? result.shardTarget() : null);
        }

        // clean a previous error on this shard group (note, this code will be serialized on the same shardIndex value level
        // so it's ok concurrency wise to miss potentially the shard failures being created because of another failure
        // in the #addShardFailure, because by definition, it will happen on *another* shardIndex
        AtomicArray<ShardSearchFailure> shardFailures = this.shardFailures;
        AtomicArray<ShardSearchFailure> shardFailures = this.shardFailures.get();
        if (shardFailures != null) {
            shardFailures.set(shardIndex, null);
        }
    }

    final CheckedRunnable<Exception> innerGetNextPhase() {
        if (logger.isTraceEnabled()) {
            StringBuilder sb = new StringBuilder();
            boolean hadOne = false;
            for (int i = 0; i < initialResults.length(); i++) {
                FirstResult result = initialResults.get(i);
                if (result == null) {
                    continue; // failure
                }
                if (hadOne) {
                    sb.append(",");
                } else {
                    hadOne = true;
                }
                sb.append(result.shardTarget());
            }

            logger.trace("Moving to second phase, based on results from: {} (cluster state version: {})", sb, clusterStateVersion);
        }
        return getNextPhase(initialResults);
    @Override
    public final void onPhaseDone() {
        executeNextPhase(this, getNextPhase(results, this));
    }

    protected abstract CheckedRunnable<Exception> getNextPhase(AtomicArray<FirstResult> initialResults);

    protected abstract String initialPhaseName();

    protected Executor getExecutor() {
        return executor;
    @Override
    public final int getNumShards() {
        return results.length();
    }

    // this is a simple base class to simplify fan out to shards and collect
    final class CountedCollector<R extends SearchPhaseResult> {
        private final AtomicArray<R> resultArray;
        private final CountDown counter;
        private final IntConsumer onFinish;

        CountedCollector(AtomicArray<R> resultArray, int expectedOps, IntConsumer onFinish) {
            this.resultArray = resultArray;
            this.counter = new CountDown(expectedOps);
            this.onFinish = onFinish;
        }

        void countDown() {
            if (counter.countDown()) {
                onFinish.accept(successfulOps.get());
            }
        }

        void onResult(int index, R result, SearchShardTarget target) {
            try {
                result.shardTarget(target);
                resultArray.set(index, result);
            } finally {
                countDown();
            }
        }

        void onFailure(final int shardIndex, @Nullable SearchShardTarget shardTarget, Exception e) {
            try {
                addShardFailure(shardIndex, shardTarget, e);
            } finally {
                successfulOps.decrementAndGet();
                countDown();
            }
        }

    @Override
    public final Logger getLogger() {
        return logger;
    }

    /*
     * At this point AbstractSearchAsyncAction is just a base-class for the first phase of a search where we have multiple replicas
     * for each shardID. If one of them is not available we move to the next one. Yet, once we passed that first stage we have to work with
     * the shards we succeeded on the initial phase.
     * Unfortunately, subsequent phases are not fully detached from the initial phase since they are all non-static inner classes.
     * In future changes this will be changed to detach the inner classes to test them in isolation and to simplify their creation.
     * The AbstractSearchAsyncAction should be final and it should just get a factory for the next phase instead of requiring subclasses
     * etc.
     */
    final class FetchPhase implements CheckedRunnable<Exception> {
        private final AtomicArray<FetchSearchResult> fetchResults;
        private final SearchPhaseController searchPhaseController;
        private final AtomicArray<QuerySearchResultProvider> queryResults;
    @Override
    public final SearchTask getTask() {
        return task;
    }

        FetchPhase(AtomicArray<QuerySearchResultProvider> queryResults,
                   SearchPhaseController searchPhaseController) {
            this.fetchResults = new AtomicArray<>(queryResults.length());
            this.searchPhaseController = searchPhaseController;
            this.queryResults = queryResults;
        }
    @Override
    public final SearchRequest getRequest() {
        return request;
    }

        @Override
        public void run() throws Exception {
            final boolean isScrollRequest = request.scroll() != null;
            ScoreDoc[] sortedShardDocs = searchPhaseController.sortDocs(isScrollRequest, queryResults);
            final IntArrayList[] docIdsToLoad = searchPhaseController.fillDocIdsToLoad(queryResults.length(), sortedShardDocs);
            final IntConsumer finishPhase = successOpts
                -> sendResponseAsync("fetch", searchPhaseController, sortedShardDocs, queryResults, fetchResults);
            if (sortedShardDocs.length == 0) { // no docs to fetch -- sidestep everything and return
                queryResults.asList().stream()
                    .map(e -> e.value.queryResult())
                    .forEach(this::releaseIrrelevantSearchContext); // we have to release contexts here to free up resources
                finishPhase.accept(successfulOps.get());
            } else {
                final ScoreDoc[] lastEmittedDocPerShard = isScrollRequest ?
                    searchPhaseController.getLastEmittedDocPerShard(queryResults.asList(), sortedShardDocs, queryResults.length())
                    : null;
                final CountedCollector<FetchSearchResult> counter = new CountedCollector<>(fetchResults,
                    docIdsToLoad.length, // we count down every shard in the result no matter if we got any results or not
                    finishPhase);
                for (int i = 0; i < docIdsToLoad.length; i++) {
                    IntArrayList entry = docIdsToLoad[i];
                    QuerySearchResultProvider queryResult = queryResults.get(i);
                    if (entry == null) { // no results for this shard ID
                        if (queryResult != null) {
                            // if we got some hits from this shard we have to release the context there
                            // we do this as we go since it will free up resources and passing on the request on the
                            // transport layer is cheap.
                            releaseIrrelevantSearchContext(queryResult.queryResult());
                        }
                        // in any case we count down this result since we don't talk to this shard anymore
                        counter.countDown();
                    } else {
                        Transport.Connection connection = nodeIdToConnection.apply(queryResult.shardTarget().getNodeId());
                        ShardFetchSearchRequest fetchSearchRequest = createFetchRequest(queryResult.queryResult(), i, entry,
                            lastEmittedDocPerShard);
                        executeFetch(i, queryResult.shardTarget(), counter, fetchSearchRequest, queryResult.queryResult(),
                            connection);
                    }
                }
            }
        }
    @Override
    public final SearchResponse buildSearchResponse(InternalSearchResponse internalSearchResponse, String scrollId) {
        return new SearchResponse(internalSearchResponse, scrollId, results.length(), successfulOps.get(),
            buildTookInMillis(), buildShardFailures());
    }

        private void executeFetch(final int shardIndex, final SearchShardTarget shardTarget,
                                  final CountedCollector<FetchSearchResult> counter,
                                  final ShardFetchSearchRequest fetchSearchRequest, final QuerySearchResult querySearchResult,
                                  final Transport.Connection connection) {
            searchTransportService.sendExecuteFetch(connection, fetchSearchRequest, task, new ActionListener<FetchSearchResult>() {
                @Override
                public void onResponse(FetchSearchResult result) {
                    counter.onResult(shardIndex, result, shardTarget);
                }
    @Override
    public final void onPhaseFailure(SearchPhase phase, String msg, Throwable cause) {
        raisePhaseFailure(new SearchPhaseExecutionException(phase.getName(), msg, cause, buildShardFailures()));
    }

                @Override
                public void onFailure(Exception e) {
                    try {
                        if (logger.isDebugEnabled()) {
                            logger.debug((Supplier<?>) () -> new ParameterizedMessage("[{}] Failed to execute fetch phase",
                                fetchSearchRequest.id()), e);
                        }
                        counter.onFailure(shardIndex, shardTarget, e);
                    } finally {
                        // the search context might not be cleared on the node where the fetch was executed for example
                        // because the action was rejected by the thread pool. in this case we need to send a dedicated
                        // request to clear the search context.
                        releaseIrrelevantSearchContext(querySearchResult);
                    }
                }
            });
        }
    @Override
    public final Transport.Connection getConnection(String nodeId) {
        return nodeIdToConnection.apply(nodeId);
    }

        /**
         * Releases shard targets that are not used in the docsIdsToLoad.
         */
        private void releaseIrrelevantSearchContext(QuerySearchResult queryResult) {
            // we only release search context that we did not fetch from if we are not scrolling
            // and if it has at least one hit that didn't make it to the global topDocs
            if (request.scroll() == null && queryResult.hasHits()) {
                try {
                    Transport.Connection connection = nodeIdToConnection.apply(queryResult.shardTarget().getNodeId());
                    sendReleaseSearchContext(queryResult.id(), connection);
                } catch (Exception e) {
                    logger.trace("failed to release context", e);
                }
            }
        }
    @Override
    public final SearchTransportService getSearchTransport() {
        return searchTransportService;
    }

    @Override
    public final void execute(Runnable command) {
        executor.execute(command);
    }

    @Override
    public final void onResponse(SearchResponse response) {
        listener.onResponse(response);
    }

    @Override
    public final void onFailure(Exception e) {
        listener.onFailure(e);
    }

    public final ShardSearchTransportRequest buildShardSearchRequest(ShardIterator shardIt, ShardRouting shard) {
        AliasFilter filter = aliasFilter.get(shard.index().getUUID());
        assert filter != null;
        float indexBoost = concreteIndexBoosts.getOrDefault(shard.index().getUUID(), DEFAULT_INDEX_BOOST);
        return new ShardSearchTransportRequest(request, shardIt.shardId(), getNumShards(),
            filter, indexBoost, startTime);
    }

    /**
     * Sends back a result to the user. This method will create the sorted docs if they are null and will build the scrollID for the
     * response. Note: This method will send the response in a different thread depending on the executor.
     * Returns the next phase based on the results of the initial search phase
     * @param results the results of the initial search phase. Each non null element in the result array represent a successfully
     *                executed shard request
     * @param context the search context for the next phase
     */
    final void sendResponseAsync(String phase, SearchPhaseController searchPhaseController, ScoreDoc[] sortedDocs,
                                 AtomicArray<? extends QuerySearchResultProvider> queryResultsArr,
                                 AtomicArray<? extends FetchSearchResultProvider> fetchResultsArr) {
        getExecutor().execute(new ActionRunnable<SearchResponse>(listener) {
            @Override
            public void doRun() throws IOException {
                final boolean isScrollRequest = request.scroll() != null;
                final ScoreDoc[] theScoreDocs = sortedDocs == null ? searchPhaseController.sortDocs(isScrollRequest, queryResultsArr)
                    : sortedDocs;
                final InternalSearchResponse internalResponse = searchPhaseController.merge(isScrollRequest, theScoreDocs, queryResultsArr,
                    fetchResultsArr);
                String scrollId = isScrollRequest ? TransportSearchHelper.buildScrollId(request.searchType(), queryResultsArr) : null;
                listener.onResponse(new SearchResponse(internalResponse, scrollId, expectedSuccessfulOps, successfulOps.get(),
                    buildTookInMillis(), buildShardFailures()));
            }
    protected abstract SearchPhase getNextPhase(AtomicArray<Result> results, SearchPhaseContext context);

            @Override
            public void onFailure(Exception e) {
                ReduceSearchPhaseException failure = new ReduceSearchPhaseException(phase, "", e, buildShardFailures());
                if (logger.isDebugEnabled()) {
                    logger.debug("failed to reduce search", failure);
                }
                super.onFailure(failure);
            }
        });
    }
}
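The refactoring above turns AbstractSearchAsyncAction into an implementation of SearchPhaseContext that drives named SearchPhase objects instead of ad-hoc CheckedRunnables. As a mental model, a trivial phase might look like the following sketch (a hypothetical class, purely for illustration; the real phases follow in the new files below):

<pre>
// Hypothetical phase, only to illustrate the SearchPhase / SearchPhaseContext contract.
final class LoggingPhase extends SearchPhase {
    private final SearchPhaseContext context;
    private final SearchPhase next;

    LoggingPhase(SearchPhaseContext context, SearchPhase next) {
        super("logging"); // the name appears in trace logs and phase-failure messages
        this.context = context;
        this.next = next;
    }

    @Override
    public void run() throws IOException {
        context.getLogger().trace("executing over [{}] shards", context.getNumShards());
        // hand over; executeNextPhase() fails the whole search if no shard has succeeded so far
        context.executeNextPhase(this, next);
    }
}
</pre>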
@@ -0,0 +1,83 @@
/*
 * Licensed to Elasticsearch under one or more contributor
 * license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright
 * ownership. Elasticsearch licenses this file to you under
 * the Apache License, Version 2.0 (the "License"); you may
 * not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied. See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */
package org.elasticsearch.action.search;

import org.elasticsearch.common.Nullable;
import org.elasticsearch.common.util.concurrent.AtomicArray;
import org.elasticsearch.common.util.concurrent.CountDown;
import org.elasticsearch.search.SearchPhaseResult;
import org.elasticsearch.search.SearchShardTarget;

/**
 * This is a simple base class to simplify fan out to shards and collect their results. Each result passed to
 * {@link #onResult(int, SearchPhaseResult, SearchShardTarget)} will be set to the provided result array
 * where the given index is used to set the result on the array.
 */
final class CountedCollector<R extends SearchPhaseResult> {
    private final AtomicArray<R> resultArray;
    private final CountDown counter;
    private final Runnable onFinish;
    private final SearchPhaseContext context;

    CountedCollector(AtomicArray<R> resultArray, int expectedOps, Runnable onFinish, SearchPhaseContext context) {
        if (expectedOps > resultArray.length()) {
            throw new IllegalStateException("unexpected number of operations. got: " + expectedOps + " but array size is: "
                + resultArray.length());
        }
        this.resultArray = resultArray;
        this.counter = new CountDown(expectedOps);
        this.onFinish = onFinish;
        this.context = context;
    }

    /**
     * Forcefully counts down an operation and executes the provided runnable
     * if all expected operations were executed
     */
    void countDown() {
        assert counter.isCountedDown() == false : "more operations executed than specified";
        if (counter.countDown()) {
            onFinish.run();
        }
    }

    /**
     * Sets the result to the given array index and then runs {@link #countDown()}
     */
    void onResult(int index, R result, SearchShardTarget target) {
        try {
            result.shardTarget(target);
            resultArray.set(index, result);
        } finally {
            countDown();
        }
    }

    /**
     * Escalates the failure via {@link SearchPhaseContext#onShardFailure(int, SearchShardTarget, Exception)}
     * and then runs {@link #countDown()}
     */
    void onFailure(final int shardIndex, @Nullable SearchShardTarget shardTarget, Exception e) {
        try {
            context.onShardFailure(shardIndex, shardTarget, e);
        } finally {
            countDown();
        }
    }
}
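A hedged usage sketch for the collector above (the shard-loop helpers here are invented for illustration):

<pre>
// Fan out to getNumShards() shards; the finish callback runs exactly once,
// after the last countDown(), regardless of how each shard terminated.
AtomicArray<FetchSearchResult> fetchResults = new AtomicArray<>(context.getNumShards());
CountedCollector<FetchSearchResult> counter = new CountedCollector<>(
    fetchResults, context.getNumShards(), () -> finishPhase(fetchResults), context); // finishPhase is hypothetical
for (int i = 0; i < context.getNumShards(); i++) {
    if (hasWorkForShard(i)) {           // hypothetical predicate
        sendShardRequest(i, counter);   // must end in counter.onResult(...) or counter.onFailure(...)
    } else {
        counter.countDown();            // skipped shards still count towards completion
    }
}
</pre>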
@@ -0,0 +1,96 @@
/*
 * Licensed to Elasticsearch under one or more contributor
 * license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright
 * ownership. Elasticsearch licenses this file to you under
 * the Apache License, Version 2.0 (the "License"); you may
 * not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied. See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */
package org.elasticsearch.action.search;

import org.apache.logging.log4j.message.ParameterizedMessage;
import org.apache.logging.log4j.util.Supplier;
import org.elasticsearch.action.ActionListener;
import org.elasticsearch.common.util.concurrent.AtomicArray;
import org.elasticsearch.search.SearchShardTarget;
import org.elasticsearch.search.dfs.AggregatedDfs;
import org.elasticsearch.search.dfs.DfsSearchResult;
import org.elasticsearch.search.query.QuerySearchRequest;
import org.elasticsearch.search.query.QuerySearchResultProvider;
import org.elasticsearch.transport.Transport;

import java.io.IOException;
import java.util.function.Function;

/**
 * This search phase fans out to every shard to execute a distributed search with pre-collected distributed frequencies for all
 * search terms used in the actual search query. This phase is very similar to the default query-then-fetch search phase but it doesn't
 * retry on another shard if any of the shards are failing. Failures are treated as shard failures and are counted as a non-successful
 * operation.
 * @see CountedCollector#onFailure(int, SearchShardTarget, Exception)
 */
final class DfsQueryPhase extends SearchPhase {
    private final AtomicArray<QuerySearchResultProvider> queryResult;
    private final SearchPhaseController searchPhaseController;
    private final AtomicArray<DfsSearchResult> dfsSearchResults;
    private final Function<AtomicArray<QuerySearchResultProvider>, SearchPhase> nextPhaseFactory;
    private final SearchPhaseContext context;
    private final SearchTransportService searchTransportService;

    DfsQueryPhase(AtomicArray<DfsSearchResult> dfsSearchResults,
                  SearchPhaseController searchPhaseController,
                  Function<AtomicArray<QuerySearchResultProvider>, SearchPhase> nextPhaseFactory, SearchPhaseContext context) {
        super("dfs_query");
        this.queryResult = new AtomicArray<>(dfsSearchResults.length());
        this.searchPhaseController = searchPhaseController;
        this.dfsSearchResults = dfsSearchResults;
        this.nextPhaseFactory = nextPhaseFactory;
        this.context = context;
        this.searchTransportService = context.getSearchTransport();
    }

    @Override
    public void run() throws IOException {
        // TODO we can potentially also consume the actual per shard results from the initial phase here in the aggregateDfs
        // to free up memory early
        final AggregatedDfs dfs = searchPhaseController.aggregateDfs(dfsSearchResults);
        final CountedCollector<QuerySearchResultProvider> counter = new CountedCollector<>(queryResult, dfsSearchResults.asList().size(),
            () -> {
                context.executeNextPhase(this, nextPhaseFactory.apply(queryResult));
            }, context);
        for (final AtomicArray.Entry<DfsSearchResult> entry : dfsSearchResults.asList()) {
            DfsSearchResult dfsResult = entry.value;
            final int shardIndex = entry.index;
            final SearchShardTarget searchShardTarget = dfsResult.shardTarget();
            Transport.Connection connection = context.getConnection(searchShardTarget.getNodeId());
            QuerySearchRequest querySearchRequest = new QuerySearchRequest(context.getRequest(), dfsResult.id(), dfs);
            searchTransportService.sendExecuteQuery(connection, querySearchRequest, context.getTask(),
                ActionListener.wrap(
                    result -> counter.onResult(shardIndex, result, searchShardTarget),
                    exception -> {
                        try {
                            if (context.getLogger().isDebugEnabled()) {
                                context.getLogger().debug((Supplier<?>) () -> new ParameterizedMessage("[{}] Failed to execute query phase",
                                    querySearchRequest.id()), exception);
                            }
                            counter.onFailure(shardIndex, searchShardTarget, exception);
                        } finally {
                            // the query might not have been executed at all (for example because thread pool rejected
                            // execution) and the search context that was created in dfs phase might not be released.
                            // release it again to be on the safe side
                            context.sendReleaseSearchContext(querySearchRequest.id(), connection);
                        }
                    }));
        }
    }
}
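The phase is wired up through a next-phase factory rather than a hard-coded successor. A sketch of the composition (names partly hypothetical):

<pre>
// The dfs_query phase turns its collected query results into whatever phase
// the caller chose, typically the fetch phase of query-then-fetch.
Function<AtomicArray<QuerySearchResultProvider>, SearchPhase> nextPhaseFactory =
    queryResults -> buildFetchPhase(queryResults, context); // hypothetical builder
SearchPhase dfsQuery = new DfsQueryPhase(dfsResults, searchPhaseController, nextPhaseFactory, context);
context.executeNextPhase(currentPhase, dfsQuery);
</pre>

This keeps DfsQueryPhase testable in isolation: a unit test can pass a factory that merely records the results it receives.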
@@ -0,0 +1,149 @@
/*
 * Licensed to Elasticsearch under one or more contributor
 * license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright
 * ownership. Elasticsearch licenses this file to you under
 * the Apache License, Version 2.0 (the "License"); you may
 * not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied. See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */

package org.elasticsearch.action.search;

import org.elasticsearch.action.ActionListener;
import org.elasticsearch.index.query.BoolQueryBuilder;
import org.elasticsearch.index.query.InnerHitBuilder;
import org.elasticsearch.index.query.QueryBuilder;
import org.elasticsearch.index.query.QueryBuilders;
import org.elasticsearch.search.SearchHit;
import org.elasticsearch.search.SearchHits;
import org.elasticsearch.search.builder.SearchSourceBuilder;
import org.elasticsearch.search.collapse.CollapseBuilder;

import java.io.IOException;
import java.util.HashMap;
import java.util.Iterator;
import java.util.function.Function;

/**
 * This search phase is an optional phase that is executed once all hits are fetched from the shards; it runs
 * field-collapsing on the inner hits. This phase only executes if field collapsing is requested in the search request,
 * and otherwise forwards to the next phase immediately.
 */
final class ExpandSearchPhase extends SearchPhase {
    private final SearchPhaseContext context;
    private final SearchResponse searchResponse;
    private final Function<SearchResponse, SearchPhase> nextPhaseFactory;

    ExpandSearchPhase(SearchPhaseContext context, SearchResponse searchResponse,
                      Function<SearchResponse, SearchPhase> nextPhaseFactory) {
        super("expand");
        this.context = context;
        this.searchResponse = searchResponse;
        this.nextPhaseFactory = nextPhaseFactory;
    }

    /**
     * Returns <code>true</code> iff the search request has inner hits and needs field collapsing
     */
    private boolean isCollapseRequest() {
        final SearchRequest searchRequest = context.getRequest();
        return searchRequest.source() != null &&
            searchRequest.source().collapse() != null &&
            searchRequest.source().collapse().getInnerHit() != null;
    }

    @Override
    public void run() throws IOException {
        if (isCollapseRequest()) {
            SearchRequest searchRequest = context.getRequest();
            CollapseBuilder collapseBuilder = searchRequest.source().collapse();
            MultiSearchRequest multiRequest = new MultiSearchRequest();
            if (collapseBuilder.getMaxConcurrentGroupRequests() > 0) {
                multiRequest.maxConcurrentSearchRequests(collapseBuilder.getMaxConcurrentGroupRequests());
            }
            for (SearchHit hit : searchResponse.getHits()) {
                BoolQueryBuilder groupQuery = new BoolQueryBuilder();
                Object collapseValue = hit.field(collapseBuilder.getField()).getValue();
                if (collapseValue != null) {
                    groupQuery.filter(QueryBuilders.matchQuery(collapseBuilder.getField(), collapseValue));
                } else {
                    groupQuery.mustNot(QueryBuilders.existsQuery(collapseBuilder.getField()));
                }
                QueryBuilder origQuery = searchRequest.source().query();
                if (origQuery != null) {
                    groupQuery.must(origQuery);
                }
                SearchSourceBuilder sourceBuilder = buildExpandSearchSourceBuilder(collapseBuilder.getInnerHit())
                    .query(groupQuery);
                SearchRequest groupRequest = new SearchRequest(searchRequest.indices())
                    .types(searchRequest.types())
                    .source(sourceBuilder);
                multiRequest.add(groupRequest);
            }
            context.getSearchTransport().sendExecuteMultiSearch(multiRequest, context.getTask(),
                ActionListener.wrap(response -> {
                    Iterator<MultiSearchResponse.Item> it = response.iterator();
                    for (SearchHit hit : searchResponse.getHits()) {
                        MultiSearchResponse.Item item = it.next();
                        if (item.isFailure()) {
                            context.onPhaseFailure(this, "failed to expand hits", item.getFailure());
                            return;
                        }
                        SearchHits innerHits = item.getResponse().getHits();
                        if (hit.getInnerHits() == null) {
                            hit.setInnerHits(new HashMap<>(1));
                        }
                        hit.getInnerHits().put(collapseBuilder.getInnerHit().getName(), innerHits);
                    }
                    context.executeNextPhase(this, nextPhaseFactory.apply(searchResponse));
                }, context::onFailure)
            );
        } else {
            context.executeNextPhase(this, nextPhaseFactory.apply(searchResponse));
        }
    }

    private SearchSourceBuilder buildExpandSearchSourceBuilder(InnerHitBuilder options) {
        SearchSourceBuilder groupSource = new SearchSourceBuilder();
        groupSource.from(options.getFrom());
        groupSource.size(options.getSize());
        if (options.getSorts() != null) {
            options.getSorts().forEach(groupSource::sort);
        }
        if (options.getFetchSourceContext() != null) {
            if (options.getFetchSourceContext().includes() == null && options.getFetchSourceContext().excludes() == null) {
                groupSource.fetchSource(options.getFetchSourceContext().fetchSource());
            } else {
                groupSource.fetchSource(options.getFetchSourceContext().includes(),
                    options.getFetchSourceContext().excludes());
            }
        }
        if (options.getDocValueFields() != null) {
            options.getDocValueFields().forEach(groupSource::docValueField);
        }
        if (options.getStoredFieldsContext() != null && options.getStoredFieldsContext().fieldNames() != null) {
            options.getStoredFieldsContext().fieldNames().forEach(groupSource::storedField);
        }
        if (options.getScriptFields() != null) {
            for (SearchSourceBuilder.ScriptField field : options.getScriptFields()) {
                groupSource.scriptField(field.fieldName(), field.script());
            }
        }
        if (options.getHighlightBuilder() != null) {
            groupSource.highlighter(options.getHighlightBuilder());
        }
        groupSource.explain(options.isExplain());
        groupSource.trackScores(options.isTrackScores());
        return groupSource;
    }
}
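For each top-level hit, the phase above builds a bool query that pins the collapse field to the hit's collapse value (or requires the field to be missing) and re-applies the original query. A standalone sketch of just that query construction, assuming the standard QueryBuilders API; the `buildGroupQuery` helper is illustrative, not part of the committed code:

<pre>
import org.elasticsearch.index.query.BoolQueryBuilder;
import org.elasticsearch.index.query.QueryBuilder;
import org.elasticsearch.index.query.QueryBuilders;

final class GroupQueries {
    /** Sketch: a query selecting all documents in the same collapse group as one hit. */
    static QueryBuilder buildGroupQuery(String collapseField, Object collapseValue, QueryBuilder originalQuery) {
        BoolQueryBuilder groupQuery = QueryBuilders.boolQuery();
        if (collapseValue != null) {
            // non-scoring filter: the group is defined by an exact value match
            groupQuery.filter(QueryBuilders.matchQuery(collapseField, collapseValue));
        } else {
            // hits without a collapse value form their own "missing" group
            groupQuery.mustNot(QueryBuilders.existsQuery(collapseField));
        }
        if (originalQuery != null) {
            groupQuery.must(originalQuery); // keep the original query's restrictions and scoring
        }
        return groupQuery;
    }
}
</pre>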
@@ -0,0 +1,216 @@
/*
 * Licensed to Elasticsearch under one or more contributor
 * license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright
 * ownership. Elasticsearch licenses this file to you under
 * the Apache License, Version 2.0 (the "License"); you may
 * not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied. See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */
package org.elasticsearch.action.search;

import com.carrotsearch.hppc.IntArrayList;
import org.apache.logging.log4j.Logger;
import org.apache.logging.log4j.message.ParameterizedMessage;
import org.apache.logging.log4j.util.Supplier;
import org.apache.lucene.search.ScoreDoc;
import org.elasticsearch.action.ActionListener;
import org.elasticsearch.action.ActionRunnable;
import org.elasticsearch.common.util.concurrent.AtomicArray;
import org.elasticsearch.search.SearchShardTarget;
import org.elasticsearch.search.fetch.FetchSearchResult;
import org.elasticsearch.search.fetch.ShardFetchSearchRequest;
import org.elasticsearch.search.internal.InternalSearchResponse;
import org.elasticsearch.search.query.QuerySearchResult;
import org.elasticsearch.search.query.QuerySearchResultProvider;
import org.elasticsearch.transport.Transport;

import java.io.IOException;
import java.util.List;
import java.util.function.Function;

/**
 * This search phase merges the query results from the previous phase together and calculates the topN hits for this search.
 * Then it reaches out to all relevant shards to fetch the topN hits.
 */
final class FetchSearchPhase extends SearchPhase {
    private final AtomicArray<FetchSearchResult> fetchResults;
    private final SearchPhaseController searchPhaseController;
    private final AtomicArray<QuerySearchResultProvider> queryResults;
    private final Function<SearchResponse, SearchPhase> nextPhaseFactory;
    private final SearchPhaseContext context;
    private final Logger logger;

    FetchSearchPhase(AtomicArray<QuerySearchResultProvider> queryResults,
                     SearchPhaseController searchPhaseController,
                     SearchPhaseContext context) {
        this(queryResults, searchPhaseController, context,
            (response) -> new ExpandSearchPhase(context, response, // collapse only happens if the request has inner hits
                (finalResponse) -> sendResponsePhase(finalResponse, context)));
    }

    FetchSearchPhase(AtomicArray<QuerySearchResultProvider> queryResults,
                     SearchPhaseController searchPhaseController,
                     SearchPhaseContext context, Function<SearchResponse, SearchPhase> nextPhaseFactory) {
        super("fetch");
        if (context.getNumShards() != queryResults.length()) {
            throw new IllegalStateException("number of shards must match the length of the query results but doesn't:"
                + context.getNumShards() + "!=" + queryResults.length());
        }
        this.fetchResults = new AtomicArray<>(queryResults.length());
        this.searchPhaseController = searchPhaseController;
        this.queryResults = queryResults;
        this.nextPhaseFactory = nextPhaseFactory;
        this.context = context;
        this.logger = context.getLogger();
    }

    @Override
    public void run() throws IOException {
        context.execute(new ActionRunnable<SearchResponse>(context) {
            @Override
            public void doRun() throws IOException {
                // we do the heavy lifting in this inner run method where we reduce aggs etc. That's why we fork this phase
                // off immediately instead of forking when we send back the response to the user, since there we only need
                // to merge together the fetched results, which is a linear operation.
                innerRun();
            }

            @Override
            public void onFailure(Exception e) {
                context.onPhaseFailure(FetchSearchPhase.this, "", e);
            }
        });
    }

    private void innerRun() throws IOException {
        final int numShards = context.getNumShards();
        final boolean isScrollSearch = context.getRequest().scroll() != null;
        ScoreDoc[] sortedShardDocs = searchPhaseController.sortDocs(isScrollSearch, queryResults);
        String scrollId = isScrollSearch ? TransportSearchHelper.buildScrollId(queryResults) : null;
        List<AtomicArray.Entry<QuerySearchResultProvider>> queryResultsAsList = queryResults.asList();
        final SearchPhaseController.ReducedQueryPhase reducedQueryPhase = searchPhaseController.reducedQueryPhase(queryResultsAsList);
        final boolean queryAndFetchOptimization = queryResults.length() == 1;
        final Runnable finishPhase = () -> moveToNextPhase(searchPhaseController, sortedShardDocs, scrollId, reducedQueryPhase,
            queryAndFetchOptimization ? queryResults : fetchResults);
        if (queryAndFetchOptimization) {
            assert queryResults.get(0) == null || queryResults.get(0).fetchResult() != null;
            // query AND fetch optimization
            finishPhase.run();
        } else {
            final IntArrayList[] docIdsToLoad = searchPhaseController.fillDocIdsToLoad(numShards, sortedShardDocs);
            if (sortedShardDocs.length == 0) { // no docs to fetch -- sidestep everything and return
                queryResultsAsList.stream()
                    .map(e -> e.value.queryResult())
                    .forEach(this::releaseIrrelevantSearchContext); // we have to release contexts here to free up resources
                finishPhase.run();
            } else {
                final ScoreDoc[] lastEmittedDocPerShard = isScrollSearch ?
                    searchPhaseController.getLastEmittedDocPerShard(reducedQueryPhase, sortedShardDocs, numShards)
                    : null;
                final CountedCollector<FetchSearchResult> counter = new CountedCollector<>(fetchResults,
                    docIdsToLoad.length, // we count down every shard in the result no matter if we got any results or not
                    finishPhase, context);
                for (int i = 0; i < docIdsToLoad.length; i++) {
                    IntArrayList entry = docIdsToLoad[i];
                    QuerySearchResultProvider queryResult = queryResults.get(i);
                    if (entry == null) { // no results for this shard ID
                        if (queryResult != null) {
                            // if we got some hits from this shard we have to release the context there
                            // we do this as we go since it will free up resources and passing on the request on the
                            // transport layer is cheap.
                            releaseIrrelevantSearchContext(queryResult.queryResult());
                        }
                        // in any case we count down this result since we don't talk to this shard anymore
                        counter.countDown();
                    } else {
                        Transport.Connection connection = context.getConnection(queryResult.shardTarget().getNodeId());
                        ShardFetchSearchRequest fetchSearchRequest = createFetchRequest(queryResult.queryResult().id(), i, entry,
                            lastEmittedDocPerShard);
                        executeFetch(i, queryResult.shardTarget(), counter, fetchSearchRequest, queryResult.queryResult(),
                            connection);
                    }
                }
            }
        }
    }

    protected ShardFetchSearchRequest createFetchRequest(long queryId, int index, IntArrayList entry,
                                                         ScoreDoc[] lastEmittedDocPerShard) {
        final ScoreDoc lastEmittedDoc = (lastEmittedDocPerShard != null) ? lastEmittedDocPerShard[index] : null;
        return new ShardFetchSearchRequest(context.getRequest(), queryId, entry, lastEmittedDoc);
    }

    private void executeFetch(final int shardIndex, final SearchShardTarget shardTarget,
                              final CountedCollector<FetchSearchResult> counter,
                              final ShardFetchSearchRequest fetchSearchRequest, final QuerySearchResult querySearchResult,
                              final Transport.Connection connection) {
        context.getSearchTransport().sendExecuteFetch(connection, fetchSearchRequest, context.getTask(),
            new ActionListener<FetchSearchResult>() {
                @Override
                public void onResponse(FetchSearchResult result) {
                    counter.onResult(shardIndex, result, shardTarget);
                }

                @Override
                public void onFailure(Exception e) {
                    try {
                        if (logger.isDebugEnabled()) {
                            logger.debug((Supplier<?>) () -> new ParameterizedMessage("[{}] Failed to execute fetch phase",
                                fetchSearchRequest.id()), e);
                        }
                        counter.onFailure(shardIndex, shardTarget, e);
                    } finally {
                        // the search context might not be cleared on the node where the fetch was executed, for example
                        // because the action was rejected by the thread pool. In this case we need to send a dedicated
                        // request to clear the search context.
                        releaseIrrelevantSearchContext(querySearchResult);
                    }
                }
            });
    }

    /**
     * Releases shard targets that are not used in the docIdsToLoad.
     */
    private void releaseIrrelevantSearchContext(QuerySearchResult queryResult) {
        // we only release search contexts that we did not fetch from if we are not scrolling
        // and if there is at least one hit that didn't make it to the global topDocs
        if (context.getRequest().scroll() == null && queryResult.hasHits()) {
            try {
                Transport.Connection connection = context.getConnection(queryResult.shardTarget().getNodeId());
                context.sendReleaseSearchContext(queryResult.id(), connection);
            } catch (Exception e) {
                context.getLogger().trace("failed to release context", e);
            }
        }
    }

    private void moveToNextPhase(SearchPhaseController searchPhaseController, ScoreDoc[] sortedDocs,
                                 String scrollId, SearchPhaseController.ReducedQueryPhase reducedQueryPhase,
                                 AtomicArray<? extends QuerySearchResultProvider> fetchResultsArr) {
        final InternalSearchResponse internalResponse = searchPhaseController.merge(context.getRequest().scroll() != null,
            sortedDocs, reducedQueryPhase, fetchResultsArr);
        context.executeNextPhase(this, nextPhaseFactory.apply(context.buildSearchResponse(internalResponse, scrollId)));
    }

    private static SearchPhase sendResponsePhase(SearchResponse response, SearchPhaseContext context) {
        return new SearchPhase("response") {
            @Override
            public void run() throws IOException {
                context.onResponse(response);
            }
        };
    }
}
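fillDocIdsToLoad above groups the globally sorted top documents back by shard so that each shard is asked only for its own doc IDs. A minimal sketch of that grouping, using plain java.util lists instead of the hppc IntArrayList the real code uses (a sketch under that substitution, not the actual implementation):

<pre>
import org.apache.lucene.search.ScoreDoc;

import java.util.ArrayList;
import java.util.List;

final class DocIdGrouping {
    /** Sketch: bucket the merged top docs per shard; a null slot means "nothing to fetch from that shard". */
    static List<Integer>[] fillDocIdsToLoad(int numShards, ScoreDoc[] sortedShardDocs) {
        @SuppressWarnings("unchecked")
        List<Integer>[] docIdsToLoad = new List[numShards];
        for (ScoreDoc shardDoc : sortedShardDocs) {
            if (docIdsToLoad[shardDoc.shardIndex] == null) {
                docIdsToLoad[shardDoc.shardIndex] = new ArrayList<>();
            }
            docIdsToLoad[shardDoc.shardIndex].add(shardDoc.doc); // shard-local Lucene doc id
        }
        return docIdsToLoad;
    }
}
</pre>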
@@ -0,0 +1,216 @@
/*
 * Licensed to Elasticsearch under one or more contributor
 * license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright
 * ownership. Elasticsearch licenses this file to you under
 * the Apache License, Version 2.0 (the "License"); you may
 * not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied. See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */
package org.elasticsearch.action.search;

import org.apache.logging.log4j.Logger;
import org.apache.logging.log4j.message.ParameterizedMessage;
import org.apache.logging.log4j.util.Supplier;
import org.elasticsearch.action.ActionListener;
import org.elasticsearch.action.NoShardAvailableActionException;
import org.elasticsearch.action.support.TransportActions;
import org.elasticsearch.cluster.routing.GroupShardsIterator;
import org.elasticsearch.cluster.routing.ShardIterator;
import org.elasticsearch.cluster.routing.ShardRouting;
import org.elasticsearch.common.Nullable;
import org.elasticsearch.search.SearchPhaseResult;
import org.elasticsearch.search.SearchShardTarget;
import org.elasticsearch.transport.ConnectTransportException;

import java.io.IOException;
import java.util.concurrent.atomic.AtomicInteger;

/**
 * This is an abstract base class that encapsulates the logic to fan out to all shards in the provided {@link GroupShardsIterator}
 * and collect the results. If a shard request returns a failure this class handles the advance to the next replica of the shard until
 * the shard's replica iterator is exhausted. Each shard is referenced by its position in the {@link GroupShardsIterator}, which is later
 * referred to as the <tt>shardIndex</tt>.
 * The fan out and collect algorithm is traditionally used as the initial phase, which can either be a query execution or a collection
 * of distributed frequencies.
 */
abstract class InitialSearchPhase<FirstResult extends SearchPhaseResult> extends SearchPhase {
    private final SearchRequest request;
    private final GroupShardsIterator shardsIts;
    private final Logger logger;
    private final int expectedTotalOps;
    private final AtomicInteger totalOps = new AtomicInteger();

    InitialSearchPhase(String name, SearchRequest request, GroupShardsIterator shardsIts, Logger logger) {
        super(name);
        this.request = request;
        this.shardsIts = shardsIts;
        this.logger = logger;
        // we need to add 1 for non active partitions, since we count them in the total. This means for each shard in the iterator we sum
        // up its number of active shards but use 1 as the default if no replica of a shard is active at this point.
        // on a per shard level we use shardIt.remaining() to increment the totalOps pointer but add 1 for the current shard result
        // we process, hence we add one for the non active partition here.
        this.expectedTotalOps = shardsIts.totalSizeWith1ForEmpty();
    }

    private void onShardFailure(final int shardIndex, @Nullable ShardRouting shard, @Nullable String nodeId,
                                final ShardIterator shardIt, Exception e) {
        // we always add the shard failure for a specific shard instance
        // we do make sure to clean it on a successful response from a shard
        SearchShardTarget shardTarget = new SearchShardTarget(nodeId, shardIt.shardId());
        onShardFailure(shardIndex, shardTarget, e);

        if (totalOps.incrementAndGet() == expectedTotalOps) {
            if (logger.isDebugEnabled()) {
                if (e != null && !TransportActions.isShardNotAvailableException(e)) {
                    logger.debug(
                        (Supplier<?>) () -> new ParameterizedMessage(
                            "{}: Failed to execute [{}]",
                            shard != null ? shard.shortSummary() :
                                shardIt.shardId(),
                            request),
                        e);
                } else if (logger.isTraceEnabled()) {
                    logger.trace((Supplier<?>) () -> new ParameterizedMessage("{}: Failed to execute [{}]", shard, request), e);
                }
            }
            onPhaseDone();
        } else {
            final ShardRouting nextShard = shardIt.nextOrNull();
            final boolean lastShard = nextShard == null;
            // trace log this exception
            logger.trace(
                (Supplier<?>) () -> new ParameterizedMessage(
                    "{}: Failed to execute [{}] lastShard [{}]",
                    shard != null ? shard.shortSummary() : shardIt.shardId(),
                    request,
                    lastShard),
                e);
            if (!lastShard) {
                try {
                    performPhaseOnShard(shardIndex, shardIt, nextShard);
                } catch (Exception inner) {
                    inner.addSuppressed(e);
                    onShardFailure(shardIndex, shard, shard.currentNodeId(), shardIt, inner);
                }
            } else {
                // no more shards active, add a failure
                if (logger.isDebugEnabled() && !logger.isTraceEnabled()) { // do not double log this exception
                    if (e != null && !TransportActions.isShardNotAvailableException(e)) {
                        logger.debug(
                            (Supplier<?>) () -> new ParameterizedMessage(
                                "{}: Failed to execute [{}] lastShard [{}]",
                                shard != null ? shard.shortSummary() :
                                    shardIt.shardId(),
                                request,
                                lastShard),
                            e);
                    }
                }
            }
        }
    }

    @Override
    public final void run() throws IOException {
        int shardIndex = -1;
        for (final ShardIterator shardIt : shardsIts) {
            shardIndex++;
            final ShardRouting shard = shardIt.nextOrNull();
            if (shard != null) {
                performPhaseOnShard(shardIndex, shardIt, shard);
            } else {
                // really, no shards active in this group
                onShardFailure(shardIndex, null, null, shardIt, new NoShardAvailableActionException(shardIt.shardId()));
            }
        }
    }

    private void performPhaseOnShard(final int shardIndex, final ShardIterator shardIt, final ShardRouting shard) {
        if (shard == null) {
            // TODO upgrade this to an assert...
            // no more active shards... (we should not really get here, but just for safety)
            onShardFailure(shardIndex, null, null, shardIt, new NoShardAvailableActionException(shardIt.shardId()));
        } else {
            try {
                executePhaseOnShard(shardIt, shard, new ActionListener<FirstResult>() {
                    @Override
                    public void onResponse(FirstResult result) {
                        onShardResult(shardIndex, shard.currentNodeId(), result, shardIt);
                    }

                    @Override
                    public void onFailure(Exception t) {
                        onShardFailure(shardIndex, shard, shard.currentNodeId(), shardIt, t);
                    }
                });
            } catch (ConnectTransportException | IllegalArgumentException ex) {
                // we are getting the connection early here so we might run into nodes that are not connected. in that case we move on to
                // the next shard. previously when using discovery nodes here we had a special case for null when a node was not connected
                // at all, which is not needed anymore.
                onShardFailure(shardIndex, shard, shard.currentNodeId(), shardIt, ex);
            }
        }
    }

    private void onShardResult(int shardIndex, String nodeId, FirstResult result, ShardIterator shardIt) {
        result.shardTarget(new SearchShardTarget(nodeId, shardIt.shardId()));
        onShardSuccess(shardIndex, result);
        // we need to increment successful ops first before we compare the exit condition, otherwise if we
        // are fast we could concurrently update totalOps but then preempt one of the threads, which can
        // cause the successor to read a wrong value from successfulOps if the second phase is very fast, ie. count etc.
        // increment all the "future" shards to update the total ops, since some may work and some may not...
        // and when that happens, we break on total ops, so we must maintain them
        final int xTotalOps = totalOps.addAndGet(shardIt.remaining() + 1);
        if (xTotalOps == expectedTotalOps) {
            onPhaseDone();
        } else if (xTotalOps > expectedTotalOps) {
            throw new AssertionError("unexpected higher total ops [" + xTotalOps + "] compared to expected ["
                + expectedTotalOps + "]");
        }
    }

    /**
     * Executed once all shard results have been received and processed
     * @see #onShardFailure(int, SearchShardTarget, Exception)
     * @see #onShardSuccess(int, SearchPhaseResult)
     */
    abstract void onPhaseDone(); // as a tribute to @kimchy aka. finishHim()

    /**
     * Executed once for every failed shard level request. This method is invoked before the next replica is tried for the given
     * shard target.
     * @param shardIndex the internal index for this shard. Each shard has an index / ordinal assigned that is used to reference
     *                   its results
     * @param shardTarget the shard target for this failure
     * @param ex the failure reason
     */
    abstract void onShardFailure(int shardIndex, SearchShardTarget shardTarget, Exception ex);

    /**
     * Executed once for every successful shard level request.
     * @param shardIndex the internal index for this shard. Each shard has an index / ordinal assigned that is used to reference
     *                   its results
     * @param result the result returned from the shard
     */
    abstract void onShardSuccess(int shardIndex, FirstResult result);

    /**
     * Sends the request to the actual shard.
     * @param shardIt the shards iterator
     * @param shard the shard routing to send the request for
     * @param listener the listener to notify on response
     */
    protected abstract void executePhaseOnShard(ShardIterator shardIt, ShardRouting shard, ActionListener<FirstResult> listener);
}
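The termination condition above relies on a simple invariant: expectedTotalOps is the sum over all shard groups of their copy count (with 1 for an empty group), every failure contributes exactly 1, and every success contributes remaining-replicas + 1, so the counter reaches the expected total exactly once. A small sketch of that accounting in isolation, with illustrative numbers:

<pre>
import java.util.concurrent.atomic.AtomicInteger;

final class TotalOpsDemo {
    public static void main(String[] args) {
        // say: 3 shard groups with 2, 2, and 1 active copies -> expectedTotalOps = 5
        int expectedTotalOps = 2 + 2 + 1;
        AtomicInteger totalOps = new AtomicInteger();

        // shard 0 succeeds on its first copy: remaining() == 1, so add 1 + 1
        onSuccess(totalOps, 1, expectedTotalOps);
        // shard 1 fails on the first copy (adds 1), then succeeds on the last (remaining() == 0, adds 1)
        onFailure(totalOps, expectedTotalOps);
        onSuccess(totalOps, 0, expectedTotalOps);
        // shard 2 succeeds on its only copy -> counter hits 5 and the phase finishes
        onSuccess(totalOps, 0, expectedTotalOps);
    }

    static void onSuccess(AtomicInteger totalOps, int remainingReplicas, int expected) {
        if (totalOps.addAndGet(remainingReplicas + 1) == expected) {
            System.out.println("phase done");
        }
    }

    static void onFailure(AtomicInteger totalOps, int expected) {
        if (totalOps.incrementAndGet() == expected) {
            System.out.println("phase done");
        }
    }
}
</pre>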
@@ -97,6 +97,14 @@ public final class RemoteClusterService extends AbstractComponent implements Clo
    public static final Setting<String> REMOTE_NODE_ATTRIBUTE = Setting.simpleString("search.remote.node.attr",
        Setting.Property.NodeScope);

    /**
     * If <code>true</code>, connecting to remote clusters is supported on this node. If <code>false</code>, this node will not establish
     * connections to any remote clusters configured. Search requests executed against this node (where this node is the coordinating
     * node) will fail if remote cluster syntax is used as an index pattern. The default is <code>true</code>.
     */
    public static final Setting<Boolean> ENABLE_REMOTE_CLUSTERS = Setting.boolSetting("search.remote.connect", true,
        Setting.Property.NodeScope);

    private static final char REMOTE_CLUSTER_INDEX_SEPARATOR = ':';

    private final TransportService transportService;
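The remote-cluster syntax mentioned above prefixes an index expression with a cluster alias and the ':' separator. A hedged sketch of how such an expression could be split; the helper is illustrative only and is not the actual RemoteClusterService parsing code:

<pre>
final class RemoteIndexExpression {
    private static final char REMOTE_CLUSTER_INDEX_SEPARATOR = ':';

    /** Sketch: split "my_cluster:logs-*" into {"my_cluster", "logs-*"}; a plain "logs-*" stays local. */
    static String[] split(String indexExpression) {
        int i = indexExpression.indexOf(REMOTE_CLUSTER_INDEX_SEPARATOR);
        if (i < 0) {
            return new String[] { null, indexExpression }; // no cluster alias -> local index
        }
        return new String[] { indexExpression.substring(0, i), indexExpression.substring(i + 1) };
    }
}
</pre>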
@@ -20,19 +20,13 @@
package org.elasticsearch.action.search;

import org.apache.logging.log4j.Logger;
import org.apache.logging.log4j.message.ParameterizedMessage;
import org.apache.logging.log4j.util.Supplier;
import org.elasticsearch.action.ActionListener;
import org.elasticsearch.cluster.routing.GroupShardsIterator;
import org.elasticsearch.common.CheckedRunnable;
import org.elasticsearch.cluster.routing.ShardIterator;
import org.elasticsearch.cluster.routing.ShardRouting;
import org.elasticsearch.common.util.concurrent.AtomicArray;
import org.elasticsearch.search.dfs.AggregatedDfs;
import org.elasticsearch.search.dfs.DfsSearchResult;
import org.elasticsearch.search.internal.AliasFilter;
import org.elasticsearch.search.internal.ShardSearchTransportRequest;
import org.elasticsearch.search.query.QuerySearchRequest;
import org.elasticsearch.search.query.QuerySearchResult;
import org.elasticsearch.search.query.QuerySearchResultProvider;
import org.elasticsearch.transport.Transport;

import java.util.Map;
@@ -40,6 +34,7 @@ import java.util.concurrent.Executor;
import java.util.function.Function;

final class SearchDfsQueryThenFetchAsyncAction extends AbstractSearchAsyncAction<DfsSearchResult> {
    private final SearchPhaseController searchPhaseController;

    SearchDfsQueryThenFetchAsyncAction(Logger logger, SearchTransportService searchTransportService,
                                       Function<String, Transport.Connection> nodeIdToConnection,
@@ -47,81 +42,20 @@ final class SearchDfsQueryThenFetchAsyncAction extends AbstractSearchAsyncAction
                                       SearchPhaseController searchPhaseController, Executor executor, SearchRequest request,
                                       ActionListener<SearchResponse> listener, GroupShardsIterator shardsIts, long startTime,
                                       long clusterStateVersion, SearchTask task) {
        super(logger, searchTransportService, nodeIdToConnection, aliasFilter, concreteIndexBoosts, searchPhaseController, executor,
        super("dfs", logger, searchTransportService, nodeIdToConnection, aliasFilter, concreteIndexBoosts, executor,
            request, listener, shardsIts, startTime, clusterStateVersion, task);
        this.searchPhaseController = searchPhaseController;
    }

    @Override
    protected String initialPhaseName() {
        return "dfs";
    protected void executePhaseOnShard(ShardIterator shardIt, ShardRouting shard, ActionListener listener) {
        getSearchTransport().sendExecuteDfs(getConnection(shard.currentNodeId()),
            buildShardSearchRequest(shardIt, shard), getTask(), listener);
    }

    @Override
    protected void sendExecuteFirstPhase(Transport.Connection connection, ShardSearchTransportRequest request,
                                         ActionListener<DfsSearchResult> listener) {
        searchTransportService.sendExecuteDfs(connection, request, task, listener);
    }

    @Override
    protected CheckedRunnable<Exception> getNextPhase(AtomicArray<DfsSearchResult> initialResults) {
        return new DfsQueryPhase(initialResults, searchPhaseController,
            (queryResults) -> new FetchPhase(queryResults, searchPhaseController));
    }

    private final class DfsQueryPhase implements CheckedRunnable<Exception> {
        private final AtomicArray<QuerySearchResultProvider> queryResult;
        private final SearchPhaseController searchPhaseController;
        private final AtomicArray<DfsSearchResult> firstResults;
        private final Function<AtomicArray<QuerySearchResultProvider>, CheckedRunnable<Exception>> nextPhaseFactory;

        DfsQueryPhase(AtomicArray<DfsSearchResult> firstResults,
                      SearchPhaseController searchPhaseController,
                      Function<AtomicArray<QuerySearchResultProvider>, CheckedRunnable<Exception>> nextPhaseFactory) {
            this.queryResult = new AtomicArray<>(firstResults.length());
            this.searchPhaseController = searchPhaseController;
            this.firstResults = firstResults;
            this.nextPhaseFactory = nextPhaseFactory;
        }

        @Override
        public void run() throws Exception {
            final AggregatedDfs dfs = searchPhaseController.aggregateDfs(firstResults);
            final CountedCollector<QuerySearchResultProvider> counter = new CountedCollector<>(queryResult, firstResults.asList().size(),
                (successfulOps) -> {
                    if (successfulOps == 0) {
                        listener.onFailure(new SearchPhaseExecutionException("query", "all shards failed", buildShardFailures()));
                    } else {
                        executePhase("fetch", this.nextPhaseFactory.apply(queryResult), null);
                    }
                });
            for (final AtomicArray.Entry<DfsSearchResult> entry : firstResults.asList()) {
                DfsSearchResult dfsResult = entry.value;
                final int shardIndex = entry.index;
                Transport.Connection connection = nodeIdToConnection.apply(dfsResult.shardTarget().getNodeId());
                QuerySearchRequest querySearchRequest = new QuerySearchRequest(request, dfsResult.id(), dfs);
                searchTransportService.sendExecuteQuery(connection, querySearchRequest, task, new ActionListener<QuerySearchResult>() {
                    @Override
                    public void onResponse(QuerySearchResult result) {
                        counter.onResult(shardIndex, result, dfsResult.shardTarget());
                    }

                    @Override
                    public void onFailure(Exception e) {
                        try {
                            if (logger.isDebugEnabled()) {
                                logger.debug((Supplier<?>) () -> new ParameterizedMessage("[{}] Failed to execute query phase",
                                    querySearchRequest.id()), e);
                            }
                            counter.onFailure(shardIndex, dfsResult.shardTarget(), e);
                        } finally {
                            // the query might not have been executed at all (for example because the thread pool rejected
                            // execution) and the search context that was created in the dfs phase might not be released.
                            // release it again to be on the safe side
                            sendReleaseSearchContext(querySearchRequest.id(), connection);
                        }
                    }
                });
            }
        }
    protected SearchPhase getNextPhase(AtomicArray<DfsSearchResult> results, SearchPhaseContext context) {
        return new DfsQueryPhase(results, searchPhaseController,
            (queryResults) -> new FetchSearchPhase(queryResults, searchPhaseController, context), context);
    }
}
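The refactoring in this hunk replaces the inline anonymous CheckedRunnables with named SearchPhase objects wired together through Function factories: dfs results feed a DfsQueryPhase, whose query results feed a FetchSearchPhase. A minimal sketch of that chaining style with placeholder phase types; the names are illustrative, not the real classes:

<pre>
import java.util.function.Function;

final class PhaseChainingDemo {
    interface Phase { void run() throws Exception; }

    /** Sketch: each phase receives a factory that builds its successor from this phase's results. */
    static Phase chain(String[] dfsResults) {
        Function<String[], Phase> fetchFactory = queryResults ->
            () -> System.out.println("fetch phase over " + queryResults.length + " query results");
        Function<String[], Phase> queryFactory = results ->
            () -> {
                String[] queryResults = results; // pretend the query phase executed here
                fetchFactory.apply(queryResults).run();
            };
        return queryFactory.apply(dfsResults);
    }

    public static void main(String[] args) throws Exception {
        chain(new String[] { "shard-0", "shard-1" }).run();
    }
}
</pre>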
@@ -0,0 +1,42 @@
/*
 * Licensed to Elasticsearch under one or more contributor
 * license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright
 * ownership. Elasticsearch licenses this file to you under
 * the Apache License, Version 2.0 (the "License"); you may
 * not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied. See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */
package org.elasticsearch.action.search;

import org.elasticsearch.common.CheckedRunnable;

import java.io.IOException;
import java.util.Objects;

/**
 * Base class for all individual search phases like collecting distributed frequencies, fetching documents, querying shards.
 */
abstract class SearchPhase implements CheckedRunnable<IOException> {
    private final String name;

    protected SearchPhase(String name) {
        this.name = Objects.requireNonNull(name, "name must not be null");
    }

    /**
     * Returns the phase's name.
     */
    public String getName() {
        return name;
    }
}
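Because SearchPhase is just a named CheckedRunnable&lt;IOException&gt;, a concrete phase only needs a name and a run body; the "response" phase at the end of FetchSearchPhase above is exactly this shape. A stripped-down, standalone sketch of the same pattern, with self-contained stand-in declarations for illustration only:

<pre>
import java.io.IOException;
import java.util.Objects;

final class SearchPhaseDemo {
    // standalone stand-ins for CheckedRunnable/SearchPhase, just for the demo
    interface CheckedRunnable<E extends Exception> { void run() throws E; }

    abstract static class SearchPhase implements CheckedRunnable<IOException> {
        private final String name;
        SearchPhase(String name) { this.name = Objects.requireNonNull(name, "name must not be null"); }
        String getName() { return name; }
    }

    public static void main(String[] args) throws IOException {
        SearchPhase response = new SearchPhase("response") {
            @Override
            public void run() throws IOException {
                System.out.println("sending the final response to the user");
            }
        };
        System.out.println("running phase: " + response.getName());
        response.run();
    }
}
</pre>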
@@ -0,0 +1,117 @@
/*
 * Licensed to Elasticsearch under one or more contributor
 * license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright
 * ownership. Elasticsearch licenses this file to you under
 * the Apache License, Version 2.0 (the "License"); you may
 * not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied. See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */
package org.elasticsearch.action.search;

import org.apache.logging.log4j.Logger;
import org.elasticsearch.action.ActionListener;
import org.elasticsearch.cluster.routing.ShardIterator;
import org.elasticsearch.cluster.routing.ShardRouting;
import org.elasticsearch.common.Nullable;
import org.elasticsearch.search.SearchShardTarget;
import org.elasticsearch.search.internal.InternalSearchResponse;
import org.elasticsearch.search.internal.ShardSearchTransportRequest;
import org.elasticsearch.transport.Transport;

import java.util.concurrent.Executor;

/**
 * This interface provides contextual state and access to resources across multiple search phases.
 */
interface SearchPhaseContext extends ActionListener<SearchResponse>, Executor {
    // TODO maybe we can make this concrete later - for now we just implement this in the base class for all initial phases

    /**
     * Returns the total number of shards of the current search across all indices
     */
    int getNumShards();

    /**
     * Returns a logger for this context to prevent each individual phase from creating its own logger.
     */
    Logger getLogger();

    /**
     * Returns the currently executing search task
     */
    SearchTask getTask();

    /**
     * Returns the currently executing search request
     */
    SearchRequest getRequest();

    /**
     * Builds the final search response that should be sent back to the user.
     * @param internalSearchResponse the internal search response
     * @param scrollId an optional scroll ID if this search is a scroll search
     */
    SearchResponse buildSearchResponse(InternalSearchResponse internalSearchResponse, String scrollId);

    /**
     * This method will communicate a fatal phase failure back to the user. In contrast to a shard failure,
     * this method will immediately fail the search request and return the failure to the issuer of the request.
     * @param phase the phase that failed
     * @param msg an optional message
     * @param cause the cause of the phase failure
     */
    void onPhaseFailure(SearchPhase phase, String msg, Throwable cause);

    /**
     * This method will record a shard failure for the given shard index. In contrast to a phase failure
     * ({@link #onPhaseFailure(SearchPhase, String, Throwable)}), this method will immediately return to the user but will record
     * a shard failure for the given shard index. This should be called if a shard failure happens after we successfully retrieved
     * a result from that shard in a previous phase.
     */
    void onShardFailure(int shardIndex, @Nullable SearchShardTarget shardTarget, Exception e);

    /**
     * Returns a connection to the node if connected, otherwise a {@link org.elasticsearch.transport.ConnectTransportException} will be
     * thrown.
     */
    Transport.Connection getConnection(String nodeId);

    /**
     * Returns the {@link SearchTransportService} to send shard requests to other nodes
     */
    SearchTransportService getSearchTransport();

    /**
     * Releases a search context with the given context ID on the node the given connection is connected to.
     * @see org.elasticsearch.search.query.QuerySearchResult#id()
     * @see org.elasticsearch.search.fetch.FetchSearchResult#id()
     */
    default void sendReleaseSearchContext(long contextId, Transport.Connection connection) {
        if (connection != null) {
            getSearchTransport().sendFreeContext(connection, contextId, getRequest());
        }
    }

    /**
     * Builds a request for the initial search phase.
     */
    ShardSearchTransportRequest buildShardSearchRequest(ShardIterator shardIt, ShardRouting shard);

    /**
     * Processes the phase transition from one phase to another. This method handles all errors that happen during the initial run
     * execution of the next phase. If there are no successful operations in the context when this method is executed, the search is
     * aborted and a response is returned to the user indicating that all shards have failed.
     */
    void executeNextPhase(SearchPhase currentPhase, SearchPhase nextPhase);
}
@@ -31,17 +31,15 @@ import org.apache.lucene.search.TermStatistics;
import org.apache.lucene.search.TopDocs;
import org.apache.lucene.search.TopFieldDocs;
import org.apache.lucene.search.grouping.CollapseTopFieldDocs;
import org.apache.lucene.util.ArrayUtil;
import org.apache.lucene.util.IntsRef;
import org.elasticsearch.common.collect.HppcMaps;
import org.elasticsearch.common.component.AbstractComponent;
import org.elasticsearch.common.lucene.Lucene;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.util.ArrayUtils;
import org.elasticsearch.common.util.BigArrays;
import org.elasticsearch.common.util.IntArray;
import org.elasticsearch.common.util.concurrent.AtomicArray;
import org.elasticsearch.script.ScriptService;
import org.elasticsearch.search.SearchHit;
import org.elasticsearch.search.SearchHits;
import org.elasticsearch.search.aggregations.InternalAggregation;
import org.elasticsearch.search.aggregations.InternalAggregation.ReduceContext;
import org.elasticsearch.search.aggregations.InternalAggregations;
@@ -49,9 +47,6 @@ import org.elasticsearch.search.aggregations.pipeline.SiblingPipelineAggregator;
import org.elasticsearch.search.dfs.AggregatedDfs;
import org.elasticsearch.search.dfs.DfsSearchResult;
import org.elasticsearch.search.fetch.FetchSearchResult;
import org.elasticsearch.search.fetch.FetchSearchResultProvider;
import org.elasticsearch.search.internal.InternalSearchHit;
import org.elasticsearch.search.internal.InternalSearchHits;
import org.elasticsearch.search.internal.InternalSearchResponse;
import org.elasticsearch.search.profile.ProfileShardResult;
import org.elasticsearch.search.profile.SearchProfileShardResults;
@@ -68,10 +63,8 @@ import java.util.Arrays;
import java.util.Collections;
import java.util.Comparator;
import java.util.HashMap;
import java.util.Iterator;
import java.util.List;
import java.util.Map;
import java.util.function.BiConsumer;
import java.util.stream.Collectors;
import java.util.stream.StreamSupport;

@@ -89,25 +82,11 @@ public class SearchPhaseController extends AbstractComponent {

    private final BigArrays bigArrays;
    private final ScriptService scriptService;
    private final List<BiConsumer<SearchRequest, SearchResponse>> searchResponseListener;

    public SearchPhaseController(Settings settings, BigArrays bigArrays, ScriptService scriptService) {
        this(settings, bigArrays, scriptService, Collections.emptyList());
    }

    public SearchPhaseController(Settings settings, BigArrays bigArrays, ScriptService scriptService,
                                 List<BiConsumer<SearchRequest, SearchResponse>> searchResponseListener) {
        super(settings);
        this.bigArrays = bigArrays;
        this.scriptService = scriptService;
        this.searchResponseListener = searchResponseListener;
    }

    /**
     * Returns the search response listeners registry
     */
    public List<BiConsumer<SearchRequest, SearchResponse>> getSearchResponseListener() {
        return searchResponseListener;
    }

    public AggregatedDfs aggregateDfs(AtomicArray<DfsSearchResult> results) {
@@ -250,7 +229,7 @@
        Arrays.sort(sortedResults, QUERY_RESULT_ORDERING);
        QuerySearchResultProvider firstResult = sortedResults[0].value;

        int topN = topN(results);
        int topN = firstResult.queryResult().size();
        int from = firstResult.queryResult().from();
        if (ignoreFrom) {
            from = 0;
@@ -346,16 +325,12 @@
        return scoreDocs;
    }

    public ScoreDoc[] getLastEmittedDocPerShard(List<? extends AtomicArray.Entry<? extends QuerySearchResultProvider>> queryResults,
    public ScoreDoc[] getLastEmittedDocPerShard(ReducedQueryPhase reducedQueryPhase,
                                                ScoreDoc[] sortedScoreDocs, int numShards) {
        ScoreDoc[] lastEmittedDocPerShard = new ScoreDoc[numShards];
        if (queryResults.isEmpty() == false) {
            long fetchHits = 0;
            for (AtomicArray.Entry<? extends QuerySearchResultProvider> queryResult : queryResults) {
                fetchHits += queryResult.value.queryResult().topDocs().scoreDocs.length;
            }
        if (reducedQueryPhase.isEmpty() == false) {
            // from is always zero: when we use scroll, we ignore from
            long size = Math.min(fetchHits, topN(queryResults));
            long size = Math.min(reducedQueryPhase.fetchHits, reducedQueryPhase.oneResult.size());
            // with collapsing we can have more hits than sorted docs
            size = Math.min(sortedScoreDocs.length, size);
            for (int sortedDocsIndex = 0; sortedDocsIndex < size; sortedDocsIndex++) {
@@ -390,22 +365,50 @@
     * completion suggestion ordered by suggestion name
     */
    public InternalSearchResponse merge(boolean ignoreFrom, ScoreDoc[] sortedDocs,
                                        AtomicArray<? extends QuerySearchResultProvider> queryResultsArr,
                                        AtomicArray<? extends FetchSearchResultProvider> fetchResultsArr) {

        List<? extends AtomicArray.Entry<? extends QuerySearchResultProvider>> queryResults = queryResultsArr.asList();
        List<? extends AtomicArray.Entry<? extends FetchSearchResultProvider>> fetchResults = fetchResultsArr.asList();

        if (queryResults.isEmpty()) {
                                        ReducedQueryPhase reducedQueryPhase,
                                        AtomicArray<? extends QuerySearchResultProvider> fetchResultsArr) {
        if (reducedQueryPhase.isEmpty()) {
            return InternalSearchResponse.empty();
        }
        List<? extends AtomicArray.Entry<? extends QuerySearchResultProvider>> fetchResults = fetchResultsArr.asList();
        SearchHits hits = getHits(reducedQueryPhase, ignoreFrom, sortedDocs, fetchResultsArr);
        if (reducedQueryPhase.suggest != null) {
            if (!fetchResults.isEmpty()) {
                int currentOffset = hits.getHits().length;
                for (CompletionSuggestion suggestion : reducedQueryPhase.suggest.filter(CompletionSuggestion.class)) {
                    final List<CompletionSuggestion.Entry.Option> suggestionOptions = suggestion.getOptions();
                    for (int scoreDocIndex = currentOffset; scoreDocIndex < currentOffset + suggestionOptions.size(); scoreDocIndex++) {
                        ScoreDoc shardDoc = sortedDocs[scoreDocIndex];
                        QuerySearchResultProvider searchResultProvider = fetchResultsArr.get(shardDoc.shardIndex);
                        if (searchResultProvider == null) {
                            continue;
                        }
                        FetchSearchResult fetchResult = searchResultProvider.fetchResult();
                        int fetchResultIndex = fetchResult.counterGetAndIncrement();
                        if (fetchResultIndex < fetchResult.hits().internalHits().length) {
                            SearchHit hit = fetchResult.hits().internalHits()[fetchResultIndex];
                            CompletionSuggestion.Entry.Option suggestOption =
                                suggestionOptions.get(scoreDocIndex - currentOffset);
                            hit.score(shardDoc.score);
                            hit.shard(fetchResult.shardTarget());
                            suggestOption.setHit(hit);
                        }
                    }
                    currentOffset += suggestionOptions.size();
                }
                assert currentOffset == sortedDocs.length : "expected no more score doc slices";
            }
        }
        return reducedQueryPhase.buildResponse(hits);
    }

    QuerySearchResult firstResult = queryResults.get(0).value.queryResult();

    private SearchHits getHits(ReducedQueryPhase reducedQueryPhase, boolean ignoreFrom, ScoreDoc[] sortedDocs,
                               AtomicArray<? extends QuerySearchResultProvider> fetchResultsArr) {
        List<? extends AtomicArray.Entry<? extends QuerySearchResultProvider>> fetchResults = fetchResultsArr.asList();
        boolean sorted = false;
        int sortScoreIndex = -1;
        if (firstResult.topDocs() instanceof TopFieldDocs) {
            TopFieldDocs fieldDocs = (TopFieldDocs) firstResult.queryResult().topDocs();
        if (reducedQueryPhase.oneResult.topDocs() instanceof TopFieldDocs) {
            TopFieldDocs fieldDocs = (TopFieldDocs) reducedQueryPhase.oneResult.queryResult().topDocs();
            if (fieldDocs instanceof CollapseTopFieldDocs &&
                fieldDocs.fields.length == 1 && fieldDocs.fields[0].getType() == SortField.Type.SCORE) {
                sorted = false;
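Both merge paths above rely on FetchSearchResult's per-shard fetch counter: the globally sorted docs are walked in order, and counterGetAndIncrement hands out the next fetched hit from whichever shard a doc came from. A compact sketch of that cursor pattern, with a plain int cursor standing in for the real counter:

<pre>
import java.util.ArrayList;
import java.util.List;

final class FetchCursorDemo {
    /** Sketch: per-shard fetched hits plus a cursor, consumed in global sort order. */
    static final class ShardFetchResult {
        final String[] hits;   // hits fetched from this shard, in the shard's own topN order
        int counter;           // next unconsumed hit, cf. counterGetAndIncrement()
        ShardFetchResult(String... hits) { this.hits = hits; }
        String next() { return counter < hits.length ? hits[counter++] : null; }
    }

    public static void main(String[] args) {
        ShardFetchResult[] perShard = {
            new ShardFetchResult("s0-hit0", "s0-hit1"),
            new ShardFetchResult("s1-hit0"),
        };
        int[] sortedDocShardIndexes = { 0, 1, 0 }; // global order: shard 0, shard 1, shard 0
        List<String> merged = new ArrayList<>();
        for (int shardIndex : sortedDocShardIndexes) {
            merged.add(perShard[shardIndex].next()); // pull the next hit from that shard
        }
        System.out.println(merged); // [s0-hit0, s1-hit0, s0-hit1]
    }
}
</pre>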
@@ -418,13 +421,67 @@
            }
        }
    }
        // clean the fetch counter
        for (AtomicArray.Entry<? extends QuerySearchResultProvider> entry : fetchResults) {
            entry.value.fetchResult().initCounter();
        }
        int from = ignoreFrom ? 0 : reducedQueryPhase.oneResult.queryResult().from();
        int numSearchHits = (int) Math.min(reducedQueryPhase.fetchHits - from, reducedQueryPhase.oneResult.size());
        // with collapsing we can have more fetch hits than sorted docs
        numSearchHits = Math.min(sortedDocs.length, numSearchHits);
        // merge hits
        List<SearchHit> hits = new ArrayList<>();
        if (!fetchResults.isEmpty()) {
            for (int i = 0; i < numSearchHits; i++) {
                ScoreDoc shardDoc = sortedDocs[i];
                QuerySearchResultProvider fetchResultProvider = fetchResultsArr.get(shardDoc.shardIndex);
                if (fetchResultProvider == null) {
                    continue;
                }
                FetchSearchResult fetchResult = fetchResultProvider.fetchResult();
                int index = fetchResult.counterGetAndIncrement();
                if (index < fetchResult.hits().internalHits().length) {
                    SearchHit searchHit = fetchResult.hits().internalHits()[index];
                    searchHit.score(shardDoc.score);
                    searchHit.shard(fetchResult.shardTarget());
                    if (sorted) {
                        FieldDoc fieldDoc = (FieldDoc) shardDoc;
                        searchHit.sortValues(fieldDoc.fields, reducedQueryPhase.oneResult.sortValueFormats());
                        if (sortScoreIndex != -1) {
                            searchHit.score(((Number) fieldDoc.fields[sortScoreIndex]).floatValue());
                        }
                    }
                    hits.add(searchHit);
                }
            }
        }
        return new SearchHits(hits.toArray(new SearchHit[hits.size()]), reducedQueryPhase.totalHits,
            reducedQueryPhase.maxScore);
    }

    // count the total (we use the query result provider here, since we might not get any hits (we scrolled past them))
    /**
     * Reduces the given query results and consumes all aggregations and profile results.
     * @see QuerySearchResult#consumeAggs()
     * @see QuerySearchResult#consumeProfileResult()
     */
    public final ReducedQueryPhase reducedQueryPhase(List<? extends AtomicArray.Entry<? extends QuerySearchResultProvider>> queryResults) {
        long totalHits = 0;
        long fetchHits = 0;
        float maxScore = Float.NEGATIVE_INFINITY;
        boolean timedOut = false;
        Boolean terminatedEarly = null;
        if (queryResults.isEmpty()) {
            return new ReducedQueryPhase(totalHits, fetchHits, maxScore, timedOut, terminatedEarly, null, null, null, null);
        }
        QuerySearchResult firstResult = queryResults.get(0).value.queryResult();
        final boolean hasSuggest = firstResult.suggest() != null;
        final boolean hasAggs = firstResult.hasAggs();
        final boolean hasProfileResults = firstResult.hasProfileResults();
        final List<InternalAggregations> aggregationsList = hasAggs ? new ArrayList<>(queryResults.size()) : Collections.emptyList();
        // count the total (we use the query result provider here, since we might not get any hits (we scrolled past them))
        final Map<String, List<Suggestion>> groupedSuggestions = hasSuggest ? new HashMap<>() : Collections.emptyMap();
        final Map<String, ProfileShardResult> profileResults = hasProfileResults ? new HashMap<>(queryResults.size())
            : Collections.emptyMap();
        for (AtomicArray.Entry<? extends QuerySearchResultProvider> entry : queryResults) {
            QuerySearchResult result = entry.value.queryResult();
            if (result.searchTimedOut()) {
@@ -442,138 +499,98 @@
            if (!Float.isNaN(result.topDocs().getMaxScore())) {
                maxScore = Math.max(maxScore, result.topDocs().getMaxScore());
            }
        }
        if (Float.isInfinite(maxScore)) {
            maxScore = Float.NaN;
        }

        // clean the fetch counter
        for (AtomicArray.Entry<? extends FetchSearchResultProvider> entry : fetchResults) {
            entry.value.fetchResult().initCounter();
        }
        int from = ignoreFrom ? 0 : firstResult.queryResult().from();
        int numSearchHits = (int) Math.min(fetchHits - from, topN(queryResults));
        // with collapsing we can have more fetch hits than sorted docs
        numSearchHits = Math.min(sortedDocs.length, numSearchHits);
        // merge hits
        List<InternalSearchHit> hits = new ArrayList<>();
        if (!fetchResults.isEmpty()) {
            for (int i = 0; i < numSearchHits; i++) {
                ScoreDoc shardDoc = sortedDocs[i];
                FetchSearchResultProvider fetchResultProvider = fetchResultsArr.get(shardDoc.shardIndex);
                if (fetchResultProvider == null) {
                    continue;
                }
                FetchSearchResult fetchResult = fetchResultProvider.fetchResult();
                int index = fetchResult.counterGetAndIncrement();
                if (index < fetchResult.hits().internalHits().length) {
                    InternalSearchHit searchHit = fetchResult.hits().internalHits()[index];
                    searchHit.score(shardDoc.score);
                    searchHit.shard(fetchResult.shardTarget());
                    if (sorted) {
                        FieldDoc fieldDoc = (FieldDoc) shardDoc;
                        searchHit.sortValues(fieldDoc.fields, firstResult.sortValueFormats());
                        if (sortScoreIndex != -1) {
                            searchHit.score(((Number) fieldDoc.fields[sortScoreIndex]).floatValue());
                        }
                    }
                    hits.add(searchHit);
            if (hasSuggest) {
                assert result.suggest() != null;
                for (Suggestion<? extends Suggestion.Entry<? extends Suggestion.Entry.Option>> suggestion : result.suggest()) {
                    List<Suggestion> suggestionList = groupedSuggestions.computeIfAbsent(suggestion.getName(), s -> new ArrayList<>());
                    suggestionList.add(suggestion);
                }
            }
        }

        // merge suggest results
        Suggest suggest = null;
        if (firstResult.suggest() != null) {
            final Map<String, List<Suggestion>> groupedSuggestions = new HashMap<>();
            for (AtomicArray.Entry<? extends QuerySearchResultProvider> queryResult : queryResults) {
                Suggest shardSuggest = queryResult.value.queryResult().suggest();
                if (shardSuggest != null) {
                    for (Suggestion<? extends Suggestion.Entry<? extends Suggestion.Entry.Option>> suggestion : shardSuggest) {
                        List<Suggestion> suggestionList = groupedSuggestions.computeIfAbsent(suggestion.getName(), s -> new ArrayList<>());
                        suggestionList.add(suggestion);
                    }
                }
            if (hasAggs) {
                aggregationsList.add((InternalAggregations) result.consumeAggs());
            }
            if (groupedSuggestions.isEmpty() == false) {
                suggest = new Suggest(Suggest.reduce(groupedSuggestions));
                if (!fetchResults.isEmpty()) {
                    int currentOffset = numSearchHits;
                    for (CompletionSuggestion suggestion : suggest.filter(CompletionSuggestion.class)) {
                        final List<CompletionSuggestion.Entry.Option> suggestionOptions = suggestion.getOptions();
                        for (int scoreDocIndex = currentOffset; scoreDocIndex < currentOffset + suggestionOptions.size(); scoreDocIndex++) {
                            ScoreDoc shardDoc = sortedDocs[scoreDocIndex];
                            FetchSearchResultProvider fetchSearchResultProvider = fetchResultsArr.get(shardDoc.shardIndex);
                            if (fetchSearchResultProvider == null) {
                                continue;
                            }
                            FetchSearchResult fetchResult = fetchSearchResultProvider.fetchResult();
                            int fetchResultIndex = fetchResult.counterGetAndIncrement();
                            if (fetchResultIndex < fetchResult.hits().internalHits().length) {
                                InternalSearchHit hit = fetchResult.hits().internalHits()[fetchResultIndex];
                                CompletionSuggestion.Entry.Option suggestOption =
                                    suggestionOptions.get(scoreDocIndex - currentOffset);
                                hit.score(shardDoc.score);
                                hit.shard(fetchResult.shardTarget());
                                suggestOption.setHit(hit);
                            }
                        }
                        currentOffset += suggestionOptions.size();
                    }
                    assert currentOffset == sortedDocs.length : "expected no more score doc slices";
                }
            if (hasProfileResults) {
                String key = result.shardTarget().toString();
                profileResults.put(key, result.consumeProfileResult());
            }
        }

        // merge Aggregation
        InternalAggregations aggregations = null;
        if (firstResult.aggregations() != null && firstResult.aggregations().asList() != null) {
            List<InternalAggregations> aggregationsList = new ArrayList<>(queryResults.size());
            for (AtomicArray.Entry<? extends QuerySearchResultProvider> entry : queryResults) {
                aggregationsList.add((InternalAggregations) entry.value.queryResult().aggregations());
            }
            ReduceContext reduceContext = new ReduceContext(bigArrays, scriptService);
            aggregations = InternalAggregations.reduce(aggregationsList, reduceContext);
            List<SiblingPipelineAggregator> pipelineAggregators = firstResult.pipelineAggregators();
            if (pipelineAggregators != null) {
                List<InternalAggregation> newAggs = StreamSupport.stream(aggregations.spliterator(), false)
                    .map((p) -> (InternalAggregation) p)
                    .collect(Collectors.toList());
                for (SiblingPipelineAggregator pipelineAggregator : pipelineAggregators) {
                    InternalAggregation newAgg = pipelineAggregator.doReduce(new InternalAggregations(newAggs), reduceContext);
                    newAggs.add(newAgg);
                }
                aggregations = new InternalAggregations(newAggs);
            }
        }

        //Collect profile results
        SearchProfileShardResults shardResults = null;
        if (firstResult.profileResults() != null) {
            Map<String, ProfileShardResult> profileResults = new HashMap<>(queryResults.size());
            for (AtomicArray.Entry<? extends QuerySearchResultProvider> entry : queryResults) {
                String key = entry.value.queryResult().shardTarget().toString();
                profileResults.put(key, entry.value.queryResult().profileResults());
            }
            shardResults = new SearchProfileShardResults(profileResults);
        }

        InternalSearchHits searchHits = new InternalSearchHits(hits.toArray(new InternalSearchHit[hits.size()]), totalHits, maxScore);

        return new InternalSearchResponse(searchHits, aggregations, suggest, shardResults, timedOut, terminatedEarly);
        final Suggest suggest = groupedSuggestions.isEmpty() ? null : new Suggest(Suggest.reduce(groupedSuggestions));
        final InternalAggregations aggregations = aggregationsList.isEmpty() ? null : reduceAggs(aggregationsList,
            firstResult.pipelineAggregators());
        final SearchProfileShardResults shardResults = profileResults.isEmpty() ? null : new SearchProfileShardResults(profileResults);
        return new ReducedQueryPhase(totalHits, fetchHits, maxScore, timedOut, terminatedEarly, firstResult, suggest, aggregations,
|
||||
shardResults);
|
||||
}
|
||||
|
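The NaN/negative-infinity handling above is easy to misread, so here is a self-contained sketch of the same convention (illustration only, not Elasticsearch code): the reduce seeds maxScore with Float.NEGATIVE_INFINITY, skips shards that report no score (NaN), and normalizes an untouched seed back to Float.NaN so "no hit contributed a score" has a single representation.

public class MaxScoreReduceExample {
    // mirrors the reduce loop above and the ReducedQueryPhase constructor below
    static float reduceMaxScore(float... shardMaxScores) {
        float maxScore = Float.NEGATIVE_INFINITY;
        for (float score : shardMaxScores) {
            if (!Float.isNaN(score)) {
                maxScore = Math.max(maxScore, score);
            }
        }
        return Float.isInfinite(maxScore) ? Float.NaN : maxScore;
    }

    public static void main(String[] args) {
        System.out.println(reduceMaxScore(1.2f, Float.NaN, 0.5f)); // prints 1.2
        System.out.println(reduceMaxScore(Float.NaN, Float.NaN)); // prints NaN
    }
}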

    /**
     * returns the number of top results to be considered across all shards
     */
    private static int topN(List<? extends AtomicArray.Entry<? extends QuerySearchResultProvider>> queryResults) {
        QuerySearchResultProvider firstResult = queryResults.get(0).value;
        int topN = firstResult.queryResult().size();
        if (firstResult.includeFetch()) {
            // if we did both query and fetch on the same go, we have fetched all the docs from each shard already, use them...
            // this is also important since we shortcut and fetch only docs from "from" and up to "size"
            topN *= queryResults.size();
        }
        return topN;
    }

    private InternalAggregations reduceAggs(List<InternalAggregations> aggregationsList,
                                            List<SiblingPipelineAggregator> pipelineAggregators) {
        ReduceContext reduceContext = new ReduceContext(bigArrays, scriptService);
        InternalAggregations aggregations = InternalAggregations.reduce(aggregationsList, reduceContext);
        if (pipelineAggregators != null) {
            List<InternalAggregation> newAggs = StreamSupport.stream(aggregations.spliterator(), false)
                .map((p) -> (InternalAggregation) p)
                .collect(Collectors.toList());
            for (SiblingPipelineAggregator pipelineAggregator : pipelineAggregators) {
                InternalAggregation newAgg = pipelineAggregator.doReduce(new InternalAggregations(newAggs), reduceContext);
                newAggs.add(newAgg);
            }
            return new InternalAggregations(newAggs);
        }
        return aggregations;
    }
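To make the includeFetch branch concrete: with size = 10 across 3 shards, the candidate window is 10 for a normal query-then-fetch round, but 30 once every shard has already fetched its own top `size` documents. A simplified, hypothetical stand-in for topN() with the inputs made explicit (the real method reads them from the query results):

    static int topN(int sizePerShard, boolean includeFetch, int numShards) {
        // query+fetch in one phase means each shard already fetched up to sizePerShard docs
        return includeFetch ? sizePerShard * numShards : sizePerShard;
    }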

    public static final class ReducedQueryPhase {
        // the sum of all hits across all reduced shards
        final long totalHits;
        // the number of returned hits (doc IDs) across all reduced shards
        final long fetchHits;
        // the max score across all reduced hits or {@link Float#NaN} if no hits returned
        final float maxScore;
        // <code>true</code> if at least one reduced result timed out
        final boolean timedOut;
        // non null and true if at least one reduced result was terminated early
        final Boolean terminatedEarly;
        // a non-null arbitrary query result if there was at least one reduced result
        final QuerySearchResult oneResult;
        // the reduced suggest results
        final Suggest suggest;
        // the reduced internal aggregations
        final InternalAggregations aggregations;
        // the reduced profile results
        final SearchProfileShardResults shardResults;

        ReducedQueryPhase(long totalHits, long fetchHits, float maxScore, boolean timedOut, Boolean terminatedEarly,
                          QuerySearchResult oneResult, Suggest suggest, InternalAggregations aggregations,
                          SearchProfileShardResults shardResults) {
            this.totalHits = totalHits;
            this.fetchHits = fetchHits;
            if (Float.isInfinite(maxScore)) {
                this.maxScore = Float.NaN;
            } else {
                this.maxScore = maxScore;
            }
            this.timedOut = timedOut;
            this.terminatedEarly = terminatedEarly;
            this.oneResult = oneResult;
            this.suggest = suggest;
            this.aggregations = aggregations;
            this.shardResults = shardResults;
        }

        /**
         * Creates a new search response from the given merged hits.
         * @see #merge(boolean, ScoreDoc[], ReducedQueryPhase, AtomicArray)
         */
        public InternalSearchResponse buildResponse(SearchHits hits) {
            return new InternalSearchResponse(hits, aggregations, suggest, shardResults, timedOut, terminatedEarly);
        }

        /**
         * Returns <code>true</code> iff the query phase had no results. Otherwise <code>false</code>.
         */
        public boolean isEmpty() {
            return oneResult == null;
        }
    }

}
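The net effect of this refactoring is that the old single merge() is split into an explicit reduce step (aggregations, suggest, profile results) and a hit merge that consumes it. A method-level sketch of the call order, mirroring the scroll actions further down in this diff (same method names and types, but written here purely as an illustration and assuming a SearchPhaseController is at hand):

    static InternalSearchResponse reduceAndMerge(SearchPhaseController controller,
                                                 AtomicArray<QueryFetchSearchResult> queryFetchResults) throws Exception {
        // 1. establish the global order of doc IDs across all shards
        ScoreDoc[] sortedShardDocs = controller.sortDocs(true, queryFetchResults);
        // 2. reduce the query phase once (aggs, suggest, profile)
        SearchPhaseController.ReducedQueryPhase reducedQueryPhase =
            controller.reducedQueryPhase(queryFetchResults.asList());
        // 3. merge the fetched hits against the reduced query phase
        return controller.merge(true, sortedShardDocs, reducedQueryPhase, queryFetchResults);
    }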
@@ -1,64 +0,0 @@
/*
 * Licensed to Elasticsearch under one or more contributor
 * license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright
 * ownership. Elasticsearch licenses this file to you under
 * the Apache License, Version 2.0 (the "License"); you may
 * not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied. See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */

package org.elasticsearch.action.search;

import org.apache.logging.log4j.Logger;
import org.elasticsearch.action.ActionListener;
import org.elasticsearch.cluster.routing.GroupShardsIterator;
import org.elasticsearch.common.CheckedRunnable;
import org.elasticsearch.common.util.concurrent.AtomicArray;
import org.elasticsearch.search.fetch.QueryFetchSearchResult;
import org.elasticsearch.search.internal.AliasFilter;
import org.elasticsearch.search.internal.ShardSearchTransportRequest;
import org.elasticsearch.transport.Transport;

import java.util.Map;
import java.util.concurrent.Executor;
import java.util.function.Function;

final class SearchQueryAndFetchAsyncAction extends AbstractSearchAsyncAction<QueryFetchSearchResult> {

    SearchQueryAndFetchAsyncAction(Logger logger, SearchTransportService searchTransportService,
                                   Function<String, Transport.Connection> nodeIdToConnection,
                                   Map<String, AliasFilter> aliasFilter, Map<String, Float> concreteIndexBoosts,
                                   SearchPhaseController searchPhaseController, Executor executor,
                                   SearchRequest request, ActionListener<SearchResponse> listener,
                                   GroupShardsIterator shardsIts, long startTime, long clusterStateVersion,
                                   SearchTask task) {
        super(logger, searchTransportService, nodeIdToConnection, aliasFilter, concreteIndexBoosts, searchPhaseController, executor,
            request, listener, shardsIts, startTime, clusterStateVersion, task);
    }

    @Override
    protected String initialPhaseName() {
        return "query_fetch";
    }

    @Override
    protected void sendExecuteFirstPhase(Transport.Connection connection, ShardSearchTransportRequest request,
                                         ActionListener<QueryFetchSearchResult> listener) {
        searchTransportService.sendExecuteFetch(connection, request, task, listener);
    }

    @Override
    protected CheckedRunnable<Exception> getNextPhase(AtomicArray<QueryFetchSearchResult> initialResults) {
        return () -> sendResponseAsync("fetch", searchPhaseController, null, initialResults, initialResults);
    }
}
@@ -22,10 +22,10 @@ package org.elasticsearch.action.search;
import org.apache.logging.log4j.Logger;
import org.elasticsearch.action.ActionListener;
import org.elasticsearch.cluster.routing.GroupShardsIterator;
import org.elasticsearch.common.CheckedRunnable;
import org.elasticsearch.cluster.routing.ShardIterator;
import org.elasticsearch.cluster.routing.ShardRouting;
import org.elasticsearch.common.util.concurrent.AtomicArray;
import org.elasticsearch.search.internal.AliasFilter;
import org.elasticsearch.search.internal.ShardSearchTransportRequest;
import org.elasticsearch.search.query.QuerySearchResultProvider;
import org.elasticsearch.transport.Transport;

@@ -34,6 +34,7 @@ import java.util.concurrent.Executor;
import java.util.function.Function;

final class SearchQueryThenFetchAsyncAction extends AbstractSearchAsyncAction<QuerySearchResultProvider> {
    private final SearchPhaseController searchPhaseController;

    SearchQueryThenFetchAsyncAction(Logger logger, SearchTransportService searchTransportService,
                                    Function<String, Transport.Connection> nodeIdToConnection,

@@ -42,23 +43,18 @@ final class SearchQueryThenFetchAsyncAction extends AbstractSearchAsyncAction<Qu
                                    SearchRequest request, ActionListener<SearchResponse> listener,
                                    GroupShardsIterator shardsIts, long startTime, long clusterStateVersion,
                                    SearchTask task) {
        super(logger, searchTransportService, nodeIdToConnection, aliasFilter, concreteIndexBoosts, searchPhaseController, executor,
        super("query", logger, searchTransportService, nodeIdToConnection, aliasFilter, concreteIndexBoosts, executor,
            request, listener, shardsIts, startTime, clusterStateVersion, task);
        this.searchPhaseController = searchPhaseController;
    }

    protected void executePhaseOnShard(ShardIterator shardIt, ShardRouting shard, ActionListener listener) {
        getSearchTransport().sendExecuteQuery(getConnection(shard.currentNodeId()),
            buildShardSearchRequest(shardIt, shard), getTask(), listener);
    }

    @Override
    protected String initialPhaseName() {
        return "query";
    }

    @Override
    protected void sendExecuteFirstPhase(Transport.Connection connection, ShardSearchTransportRequest request,
                                         ActionListener<QuerySearchResultProvider> listener) {
        searchTransportService.sendExecuteQuery(connection, request, task, listener);
    }

    @Override
    protected CheckedRunnable<Exception> getNextPhase(AtomicArray<QuerySearchResultProvider> initialResults) {
        return new FetchPhase(initialResults, searchPhaseController);
    protected SearchPhase getNextPhase(AtomicArray<QuerySearchResultProvider> results, SearchPhaseContext context) {
        return new FetchSearchPhase(results, searchPhaseController, context);
    }
}
@@ -171,8 +171,8 @@ class SearchScrollQueryAndFetchAsyncAction extends AbstractAsyncAction {

    private void innerFinishHim() throws Exception {
        ScoreDoc[] sortedShardDocs = searchPhaseController.sortDocs(true, queryFetchResults);
        final InternalSearchResponse internalResponse = searchPhaseController.merge(true, sortedShardDocs, queryFetchResults,
            queryFetchResults);
        final InternalSearchResponse internalResponse = searchPhaseController.merge(true, sortedShardDocs,
            searchPhaseController.reducedQueryPhase(queryFetchResults.asList()), queryFetchResults);
        String scrollId = null;
        if (request.scroll() != null) {
            scrollId = request.scrollId();
@@ -36,7 +36,6 @@ import org.elasticsearch.search.internal.InternalSearchResponse;
import org.elasticsearch.search.query.QuerySearchResult;
import org.elasticsearch.search.query.ScrollQuerySearchResult;

import java.util.ArrayList;
import java.util.List;
import java.util.concurrent.atomic.AtomicInteger;

@@ -170,13 +169,14 @@ class SearchScrollQueryThenFetchAsyncAction extends AbstractAsyncAction {
    private void executeFetchPhase() throws Exception {
        sortedShardDocs = searchPhaseController.sortDocs(true, queryResults);
        if (sortedShardDocs.length == 0) {
            finishHim();
            finishHim(searchPhaseController.reducedQueryPhase(queryResults.asList()));
            return;
        }

        final IntArrayList[] docIdsToLoad = searchPhaseController.fillDocIdsToLoad(queryResults.length(), sortedShardDocs);
        final ScoreDoc[] lastEmittedDocPerShard = searchPhaseController.getLastEmittedDocPerShard(queryResults.asList(),
            sortedShardDocs, queryResults.length());
        SearchPhaseController.ReducedQueryPhase reducedQueryPhase = searchPhaseController.reducedQueryPhase(queryResults.asList());
        final ScoreDoc[] lastEmittedDocPerShard = searchPhaseController.getLastEmittedDocPerShard(reducedQueryPhase, sortedShardDocs,
            queryResults.length());
        final AtomicInteger counter = new AtomicInteger(docIdsToLoad.length);
        for (int i = 0; i < docIdsToLoad.length; i++) {
            final int index = i;

@@ -192,7 +192,7 @@ class SearchScrollQueryThenFetchAsyncAction extends AbstractAsyncAction {
                    result.shardTarget(querySearchResult.shardTarget());
                    fetchResults.set(index, result);
                    if (counter.decrementAndGet() == 0) {
                        finishHim();
                        finishHim(reducedQueryPhase);
                    }
                }

@@ -203,34 +203,30 @@ class SearchScrollQueryThenFetchAsyncAction extends AbstractAsyncAction {
                    }
                    successfulOps.decrementAndGet();
                    if (counter.decrementAndGet() == 0) {
                        finishHim();
                        finishHim(reducedQueryPhase);
                    }
                }
            });
        } else {
            // the counter is set to the total size of docIdsToLoad which can have null values so we have to count them down too
            if (counter.decrementAndGet() == 0) {
                finishHim();
                finishHim(reducedQueryPhase);
            }
            }
        }
    }

    private void finishHim() {
    private void finishHim(SearchPhaseController.ReducedQueryPhase queryPhase) {
        try {
            innerFinishHim();
            final InternalSearchResponse internalResponse = searchPhaseController.merge(true, sortedShardDocs, queryPhase, fetchResults);
            String scrollId = null;
            if (request.scroll() != null) {
                scrollId = request.scrollId();
            }
            listener.onResponse(new SearchResponse(internalResponse, scrollId, this.scrollId.getContext().length, successfulOps.get(),
                buildTookInMillis(), buildShardFailures()));
        } catch (Exception e) {
            listener.onFailure(new ReduceSearchPhaseException("fetch", "inner finish failed", e, buildShardFailures()));
        }
    }

    private void innerFinishHim() {
        InternalSearchResponse internalResponse = searchPhaseController.merge(true, sortedShardDocs, queryResults, fetchResults);
        String scrollId = null;
        if (request.scroll() != null) {
            scrollId = request.scrollId();
        }
        listener.onResponse(new SearchResponse(internalResponse, scrollId, this.scrollId.getContext().length, successfulOps.get(),
            buildTookInMillis(), buildShardFailures()));
    }
}
@@ -19,6 +19,7 @@

package org.elasticsearch.action.search;

import org.elasticsearch.Version;
import org.elasticsearch.action.ActionListener;
import org.elasticsearch.action.ActionListenerResponseHandler;
import org.elasticsearch.action.IndicesRequest;

@@ -55,7 +56,7 @@ import org.elasticsearch.transport.TransportResponse;
import org.elasticsearch.transport.TransportService;

import java.io.IOException;
import java.util.function.Consumer;
import java.util.function.Supplier;

/**
 * An encapsulation of {@link org.elasticsearch.search.SearchService} operations exposed through

@@ -77,13 +78,18 @@ public class SearchTransportService extends AbstractLifecycleComponent {

    private final TransportService transportService;
    private final RemoteClusterService remoteClusterService;
    private final boolean connectToRemote;

    public SearchTransportService(Settings settings, ClusterSettings clusterSettings, TransportService transportService) {
        super(settings);
        this.connectToRemote = RemoteClusterService.ENABLE_REMOTE_CLUSTERS.get(settings);
        this.transportService = transportService;
        this.remoteClusterService = new RemoteClusterService(settings, transportService);
        clusterSettings.addAffixUpdateConsumer(RemoteClusterService.REMOTE_CLUSTERS_SEEDS, remoteClusterService::updateRemoteCluster,
            (namespace, value) -> {});
        if (connectToRemote) {
            clusterSettings.addAffixUpdateConsumer(RemoteClusterService.REMOTE_CLUSTERS_SEEDS, remoteClusterService::updateRemoteCluster,
                (namespace, value) -> {
                });
        }
    }

    public void sendFreeContext(Transport.Connection connection, final long contextId, SearchRequest request) {
@@ -119,8 +125,18 @@ public class SearchTransportService extends AbstractLifecycleComponent {

    public void sendExecuteQuery(Transport.Connection connection, final ShardSearchTransportRequest request, SearchTask task,
                                 final ActionListener<QuerySearchResultProvider> listener) {
        transportService.sendChildRequest(connection, QUERY_ACTION_NAME, request, task,
            new ActionListenerResponseHandler<>(listener, QuerySearchResult::new));
        // we optimize this and expect a QueryFetchSearchResult if we only have a single shard in the search request
        // this used to be the QUERY_AND_FETCH which doesn't exist anymore.
        final boolean fetchDocuments = request.numberOfShards() == 1;
        Supplier<QuerySearchResultProvider> supplier = fetchDocuments ? QueryFetchSearchResult::new : QuerySearchResult::new;
        if (connection.getVersion().onOrBefore(Version.V_5_3_0_UNRELEASED) && fetchDocuments) {
            // TODO this BWC layer can be removed once this is back-ported to 5.3
            transportService.sendChildRequest(connection, QUERY_FETCH_ACTION_NAME, request, task,
                new ActionListenerResponseHandler<>(listener, supplier));
        } else {
            transportService.sendChildRequest(connection, QUERY_ACTION_NAME, request, task,
                new ActionListenerResponseHandler<>(listener, supplier));
        }
    }

    public void sendExecuteQuery(Transport.Connection connection, final QuerySearchRequest request, SearchTask task,
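The version check above selects the transport action and the response deserializer together. A minimal, hypothetical mirror of that decision rule (the query+fetch action name is quoted from the handler-registration comment later in this file; the plain query action name is an assumption):

    static String selectQueryAction(int numberOfShards, boolean remoteOnOrBefore530) {
        // a single-shard search expects the shard to fetch documents in the same round-trip
        boolean fetchDocuments = numberOfShards == 1;
        if (fetchDocuments && remoteOnOrBefore530) {
            // pre-5.3 nodes only understand the dedicated query+fetch action for this case
            return "indices:data/read/search[phase/query+fetch]";
        }
        return "indices:data/read/search[phase/query]"; // assumed value of QUERY_ACTION_NAME
    }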
@@ -135,12 +151,6 @@ public class SearchTransportService extends AbstractLifecycleComponent {
            new ActionListenerResponseHandler<>(listener, ScrollQuerySearchResult::new));
    }

    public void sendExecuteFetch(Transport.Connection connection, final ShardSearchTransportRequest request, SearchTask task,
                                 final ActionListener<QueryFetchSearchResult> listener) {
        transportService.sendChildRequest(connection, QUERY_FETCH_ACTION_NAME, request, task,
            new ActionListenerResponseHandler<>(listener, QueryFetchSearchResult::new));
    }

    public void sendExecuteFetch(DiscoveryNode node, final InternalScrollSearchRequest request, SearchTask task,
                                 final ActionListener<ScrollQueryFetchSearchResult> listener) {
        transportService.sendChildRequest(transportService.getConnection(node), QUERY_FETCH_SCROLL_ACTION_NAME, request, task,

@@ -163,6 +173,15 @@ public class SearchTransportService extends AbstractLifecycleComponent {
            new ActionListenerResponseHandler<>(listener, FetchSearchResult::new));
    }

    /**
     * Used by {@link TransportSearchAction} to send the expand queries (field collapsing).
     */
    void sendExecuteMultiSearch(final MultiSearchRequest request, SearchTask task,
                                final ActionListener<MultiSearchResponse> listener) {
        transportService.sendChildRequest(transportService.getConnection(transportService.getLocalNode()), MultiSearchAction.NAME, request,
            task, new ActionListenerResponseHandler<>(listener, MultiSearchResponse::new));
    }

    public RemoteClusterService getRemoteClusterService() {
        return remoteClusterService;
    }

@@ -334,11 +353,15 @@ public class SearchTransportService extends AbstractLifecycleComponent {
            });
        TransportActionProxy.registerProxyAction(transportService, QUERY_SCROLL_ACTION_NAME, ScrollQuerySearchResult::new);

        // this is for BWC with 5.3 until the QUERY_AND_FETCH removal change has been back-ported to 5.x
        // in 5.3 we will only execute an `indices:data/read/search[phase/query+fetch]` if the node is pre 5.3
        // such that we can remove this after the back-port.
        transportService.registerRequestHandler(QUERY_FETCH_ACTION_NAME, ShardSearchTransportRequest::new, ThreadPool.Names.SEARCH,
            new TaskAwareTransportRequestHandler<ShardSearchTransportRequest>() {
                @Override
                public void messageReceived(ShardSearchTransportRequest request, TransportChannel channel, Task task) throws Exception {
                    QueryFetchSearchResult result = searchService.executeFetchPhase(request, (SearchTask)task);
                    assert request.numberOfShards() == 1 : "expected single shard request but got: " + request.numberOfShards();
                    QuerySearchResultProvider result = searchService.executeQueryPhase(request, (SearchTask)task);
                    channel.sendResponse(result);
                }
            });

@@ -381,8 +404,10 @@ public class SearchTransportService extends AbstractLifecycleComponent {

    @Override
    protected void doStart() {
        // here we start to connect to the remote clusters
        remoteClusterService.initializeRemoteClusters();
        if (connectToRemote) {
            // here we start to connect to the remote clusters
            remoteClusterService.initializeRemoteClusters();
        }
    }

    @Override
@@ -36,14 +36,9 @@ public enum SearchType {
     * document content. The return number of hits is exactly as specified in size, since they are the only ones that
     * are fetched. This is very handy when the index has a lot of shards (not replicas, shard id groups).
     */
    QUERY_THEN_FETCH((byte) 1),
    QUERY_THEN_FETCH((byte) 1);
    // 2 used to be DFS_QUERY_AND_FETCH
    /**
     * The most naive (and possibly fastest) implementation is to simply execute the query on all relevant shards
     * and return the results. Each shard returns size results. Since each shard already returns size hits, this
     * type actually returns size times number of shards results back to the caller.
     */
    QUERY_AND_FETCH((byte) 3);
    // 3 used to be QUERY_AND_FETCH

    /**
     * The default search type ({@link #QUERY_THEN_FETCH}).

@@ -69,10 +64,9 @@ public enum SearchType {
    public static SearchType fromId(byte id) {
        if (id == 0) {
            return DFS_QUERY_THEN_FETCH;
        } else if (id == 1) {
        } else if (id == 1
                || id == 3) { // TODO this BWC layer can be removed once this is back-ported to 5.3; QUERY_AND_FETCH is removed now
            return QUERY_THEN_FETCH;
        } else if (id == 3) {
            return QUERY_AND_FETCH;
        } else {
            throw new IllegalArgumentException("No search type for [" + id + "]");
        }

@@ -91,8 +85,6 @@ public enum SearchType {
            return SearchType.DFS_QUERY_THEN_FETCH;
        } else if ("query_then_fetch".equals(searchType)) {
            return SearchType.QUERY_THEN_FETCH;
        } else if ("query_and_fetch".equals(searchType)) {
            return SearchType.QUERY_AND_FETCH;
        } else {
            throw new IllegalArgumentException("No search type for [" + searchType + "]");
        }
|
|||
import java.util.Map;
|
||||
import java.util.Set;
|
||||
import java.util.concurrent.Executor;
|
||||
import java.util.function.BiConsumer;
|
||||
import java.util.function.Function;
|
||||
|
||||
import static org.elasticsearch.action.search.SearchType.QUERY_AND_FETCH;
|
||||
import static org.elasticsearch.action.search.SearchType.QUERY_THEN_FETCH;
|
||||
|
||||
public class TransportSearchAction extends HandledTransportAction<SearchRequest, SearchResponse> {
|
||||
|
@ -185,7 +183,7 @@ public class TransportSearchAction extends HandledTransportAction<SearchRequest,
|
|||
// optimize search type for cases where there is only one shard group to search on
|
||||
if (shardIterators.size() == 1) {
|
||||
// if we only have one group, then we always want Q_A_F, no need for DFS, and no need to do THEN since we hit one shard
|
||||
searchRequest.searchType(QUERY_AND_FETCH);
|
||||
searchRequest.searchType(QUERY_THEN_FETCH);
|
||||
}
|
||||
if (searchRequest.isSuggestOnly()) {
|
||||
// disable request cache if we have only suggest
|
||||
|
@ -213,22 +211,8 @@ public class TransportSearchAction extends HandledTransportAction<SearchRequest,
|
|||
return connection;
|
||||
};
|
||||
|
||||
final ActionListener<SearchResponse> wrapper;
|
||||
if (searchPhaseController.getSearchResponseListener().size() > 0) {
|
||||
wrapper = ActionListener.wrap(searchResponse -> {
|
||||
List<BiConsumer<SearchRequest, SearchResponse>> responseListeners =
|
||||
searchPhaseController.getSearchResponseListener();
|
||||
for (BiConsumer<SearchRequest, SearchResponse> respListener : responseListeners) {
|
||||
respListener.accept(searchRequest, searchResponse);
|
||||
}
|
||||
listener.onResponse(searchResponse);
|
||||
|
||||
}, listener::onFailure);
|
||||
} else {
|
||||
wrapper = listener;
|
||||
}
|
||||
searchAsyncAction(task, searchRequest, shardIterators, startTimeInMillis, connectionLookup, clusterState.version(),
|
||||
Collections.unmodifiableMap(aliasFilter), concreteIndexBoosts, wrapper).start();
|
||||
Collections.unmodifiableMap(aliasFilter), concreteIndexBoosts, listener).start();
|
||||
}
|
||||
|
||||
private static GroupShardsIterator mergeShardsIterators(GroupShardsIterator localShardsIterator,
|
||||
|
@ -269,11 +253,6 @@ public class TransportSearchAction extends HandledTransportAction<SearchRequest,
|
|||
aliasFilter, concreteIndexBoosts, searchPhaseController, executor, searchRequest, listener, shardIterators, startTime,
|
||||
clusterStateVersion, task);
|
||||
break;
|
||||
case QUERY_AND_FETCH:
|
||||
searchAsyncAction = new SearchQueryAndFetchAsyncAction(logger, searchTransportService, connectionLookup,
|
||||
aliasFilter, concreteIndexBoosts, searchPhaseController, executor, searchRequest, listener, shardIterators, startTime,
|
||||
clusterStateVersion, task);
|
||||
break;
|
||||
default:
|
||||
throw new IllegalStateException("Unknown search type: [" + searchRequest.searchType() + "]");
|
||||
}
|
||||
|
|
|
@@ -34,19 +34,9 @@ final class TransportSearchHelper {
        return new InternalScrollSearchRequest(request, id);
    }

    static String buildScrollId(SearchType searchType, AtomicArray<? extends SearchPhaseResult> searchPhaseResults) throws IOException {
        if (searchType == SearchType.DFS_QUERY_THEN_FETCH || searchType == SearchType.QUERY_THEN_FETCH) {
            return buildScrollId(ParsedScrollId.QUERY_THEN_FETCH_TYPE, searchPhaseResults);
        } else if (searchType == SearchType.QUERY_AND_FETCH) {
            return buildScrollId(ParsedScrollId.QUERY_AND_FETCH_TYPE, searchPhaseResults);
        } else {
            throw new IllegalStateException("search_type [" + searchType + "] not supported");
        }
    }

    static String buildScrollId(String type, AtomicArray<? extends SearchPhaseResult> searchPhaseResults) throws IOException {
    static String buildScrollId(AtomicArray<? extends SearchPhaseResult> searchPhaseResults) throws IOException {
        try (RAMOutputStream out = new RAMOutputStream()) {
            out.writeString(type);
            out.writeString(searchPhaseResults.length() == 1 ? ParsedScrollId.QUERY_AND_FETCH_TYPE : ParsedScrollId.QUERY_THEN_FETCH_TYPE);
            out.writeVInt(searchPhaseResults.asList().size());
            for (AtomicArray.Entry<? extends SearchPhaseResult> entry : searchPhaseResults.asList()) {
                SearchPhaseResult searchPhaseResult = entry.value;
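With the SearchType parameter gone, the scroll id type is now derived from the shape of the results rather than from the request. A hypothetical one-line mirror of the selection rule (the two string constants are assumptions; only the length-based rule comes from the diff):

    static String scrollIdType(int numberOfShardResults) {
        // a single shard result implies query and fetch ran as one phase
        return numberOfShardResults == 1 ? "queryAndFetch" : "queryThenFetch"; // assumed ParsedScrollId constants
    }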
@@ -66,7 +66,7 @@ public class TransportSearchScrollAction extends HandledTransportAction<SearchSc
                action = new SearchScrollQueryThenFetchAsyncAction(logger, clusterService, searchTransportService,
                    searchPhaseController, request, (SearchTask)task, scrollId, listener);
                break;
            case QUERY_AND_FETCH_TYPE:
            case QUERY_AND_FETCH_TYPE: // TODO can we get rid of this?
                action = new SearchScrollQueryAndFetchAsyncAction(logger, clusterService, searchTransportService,
                    searchPhaseController, request, (SearchTask)task, scrollId, listener);
                break;